passes testing after slimming

2026-01-03 02:30:33 -07:00
parent da300f75fe
commit 928f08cc57
18 changed files with 97 additions and 784 deletions

View File

@@ -7,8 +7,8 @@ import json
import sys
import os
-# Add the current directory to the Python path
-sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+# Add the parent directory to the Python path
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from generate_prompts import JournalPromptGenerator
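The double `os.path.dirname` call above points `sys.path` at the directory one level above the test script, so `from generate_prompts import JournalPromptGenerator` keeps resolving after the test scripts were moved down a level. A minimal sketch of the path math, assuming a hypothetical `tests/` layout that this diff does not actually show:

```python
import os
import sys

# Hypothetical layout: <repo>/generate_prompts.py and <repo>/tests/test_something.py
here = os.path.abspath(__file__)        # .../repo/tests/test_something.py
tests_dir = os.path.dirname(here)       # .../repo/tests
repo_root = os.path.dirname(tests_dir)  # .../repo, where generate_prompts.py is assumed to live

sys.path.insert(0, repo_root)
# from generate_prompts import JournalPromptGenerator  # would now resolve against repo_root
```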

View File

@@ -6,8 +6,8 @@ Test script to verify feedback_words integration
import sys
import os
-# Add current directory to path
-sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+# Add the parent directory to the Python path
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from generate_prompts import JournalPromptGenerator
@@ -29,17 +29,6 @@ def test_feedback_words_loading():
    else:
        print("No feedback words loaded (this might be expected if file is empty)")

    # Test _prepare_prompt method
    print("\nTesting _prepare_prompt method...")
    prompt = generator._prepare_prompt()
    print(f"Prompt length: {len(prompt)} characters")

    # Check if feedback words are included in the prompt
    if generator.feedback_words and "Feedback words:" in prompt:
        print("✓ Feedback words are included in the prompt")
    else:
        print("✗ Feedback words are NOT included in the prompt")

    # Test _prepare_prompt_with_count method
    print("\nTesting _prepare_prompt_with_count method...")
    prompt_with_count = generator._prepare_prompt_with_count(3)
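The assertion on the literal string "Feedback words:" suggests `_prepare_prompt` appends the loaded feedback words under that heading. A rough sketch of that behaviour, assuming `feedback_words` is a list of strings; the real method in `generate_prompts.py` is not shown in this diff and may differ:

```python
from typing import List


def prepare_prompt_sketch(base_prompt: str, feedback_words: List[str]) -> str:
    """Hypothetical stand-in for JournalPromptGenerator._prepare_prompt."""
    if not feedback_words:
        return base_prompt
    # Append the words under the exact heading the test checks for.
    return f"{base_prompt}\n\nFeedback words: {', '.join(feedback_words)}"


prompt = prepare_prompt_sketch("Generate six journal prompts.", ["gratitude", "focus"])
assert "Feedback words:" in prompt
```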

View File

@@ -1,55 +0,0 @@
#!/usr/bin/env python3
"""
Test to demonstrate the fix for the AttributeError when API returns list instead of dict.
"""
import json

from generate_prompts import JournalPromptGenerator


def test_original_error_case():
    """Test the exact error case: API returns a list instead of a dict."""
    print("Testing the original error case: API returns list instead of dict")
    print("="*60)

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Simulate API returning a list (which could happen with null/malformed data)
    list_response = json.dumps([])  # Empty list

    print("\n1. Testing with empty list []:")
    try:
        result = generator._parse_ai_response(list_response)
        print(f" Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f" ERROR: AttributeError occurred: {e}")
    except Exception as e:
        print(f" Other error: {type(e).__name__}: {e}")

    # Test with list containing dictionaries (another possible malformed response)
    list_with_dicts = json.dumps([
        {"some_key": "some value"},
        {"another_key": "another value"}
    ])

    print("\n2. Testing with list of dictionaries:")
    try:
        result = generator._parse_ai_response(list_with_dicts)
        print(f" Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f" ERROR: AttributeError occurred: {e}")
    except Exception as e:
        print(f" Other error: {type(e).__name__}: {e}")

    # Test with None/null data (worst case)
    print("\n3. Testing with None/null data (simulated):")
    # We can't directly test None since json.loads would fail, but our code
    # handles the case where data might be None after parsing

    print("\n" + "="*60)
    print("Test complete! The fix prevents AttributeError for list responses.")


if __name__ == "__main__":
    test_original_error_case()
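This deleted script tested the guard that stops `_parse_ai_response` from calling dict methods on a list. A minimal sketch of that kind of defensive normalization, assuming the method is meant to return a plain list of prompt strings whatever shape the API sends back; the shipped implementation in `generate_prompts.py` may differ in detail:

```python
import json
from typing import List


def parse_ai_response_sketch(raw: str) -> List[str]:
    """Hypothetical defensive parser: tolerates dicts, bare lists, and null."""
    data = json.loads(raw)
    if data is None:
        return []
    if isinstance(data, dict):
        # Keyed format: {"prompt00": "...", ...} -> keep only the text values.
        return [str(value) for value in data.values()]
    if isinstance(data, list):
        prompts: List[str] = []
        for item in data:
            if isinstance(item, dict):
                prompts.extend(str(value) for value in item.values())
            else:
                prompts.append(str(item))
        return prompts
    return []


assert parse_ai_response_sketch("[]") == []               # case 1 from the deleted test
assert parse_ai_response_sketch('[{"k": "v"}]') == ["v"]  # case 2 from the deleted test
```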

View File

@@ -1,91 +0,0 @@
#!/usr/bin/env python3
"""
Test the new format where AI returns a list and keys are generated locally.
"""
import json
import sys
import os

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from generate_prompts import JournalPromptGenerator


def test_new_format():
    """Test the new format where AI returns a list and keys are generated locally."""
    print("Testing new format: AI returns list, keys generated locally")
    print("="*60)

    # Create a mock generator
    generator = JournalPromptGenerator(config_path=".env")

    # Create a mock AI response in the new list format
    mock_ai_response = [
        "Write about a childhood memory that still makes you smile.",
        "Describe your perfect day from start to finish.",
        "What is something you've been putting off and why?",
        "Imagine you could have a conversation with any historical figure.",
        "Write a letter to your future self one year from now.",
        "Describe a place that feels like home to you."
    ]

    # Convert to JSON string
    json_response = json.dumps(mock_ai_response)

    print("\n1. Testing _parse_ai_response with list format:")
    result = generator._parse_ai_response(json_response)
    print(f" Result type: {type(result)}")
    print(f" Number of prompts: {len(result)}")
    print(f" First prompt: {result[0][:50]}...")

    # Verify it's a list of strings
    assert isinstance(result, list), "Result should be a list"
    assert all(isinstance(prompt, str) for prompt in result), "All items should be strings"

    print("\n2. Testing add_prompts_to_pool with list of strings:")
    # Get initial pool size
    initial_pool_size = len(generator.pool_prompts)
    print(f" Initial pool size: {initial_pool_size}")

    # Add prompts to pool
    generator.add_prompts_to_pool(result)

    # Check new pool size
    new_pool_size = len(generator.pool_prompts)
    print(f" New pool size: {new_pool_size}")
    print(f" Added {new_pool_size - initial_pool_size} prompts")

    # Check that prompts in pool have keys
    print(f"\n3. Checking that prompts in pool have generated keys:")
    for i, prompt_dict in enumerate(generator.pool_prompts[-len(result):]):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        print(f" Prompt {i+1}: Key='{prompt_key}', Text='{prompt_text[:30]}...'")
        assert prompt_key.startswith("poolprompt"), f"Key should start with 'poolprompt', got '{prompt_key}'"

    print("\n4. Testing draw_prompts_from_pool:")
    drawn_prompts = generator.draw_prompts_from_pool(count=2)
    print(f" Drawn {len(drawn_prompts)} prompts from pool")
    print(f" Pool size after drawing: {len(generator.pool_prompts)}")

    # Check drawn prompts have keys
    for i, prompt_dict in enumerate(drawn_prompts):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        print(f" Drawn prompt {i+1}: Key='{prompt_key}', Text='{prompt_text[:30]}...'")

    print("\n" + "="*60)
    print("✅ All tests passed! New format works correctly.")

    print("\nSummary:")
    print("- AI returns prompts as a JSON list (no keys)")
    print("- _parse_ai_response returns List[str]")
    print("- add_prompts_to_pool generates keys locally (poolprompt000, poolprompt001, etc.)")
    print("- draw_prompts_from_pool returns List[Dict[str, str]] with generated keys")

    return True


if __name__ == "__main__":
    test_new_format()
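The summary printed by this deleted test is the useful part: the model returns a bare JSON list, and keys such as `poolprompt000` are minted locally when the prompts enter the pool. A small sketch of that key generation under the same assumptions; the real `add_prompts_to_pool` may number keys differently (for example, relative to the prompts being added rather than the whole pool):

```python
from typing import Dict, List


def add_prompts_to_pool_sketch(pool: List[Dict[str, str]], new_prompts: List[str]) -> None:
    """Hypothetical key minting: poolprompt000, poolprompt001, ... appended in order."""
    start = len(pool)  # assumption: numbering continues from the existing pool length
    for offset, text in enumerate(new_prompts):
        pool.append({f"poolprompt{start + offset:03d}": text})


pool: List[Dict[str, str]] = []
add_prompts_to_pool_sketch(pool, ["Describe your perfect day.", "Write a letter to your future self."])
assert list(pool[0].keys())[0] == "poolprompt000"
assert list(pool[1].keys())[0] == "poolprompt001"
```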

View File

@@ -201,7 +201,7 @@ def test_python_scripts():
"""Test that Python scripts are syntactically valid."""
print("\nTesting Python scripts...")
scripts_to_test = ["generate_prompts.py", "simple_generate.py"]
scripts_to_test = ["generate_prompts.py"]
all_valid = True
for script in scripts_to_test:
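The hunk is cut off before the per-script check, so only the trimmed `scripts_to_test` list is visible; the commit simply drops `simple_generate.py` from it. A plausible version of what the loop body does, using the standard library's `py_compile`; this is an assumption, not code from the diff:

```python
import py_compile


def is_syntactically_valid(path: str) -> bool:
    """Hypothetical helper: return True if the script compiles cleanly."""
    try:
        py_compile.compile(path, doraise=True)
        return True
    except py_compile.PyCompileError as exc:
        print(f"  Syntax error in {path}: {exc.msg}")
        return False
```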

View File

@@ -1,98 +0,0 @@
#!/usr/bin/env python3
"""
Test script to verify the prompt numbering logic.
"""
import json
import configparser


def get_num_prompts():
    """Get the number of prompts from settings.cfg or default."""
    config = configparser.ConfigParser()
    num_prompts = 6  # Default value
    try:
        config.read('settings.cfg')
        if 'prompts' in config and 'num_prompts' in config['prompts']:
            num_prompts = int(config['prompts']['num_prompts'])
    except (FileNotFoundError, ValueError):
        pass
    return num_prompts


def test_renumbering():
    """Test the renumbering logic."""
    # Get number of prompts from config
    num_prompts = get_num_prompts()

    # Create a sample historic prompts list
    historic_prompts = []
    for i in range(60):
        historic_prompts.append({
            f"prompt{i:02d}": f"Old prompt {i}"
        })

    print(f"Original prompts: {len(historic_prompts)}")
    print(f"First prompt key: {list(historic_prompts[0].keys())[0]}")
    print(f"Last prompt key: {list(historic_prompts[-1].keys())[0]}")
    print(f"Number of prompts from config: {num_prompts}")

    # Simulate adding new prompts (as the current code would create them)
    new_prompts = []
    for i in range(num_prompts):
        new_prompts.append({
            f"prompt{len(historic_prompts) + i:02d}": f"New prompt {i}"
        })

    print(f"\nNew prompts to add: {len(new_prompts)}")
    for i, prompt in enumerate(new_prompts):
        print(f" New prompt {i}: {list(prompt.keys())[0]}")

    # Prepend new prompts (reverse to maintain order)
    for prompt in reversed(new_prompts):
        historic_prompts.insert(0, prompt)

    print(f"\nAfter prepending: {len(historic_prompts)} prompts")
    print(f"First 3 prompts keys:")
    for i in range(3):
        print(f" {i}: {list(historic_prompts[i].keys())[0]}")

    # Renumber all prompts
    renumbered_prompts = []
    for i, prompt_dict in enumerate(historic_prompts):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        new_prompt_key = f"prompt{i:02d}"
        renumbered_prompts.append({
            new_prompt_key: prompt_text
        })

    print(f"\nAfter renumbering: {len(renumbered_prompts)} prompts")
    print(f"First 10 prompts keys:")
    for i in range(10):
        print(f" prompt{i:02d}: {list(renumbered_prompts[i].keys())[0]} = {renumbered_prompts[i][f'prompt{i:02d}'][:30]}...")

    # Keep only first 60
    if len(renumbered_prompts) > 60:
        renumbered_prompts = renumbered_prompts[:60]

    print(f"\nAfter keeping only first 60: {len(renumbered_prompts)} prompts")
    print(f"First prompt: {list(renumbered_prompts[0].keys())[0]} = {renumbered_prompts[0]['prompt00'][:30]}...")
    print(f"Last prompt: {list(renumbered_prompts[-1].keys())[0]} = {renumbered_prompts[-1]['prompt59'][:30]}...")

    # Verify the range
    for i in range(60):
        expected_key = f"prompt{i:02d}"
        actual_key = list(renumbered_prompts[i].keys())[0]
        if expected_key != actual_key:
            print(f"ERROR: Expected {expected_key}, got {actual_key}")
            return False

    print("\n✅ All tests passed! Prompt numbering is correct.")
    return True


if __name__ == "__main__":
    test_renumbering()
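The deleted script walks the prepend, renumber, and cap-at-60 pipeline step by step; the same logic condenses to a short helper, sketched here under the test's own conventions (`promptNN` keys, newest first, 60 entries kept). It mirrors the test, not necessarily the production code:

```python
from typing import Dict, List


def merge_and_renumber(historic: List[Dict[str, str]], new_texts: List[str], keep: int = 60) -> List[Dict[str, str]]:
    """Prepend new prompts, renumber keys as prompt00..promptNN, keep only the newest `keep`."""
    texts = new_texts + [list(entry.values())[0] for entry in historic]  # newest first
    return [{f"prompt{i:02d}": text} for i, text in enumerate(texts[:keep])]


historic = [{f"prompt{i:02d}": f"Old prompt {i}"} for i in range(60)]
merged = merge_and_renumber(historic, [f"New prompt {i}" for i in range(6)])
assert len(merged) == 60
assert merged[0] == {"prompt00": "New prompt 0"}
assert merged[59] == {"prompt59": "Old prompt 53"}
```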

View File

@@ -6,7 +6,9 @@ Test the error handling with a valid response.
import sys
import os
import json
-sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+# Add the parent directory to the Python path
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from generate_prompts import JournalPromptGenerator