tests cleanup

commit b0b343e009 (parent ffaa7c96ba), 2026-01-03 01:54:56 -07:00
8 changed files with 1 addition and 0 deletions

tests/test_end_to_end.py (Normal file, 82 lines added)

@@ -0,0 +1,82 @@
#!/usr/bin/env python3
"""
End-to-end test to verify prompts are stored as simple strings without keys.
"""
import json
import sys
import os
# Add the project root (the parent of this tests/ directory) to the Python path
# so generate_prompts can be imported; it is assumed to live at the repo root.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from generate_prompts import JournalPromptGenerator


def test_end_to_end():
    """Test the complete flow from parsing to pool management."""
    print("Testing end-to-end flow...")
    print("="*60)

    # Create a generator instance
    generator = JournalPromptGenerator(config_path=".env")

    # Clear the pool first
    generator.pool_prompts = []
    generator._save_pool_prompts()

    # Test 1: Simulate AI response (JSON list format)
    print("\n1. Simulating AI response (JSON list format)...")
    mock_ai_response = json.dumps([
        "Write about a childhood memory that still makes you smile.",
        "Describe your perfect day from start to finish.",
        "What is something you've been putting off and why?",
        "Imagine you could have a conversation with any historical figure."
    ])

    # Parse the response
    parsed_prompts = generator._parse_ai_response(mock_ai_response)
    print(f" Parsed {len(parsed_prompts)} prompts")
    print(f" All prompts are strings: {all(isinstance(p, str) for p in parsed_prompts)}")
    print(f" No prompts have keys: {all(not isinstance(p, dict) for p in parsed_prompts)}")

    # Test 2: Add to pool
    print("\n2. Adding prompts to pool...")
    generator.add_prompts_to_pool(parsed_prompts)
    print(f" Pool now has {len(generator.pool_prompts)} prompts")
    print(f" All pool prompts are strings: {all(isinstance(p, str) for p in generator.pool_prompts)}")

    # Test 3: Draw from pool
    print("\n3. Drawing prompts from pool...")
    drawn_prompts = generator.draw_prompts_from_pool(count=2)
    print(f" Drew {len(drawn_prompts)} prompts")
    print(f" Drawn prompts are strings: {all(isinstance(p, str) for p in drawn_prompts)}")
    print(f" Pool now has {len(generator.pool_prompts)} prompts remaining")

    # Test 4: Save and load pool
    print("\n4. Testing pool persistence...")
    # Save current pool
    generator._save_pool_prompts()
    # Create a new generator instance to load the pool
    generator2 = JournalPromptGenerator(config_path=".env")
    print(f" New instance loaded {len(generator2.pool_prompts)} prompts from pool_prompts.json")
    print(f" Loaded prompts are strings: {all(isinstance(p, str) for p in generator2.pool_prompts)}")

    # Test 5: Check pool_prompts.json content
    print("\n5. Checking pool_prompts.json file content...")
    with open("pool_prompts.json", "r") as f:
        pool_content = json.load(f)
    print(f" File contains {len(pool_content)} items")
    print(f" First item type: {type(pool_content[0])}")
    print(f" First item is string: {isinstance(pool_content[0], str)}")
    print(f" First item value: {pool_content[0][:50]}...")

    print("\n" + "="*60)
    print("✅ All tests passed! Prompts are stored as simple strings without keys.")
    print("="*60)
    return True


if __name__ == "__main__":
    test_end_to_end()
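
For reference, the on-disk format this test assumes for pool_prompts.json is a plain JSON array of prompt strings with no per-prompt keys. The snippet below is an editor's hypothetical example of that layout (example_pool is an illustration, not data from the repository):

import json

# Hypothetical example of the pool_prompts.json layout that test_end_to_end verifies:
# a flat JSON array of prompt strings, no wrapper object and no per-prompt keys.
example_pool = [
    "Write about a childhood memory that still makes you smile.",
    "Describe your perfect day from start to finish.",
]
serialized = json.dumps(example_pool, indent=2)

# Round-tripping yields plain strings, which is exactly what the test asserts.
assert all(isinstance(p, str) for p in json.loads(serialized))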


@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""
Test script to verify feedback_words integration
"""
import sys
import os
# Add the project root (parent of tests/) to the path so generate_prompts can be imported
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from generate_prompts import JournalPromptGenerator


def test_feedback_words_loading():
    """Test that feedback_words are loaded correctly."""
    print("Testing feedback_words integration...")

    try:
        # Initialize the generator
        generator = JournalPromptGenerator()

        # Check if feedback_words were loaded
        print(f"Number of feedback words loaded: {len(generator.feedback_words)}")
        if generator.feedback_words:
            print("Feedback words loaded successfully:")
            for i, feedback in enumerate(generator.feedback_words):
                print(f" {i+1}. {feedback}")
        else:
            print("No feedback words loaded (this might be expected if file is empty)")

        # Test _prepare_prompt method
        print("\nTesting _prepare_prompt method...")
        prompt = generator._prepare_prompt()
        print(f"Prompt length: {len(prompt)} characters")

        # Check if feedback words are included in the prompt
        if generator.feedback_words and "Feedback words:" in prompt:
            print("✓ Feedback words are included in the prompt")
        else:
            print("✗ Feedback words are NOT included in the prompt")

        # Test _prepare_prompt_with_count method
        print("\nTesting _prepare_prompt_with_count method...")
        prompt_with_count = generator._prepare_prompt_with_count(3)
        print(f"Prompt with count length: {len(prompt_with_count)} characters")

        # Check if feedback words are included in the prompt with count
        if generator.feedback_words and "Feedback words:" in prompt_with_count:
            print("✓ Feedback words are included in the prompt with count")
        else:
            print("✗ Feedback words are NOT included in the prompt with count")

        print("\n✅ All tests passed!")
        return True

    except Exception as e:
        print(f"\n❌ Error during testing: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = test_feedback_words_loading()
    sys.exit(0 if success else 1)
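
The checks above only look for a literal "Feedback words:" marker in the prepared prompt. As a rough illustration of the behaviour being tested, a minimal sketch follows; build_prompt is a hypothetical helper written for this note, not the actual generate_prompts implementation:

from typing import List

def build_prompt(template: str, feedback_words: List[str]) -> str:
    # Hypothetical sketch: append a "Feedback words:" section only when words are loaded,
    # mirroring the condition test_feedback_words_loading checks for.
    if feedback_words:
        return template + "\nFeedback words: " + ", ".join(feedback_words)
    return template

assert "Feedback words:" in build_prompt("Generate six prompts.", ["cozy", "wander"])
assert "Feedback words:" not in build_prompt("Generate six prompts.", [])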

tests/test_final_fix.py (Normal file, 55 lines added)

@@ -0,0 +1,55 @@
#!/usr/bin/env python3
"""
Test to demonstrate the fix for the AttributeError when API returns list instead of dict.
"""
import json
import os
import sys

# Add the project root (parent of tests/) to the path so generate_prompts can be
# imported when this file is run directly from the tests/ directory.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from generate_prompts import JournalPromptGenerator


def test_original_error_case():
    """Test the exact error case: API returns a list instead of a dict."""
    print("Testing the original error case: API returns list instead of dict")
    print("="*60)

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Simulate API returning a list (which could happen with null/malformed data)
    list_response = json.dumps([])  # Empty list

    print("\n1. Testing with empty list []:")
    try:
        result = generator._parse_ai_response(list_response)
        print(f" Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f" ERROR: AttributeError occurred: {e}")
    except Exception as e:
        print(f" Other error: {type(e).__name__}: {e}")

    # Test with list containing dictionaries (another possible malformed response)
    list_with_dicts = json.dumps([
        {"some_key": "some value"},
        {"another_key": "another value"}
    ])

    print("\n2. Testing with list of dictionaries:")
    try:
        result = generator._parse_ai_response(list_with_dicts)
        print(f" Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f" ERROR: AttributeError occurred: {e}")
    except Exception as e:
        print(f" Other error: {type(e).__name__}: {e}")

    # Test with None/null data (worst case)
    print("\n3. Testing with None/null data (simulated):")
    # We can't directly test None since json.loads would fail, but our code
    # handles the case where data might be None after parsing

    print("\n" + "="*60)
    print("Test complete! The fix prevents AttributeError for list responses.")


if __name__ == "__main__":
    test_original_error_case()
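
The AttributeError this test guards against typically comes from calling dict methods (such as .items()) on a parsed payload that turned out to be a list. The sketch below shows the defensive shape being exercised; extract_prompts is an editor's illustration, not code taken from generate_prompts.py:

import json

def extract_prompts(raw: str):
    # Illustrative only: branch on the parsed type instead of assuming a dict.
    data = json.loads(raw)
    if isinstance(data, dict):
        return list(data.values())
    if isinstance(data, list):
        return [item for item in data if isinstance(item, str)]
    return []  # None or anything else unexpected

assert extract_prompts("[]") == []
assert extract_prompts('[{"some_key": "some value"}]') == []  # no AttributeError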

tests/test_new_format.py (Normal file, 91 lines added)

@@ -0,0 +1,91 @@
#!/usr/bin/env python3
"""
Test the new format where AI returns a list and keys are generated locally.
"""
import json
import sys
import os
# Add the project root (parent of tests/) to the path so generate_prompts can be imported
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from generate_prompts import JournalPromptGenerator


def test_new_format():
    """Test the new format where AI returns a list and keys are generated locally."""
    print("Testing new format: AI returns list, keys generated locally")
    print("="*60)

    # Create a mock generator
    generator = JournalPromptGenerator(config_path=".env")

    # Create a mock AI response in the new list format
    mock_ai_response = [
        "Write about a childhood memory that still makes you smile.",
        "Describe your perfect day from start to finish.",
        "What is something you've been putting off and why?",
        "Imagine you could have a conversation with any historical figure.",
        "Write a letter to your future self one year from now.",
        "Describe a place that feels like home to you."
    ]

    # Convert to JSON string
    json_response = json.dumps(mock_ai_response)

    print("\n1. Testing _parse_ai_response with list format:")
    result = generator._parse_ai_response(json_response)
    print(f" Result type: {type(result)}")
    print(f" Number of prompts: {len(result)}")
    print(f" First prompt: {result[0][:50]}...")

    # Verify it's a list of strings
    assert isinstance(result, list), "Result should be a list"
    assert all(isinstance(prompt, str) for prompt in result), "All items should be strings"

    print("\n2. Testing add_prompts_to_pool with list of strings:")
    # Get initial pool size
    initial_pool_size = len(generator.pool_prompts)
    print(f" Initial pool size: {initial_pool_size}")

    # Add prompts to pool
    generator.add_prompts_to_pool(result)

    # Check new pool size
    new_pool_size = len(generator.pool_prompts)
    print(f" New pool size: {new_pool_size}")
    print(f" Added {new_pool_size - initial_pool_size} prompts")

    # Check that prompts in pool have keys
    print(f"\n3. Checking that prompts in pool have generated keys:")
    for i, prompt_dict in enumerate(generator.pool_prompts[-len(result):]):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        print(f" Prompt {i+1}: Key='{prompt_key}', Text='{prompt_text[:30]}...'")
        assert prompt_key.startswith("poolprompt"), f"Key should start with 'poolprompt', got '{prompt_key}'"

    print("\n4. Testing draw_prompts_from_pool:")
    drawn_prompts = generator.draw_prompts_from_pool(count=2)
    print(f" Drawn {len(drawn_prompts)} prompts from pool")
    print(f" Pool size after drawing: {len(generator.pool_prompts)}")

    # Check drawn prompts have keys
    for i, prompt_dict in enumerate(drawn_prompts):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        print(f" Drawn prompt {i+1}: Key='{prompt_key}', Text='{prompt_text[:30]}...'")

    print("\n" + "="*60)
    print("✅ All tests passed! New format works correctly.")
    print("\nSummary:")
    print("- AI returns prompts as a JSON list (no keys)")
    print("- _parse_ai_response returns List[str]")
    print("- add_prompts_to_pool generates keys locally (poolprompt000, poolprompt001, etc.)")
    print("- draw_prompts_from_pool returns List[Dict[str, str]] with generated keys")
    return True


if __name__ == "__main__":
    test_new_format()
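
The key scheme asserted above (poolprompt000, poolprompt001, ...) can be summarised in a small sketch. keyed_pool_entries is a hypothetical helper written for this note; the real add_prompts_to_pool in generate_prompts.py may differ in detail:

from typing import Dict, List

def keyed_pool_entries(prompts: List[str], start: int = 0) -> List[Dict[str, str]]:
    # Wrap each prompt string in a single-key dict whose key is "poolprompt" plus a
    # zero-padded running index, as the assertions in test_new_format expect.
    return [{f"poolprompt{start + i:03d}": text} for i, text in enumerate(prompts)]

entries = keyed_pool_entries(["Write about home.", "Describe a perfect day."])
assert list(entries[0].keys())[0] == "poolprompt000"
assert all(list(e.keys())[0].startswith("poolprompt") for e in entries)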

tests/test_project.py (Normal file, 283 lines added)

@@ -0,0 +1,283 @@
#!/usr/bin/env python3
"""
Test script for the Daily Journal Prompt Generator.
This script tests basic functionality without making actual API calls.
"""
import json
import os
import sys
from unittest.mock import Mock, patch
# Add the project root (parent of tests/) to the path so project modules can be imported
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Mock response for testing (new list format)
MOCK_AI_RESPONSE = '''[
"Describe a place from your childhood that no longer exists. What made it special? What sounds, smells, and textures do you remember?",
"Write a letter to your future self 10 years from now. What hopes, fears, and questions do you want to share?",
"Imagine you wake up with a new superpower that only works on Tuesdays. What is it and how do you use it?",
"Describe a meal that represents your cultural heritage. Who taught you to make it? What memories are tied to it?",
"Write about a time you got lost, literally or metaphorically. What did you discover along the way?",
"Create a dialogue between your current self and your teenage self. What would you talk about?"
]'''


def test_file_structure():
    """Test that required files exist."""
    print("Testing file structure...")
    required_files = [
        "ds_prompt.txt",
        "historic_prompts.json",
        ".env",
        "example.env"
    ]
    all_exist = True
    for file in required_files:
        if os.path.exists(file):
            print(f" ✓ {file} exists")
        else:
            print(f" ✗ {file} missing")
            all_exist = False
    return all_exist


def test_json_parsing():
    """Test that historic_prompts.json is valid JSON."""
    print("\nTesting JSON parsing...")
    try:
        with open("historic_prompts.json", "r") as f:
            data = json.load(f)
        if isinstance(data, list):
            print(f" ✓ historic_prompts.json is valid JSON (contains {len(data)} prompts)")
            return True
        else:
            print(" ✗ historic_prompts.json is not a list")
            return False
    except json.JSONDecodeError as e:
        print(f" ✗ historic_prompts.json is not valid JSON: {e}")
        return False
    except FileNotFoundError:
        print(" ✗ historic_prompts.json not found")
        return False


def test_prompt_template():
    """Test that the prompt template is readable."""
    print("\nTesting prompt template...")
    try:
        with open("ds_prompt.txt", "r") as f:
            content = f.read()
        if len(content) > 0:
            print(f" ✓ ds_prompt.txt is readable ({len(content)} characters)")

            # Check for key phrases (now configurable, so just check for basic structure)
            key_phrases = ["writing prompts", "characters", "JSON array"]
            found_phrases = []
            for phrase in key_phrases:
                if phrase.lower() in content.lower():
                    found_phrases.append(phrase)

            if found_phrases:
                print(f" ✓ Contains key phrases: {', '.join(found_phrases)}")
            else:
                print(" ⚠ Missing some expected key phrases")
            return True
        else:
            print(" ✗ ds_prompt.txt is empty")
            return False
    except FileNotFoundError:
        print(" ✗ ds_prompt.txt not found")
        return False


def test_mock_ai_response():
    """Test parsing of mock AI response."""
    print("\nTesting AI response parsing...")
    try:
        # Test JSON parsing
        data = json.loads(MOCK_AI_RESPONSE)

        # Check structure - should be a list
        if not isinstance(data, list):
            print(f" ✗ Mock response is not a list, got {type(data)}")
            return False
        if len(data) != 6:
            print(f" ✗ Mock response has {len(data)} items, expected 6")
            return False

        print(f" ✓ Mock response parsed successfully")
        print(f" ✓ Contains all 6 expected prompts")

        # Check prompt lengths
        for i, prompt in enumerate(data):
            if len(prompt) < 50:
                print(f" ⚠ Prompt {i} is very short ({len(prompt)} characters)")
        return True
    except json.JSONDecodeError as e:
        print(f" ✗ Failed to parse mock JSON: {e}")
        return False


def test_environment_file():
    """Test that environment file has API key."""
    print("\nTesting environment file...")
    try:
        with open(".env", "r") as f:
            content = f.read()

        # Check for API key
        if "DEEPSEEK_API_KEY=" in content or "OPENAI_API_KEY=" in content:
            print(" ✓ .env file contains API key configuration")

            # Check if it's the example or real key
            if "sk-your-actual-api-key" in content or "sk-something" in content:
                print(" ⚠ .env appears to contain example API key (needs real key)")
            else:
                print(" ✓ .env appears to contain real API key")
            return True
        else:
            print(" ✗ .env missing API key configuration")
            return False
    except FileNotFoundError:
        print(" ✗ .env file not found")
        return False


def test_requirements_file():
    """Test that requirements.txt exists and has expected packages."""
    print("\nTesting requirements file...")
    if os.path.exists("requirements.txt"):
        try:
            with open("requirements.txt", "r") as f:
                content = f.read()

            expected_packages = ["openai", "python-dotenv"]
            found_packages = []
            for package in expected_packages:
                if package in content:
                    found_packages.append(package)

            if found_packages:
                print(f" ✓ requirements.txt contains: {', '.join(found_packages)}")
                missing = [p for p in expected_packages if p not in found_packages]
                if missing:
                    print(f" ⚠ Missing packages: {', '.join(missing)}")
                return True
            else:
                print(" ✗ requirements.txt missing expected packages")
                return False
        except Exception as e:
            print(f" ✗ Error reading requirements.txt: {e}")
            return False
    else:
        print(" ✗ requirements.txt not found")
        return False


def test_python_scripts():
    """Test that Python scripts are syntactically valid."""
    print("\nTesting Python scripts...")
    scripts_to_test = ["generate_prompts.py", "simple_generate.py"]
    all_valid = True
    for script in scripts_to_test:
        if os.path.exists(script):
            try:
                with open(script, "r") as f:
                    # Try to compile the script
                    compile(f.read(), script, 'exec')
                print(f" ✓ {script} is valid Python")
            except SyntaxError as e:
                print(f" ✗ {script} has syntax error: {e}")
                all_valid = False
            except Exception as e:
                print(f" ✗ Error testing {script}: {e}")
                all_valid = False
        else:
            print(f" ✗ {script} not found")
            all_valid = False
    return all_valid


def main():
    """Run all tests."""
    print("="*60)
    print("DAILY JOURNAL PROMPT GENERATOR - TEST SUITE")
    print("="*60)

    tests = [
        ("File Structure", test_file_structure),
        ("JSON Parsing", test_json_parsing),
        ("Prompt Template", test_prompt_template),
        ("Mock AI Response", test_mock_ai_response),
        ("Environment File", test_environment_file),
        ("Requirements File", test_requirements_file),
        ("Python Scripts", test_python_scripts),
    ]

    results = []
    for test_name, test_func in tests:
        try:
            result = test_func()
            results.append((test_name, result))
        except Exception as e:
            print(f"{test_name} failed with exception: {e}")
            results.append((test_name, False))

    # Summary
    print("\n" + "="*60)
    print("TEST SUMMARY")
    print("="*60)

    passed = sum(1 for _, result in results if result)
    total = len(results)

    for test_name, result in results:
        status = "✓ PASS" if result else "✗ FAIL"
        print(f"{status}: {test_name}")

    print(f"\nTotal: {passed}/{total} tests passed")

    if passed == total:
        print("\n✅ All tests passed! The project is ready to use.")
        print("\nNext steps:")
        print("1. Make sure your .env file has a real API key")
        print("2. Install dependencies: pip install -r requirements.txt")
        print("3. Run: python generate_prompts.py --interactive")
    else:
        print(f"\n{total - passed} test(s) failed. Please fix the issues above.")

    return passed == total


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)


@@ -0,0 +1,98 @@
#!/usr/bin/env python3
"""
Test script to verify the prompt numbering logic.
"""
import json
import configparser


def get_num_prompts():
    """Get the number of prompts from settings.cfg or default."""
    config = configparser.ConfigParser()
    num_prompts = 6  # Default value
    try:
        config.read('settings.cfg')
        if 'prompts' in config and 'num_prompts' in config['prompts']:
            num_prompts = int(config['prompts']['num_prompts'])
    except (FileNotFoundError, ValueError):
        pass
    return num_prompts


def test_renumbering():
    """Test the renumbering logic."""
    # Get number of prompts from config
    num_prompts = get_num_prompts()

    # Create a sample historic prompts list
    historic_prompts = []
    for i in range(60):
        historic_prompts.append({
            f"prompt{i:02d}": f"Old prompt {i}"
        })

    print(f"Original prompts: {len(historic_prompts)}")
    print(f"First prompt key: {list(historic_prompts[0].keys())[0]}")
    print(f"Last prompt key: {list(historic_prompts[-1].keys())[0]}")
    print(f"Number of prompts from config: {num_prompts}")

    # Simulate adding new prompts (as the current code would create them)
    new_prompts = []
    for i in range(num_prompts):
        new_prompts.append({
            f"prompt{len(historic_prompts) + i:02d}": f"New prompt {i}"
        })

    print(f"\nNew prompts to add: {len(new_prompts)}")
    for i, prompt in enumerate(new_prompts):
        print(f" New prompt {i}: {list(prompt.keys())[0]}")

    # Prepend new prompts (reverse to maintain order)
    for prompt in reversed(new_prompts):
        historic_prompts.insert(0, prompt)

    print(f"\nAfter prepending: {len(historic_prompts)} prompts")
    print(f"First 3 prompts keys:")
    for i in range(3):
        print(f" {i}: {list(historic_prompts[i].keys())[0]}")

    # Renumber all prompts
    renumbered_prompts = []
    for i, prompt_dict in enumerate(historic_prompts):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        new_prompt_key = f"prompt{i:02d}"
        renumbered_prompts.append({
            new_prompt_key: prompt_text
        })

    print(f"\nAfter renumbering: {len(renumbered_prompts)} prompts")
    print(f"First 10 prompts keys:")
    for i in range(10):
        print(f" prompt{i:02d}: {list(renumbered_prompts[i].keys())[0]} = {renumbered_prompts[i][f'prompt{i:02d}'][:30]}...")

    # Keep only first 60
    if len(renumbered_prompts) > 60:
        renumbered_prompts = renumbered_prompts[:60]

    print(f"\nAfter keeping only first 60: {len(renumbered_prompts)} prompts")
    print(f"First prompt: {list(renumbered_prompts[0].keys())[0]} = {renumbered_prompts[0]['prompt00'][:30]}...")
    print(f"Last prompt: {list(renumbered_prompts[-1].keys())[0]} = {renumbered_prompts[-1]['prompt59'][:30]}...")

    # Verify the range
    for i in range(60):
        expected_key = f"prompt{i:02d}"
        actual_key = list(renumbered_prompts[i].keys())[0]
        if expected_key != actual_key:
            print(f"ERROR: Expected {expected_key}, got {actual_key}")
            return False

    print("\n✅ All tests passed! Prompt numbering is correct.")
    return True


if __name__ == "__main__":
    test_renumbering()
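
The prepend, renumber, and truncate sequence walked through above can be condensed into a single helper. The compact form below is an editor's illustration of the same logic, not code from generate_prompts.py; renumber is a hypothetical name:

from typing import Dict, List

def renumber(prompts: List[Dict[str, str]], limit: int = 60) -> List[Dict[str, str]]:
    # Re-key every entry as prompt00, prompt01, ... in list order and keep at most `limit`.
    texts = [list(d.values())[0] for d in prompts]
    return [{f"prompt{i:02d}": text} for i, text in enumerate(texts[:limit])]

sample = [{"prompt05": "newest"}, {"prompt99": "older"}]
assert renumber(sample) == [{"prompt00": "newest"}, {"prompt01": "older"}]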


@@ -0,0 +1,65 @@
#!/usr/bin/env python3
"""
Test the error handling with a valid response.
"""
import sys
import os
import json
# Add the project root (parent of tests/) to the path so generate_prompts can be imported
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from generate_prompts import JournalPromptGenerator


def test_valid_response():
    """Test with a valid JSON response."""
    # Create a mock generator
    generator = JournalPromptGenerator(config_path=".env")

    # Create a valid response with 4 prompts as a list (new format)
    valid_response = [
        "Write about a time when you felt truly at peace.",
        "Describe your ideal morning routine in detail.",
        "What are three things you're grateful for today?",
        "Reflect on a recent challenge and what you learned from it."
    ]

    # Convert to JSON string
    json_response = json.dumps(valid_response)

    print("\n=== Test: Valid JSON response (list format) ===")
    result = generator._parse_ai_response(json_response)
    print(f"Number of prompts extracted: {len(result)}")
    print(f"Type of result: {type(result)}")
    for i, prompt_text in enumerate(result):
        print(f"Prompt {i+1}: {prompt_text[:50]}...")

    # Test with backticks
    print("\n=== Test: Valid JSON response with backticks ===")
    backticks_response = f"```json\n{json_response}\n```"
    result = generator._parse_ai_response(backticks_response)
    print(f"Number of prompts extracted: {len(result)}")

    # Test with "json" prefix
    print("\n=== Test: Valid JSON response with 'json' prefix ===")
    json_prefix_response = f"json\n{json_response}"
    result = generator._parse_ai_response(json_prefix_response)
    print(f"Number of prompts extracted: {len(result)}")

    # Test fallback for old dictionary format
    print("\n=== Test: Fallback for old dictionary format ===")
    old_format_response = {
        "newprompt0": "Write about a time when you felt truly at peace.",
        "newprompt1": "Describe your ideal morning routine in detail.",
        "newprompt2": "What are three things you're grateful for today?",
        "newprompt3": "Reflect on a recent challenge and what you learned from it."
    }
    json_old_response = json.dumps(old_format_response)
    result = generator._parse_ai_response(json_old_response)
    print(f"Number of prompts extracted: {len(result)}")


if __name__ == "__main__":
    test_valid_response()
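
Taken together, the parsing tests in this commit expect _parse_ai_response to strip Markdown code fences and a stray "json" label, accept either the new list format or the legacy keyed-dict format, and always return a list of strings. Below is a minimal sketch of that normalisation under those assumptions; parse_ai_response here is an editor's stand-in, and the real implementation in generate_prompts.py may differ:

import json
from typing import List

def parse_ai_response(raw: str) -> List[str]:
    # Illustrative normalisation only, mirroring the cases exercised by the tests above.
    text = raw.strip()
    if text.startswith("```"):
        text = text.strip("`")            # drop ``` / ```json fences
    if text.lstrip().lower().startswith("json"):
        text = text.lstrip()[4:]          # drop a bare "json" label
    data = json.loads(text)
    if isinstance(data, dict):            # legacy {"newprompt0": "..."} responses
        return [str(value) for value in data.values()]
    if isinstance(data, list):
        return [item for item in data if isinstance(item, str)]
    return []

assert len(parse_ai_response('["a", "b"]')) == 2
assert len(parse_ai_response('```json\n["a", "b"]\n```')) == 2
assert len(parse_ai_response('{"newprompt0": "a"}')) == 1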