#!/usr/bin/env python3
"""
Consolidated test file for parsing AI responses and format handling.
Combines tests from:
- test_final_fix.py (AttributeError fix for list responses)
- test_new_format.py (new list format with locally generated keys)
- test_valid_response.py (valid JSON response handling)
"""
import json
import sys
import os

# Add the current directory to the Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from generate_prompts import JournalPromptGenerator
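
# Note: based on the cases exercised below, the generator is expected to handle two
# response shapes: the current format, a JSON list of prompt strings, and the legacy
# format, a JSON object mapping keys such as "newprompt0" to prompt strings. Responses
# may also arrive wrapped in ``` fences or prefixed with a bare "json" line, which
# _clean_ai_response is expected to strip (see test_clean_ai_response below).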


def test_attribute_error_fix():
    """Test the fix for AttributeError when API returns list instead of dict."""
    print("\n=== Test: AttributeError fix for list responses ===")

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Test with empty list []
    list_response = json.dumps([])  # Empty list
    print("\n1. Testing with empty list []:")
    try:
        result = generator._parse_ai_response(list_response)
        print(f" Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f" ERROR: AttributeError occurred: {e}")
        return False
    except Exception as e:
        print(f" Other error: {type(e).__name__}: {e}")
        return False

    # Test with list containing dictionaries
    list_with_dicts = json.dumps([
        {"some_key": "some value"},
        {"another_key": "another value"}
    ])
    print("\n2. Testing with list of dictionaries:")
    try:
        result = generator._parse_ai_response(list_with_dicts)
        print(f" Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f" ERROR: AttributeError occurred: {e}")
        return False
    except Exception as e:
        print(f" Other error: {type(e).__name__}: {e}")
        return False

    print("\n✅ AttributeError fix tests passed!")
    return True


def test_new_list_format():
    """Test the new format where AI returns a list and keys are generated locally."""
    print("\n=== Test: New list format with locally generated keys ===")

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Create a mock AI response in the new list format
    mock_ai_response = [
        "Write about a childhood memory that still makes you smile.",
        "Describe your perfect day from start to finish.",
        "What is something you've been putting off and why?",
        "Imagine you could have a conversation with any historical figure.",
        "Write a letter to your future self one year from now.",
        "Describe a place that feels like home to you."
    ]

    # Convert to JSON string
    json_response = json.dumps(mock_ai_response)

    print("\n1. Testing _parse_ai_response with list format:")
    result = generator._parse_ai_response(json_response)
    print(f" Result type: {type(result)}")
    print(f" Number of prompts: {len(result)}")
    print(f" First prompt: {result[0][:50]}...")

    # Verify it's a list of strings
    assert isinstance(result, list), "Result should be a list"
    assert all(isinstance(prompt, str) for prompt in result), "All items should be strings"

    print("\n2. Testing add_prompts_to_pool with list of strings:")
    # Get initial pool size
    initial_pool_size = len(generator.pool_prompts)
    print(f" Initial pool size: {initial_pool_size}")

    # Add prompts to pool
    generator.add_prompts_to_pool(result)

    # Check new pool size
    new_pool_size = len(generator.pool_prompts)
    print(f" New pool size: {new_pool_size}")
    print(f" Added {new_pool_size - initial_pool_size} prompts")

    print("\n✅ New list format tests passed!")
    return True


def test_valid_json_responses():
    """Test with valid JSON responses in various formats."""
    print("\n=== Test: Valid JSON response handling ===")

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Create a valid response with 4 prompts as a list (new format)
    valid_response = [
        "Write about a time when you felt truly at peace.",
        "Describe your ideal morning routine in detail.",
        "What are three things you're grateful for today?",
        "Reflect on a recent challenge and what you learned from it."
    ]

    # Convert to JSON string
    json_response = json.dumps(valid_response)

    print("\n1. Testing with valid JSON response (list format):")
    result = generator._parse_ai_response(json_response)
    print(f" Number of prompts extracted: {len(result)}")
    print(f" Type of result: {type(result)}")
    for i, prompt_text in enumerate(result):
        print(f" Prompt {i+1}: {prompt_text[:50]}...")

    # Test with backticks
    print("\n2. Testing with valid JSON response with backticks:")
    backticks_response = f"```json\n{json_response}\n```"
    result = generator._parse_ai_response(backticks_response)
    print(f" Number of prompts extracted: {len(result)}")

    # Test with "json" prefix
    print("\n3. Testing with valid JSON response with 'json' prefix:")
    json_prefix_response = f"json\n{json_response}"
    result = generator._parse_ai_response(json_prefix_response)
    print(f" Number of prompts extracted: {len(result)}")

    # Test fallback for old dictionary format
    print("\n4. Testing fallback for old dictionary format:")
    old_format_response = {
        "newprompt0": "Write about a time when you felt truly at peace.",
        "newprompt1": "Describe your ideal morning routine in detail.",
        "newprompt2": "What are three things you're grateful for today?",
        "newprompt3": "Reflect on a recent challenge and what you learned from it."
    }
    json_old_response = json.dumps(old_format_response)
    result = generator._parse_ai_response(json_old_response)
    print(f" Number of prompts extracted: {len(result)}")

    print("\n✅ Valid JSON response tests passed!")
    return True


def test_clean_ai_response():
    """Test the _clean_ai_response method."""
    print("\n=== Test: _clean_ai_response method ===")

    generator = JournalPromptGenerator()

    # Test cases
    test_cases = [
        ("```json\n[1, 2, 3]\n```", "[1, 2, 3]"),
        ("```\n[1, 2, 3]\n```", "[1, 2, 3]"),
        ("json\n[1, 2, 3]", "[1, 2, 3]"),
        ("JSON\n[1, 2, 3]", "[1, 2, 3]"),
        (" [1, 2, 3] ", "[1, 2, 3]"),
        ("```json\n{\"a\": 1}\n```", "{\"a\": 1}"),
    ]
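
    # NOTE (assumption): the pairs above encode the cleanup behaviour the tests
    # expect from _clean_ai_response -- stripping ``` / ```json fences, a leading
    # "json"/"JSON" marker line, and surrounding whitespace -- inferred only from
    # these input/expected cases, not from the implementation itself.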
    all_passed = True
    for i, (input_text, expected) in enumerate(test_cases):
        cleaned = generator._clean_ai_response(input_text)
        if cleaned == expected:
            print(f" Test {i+1} passed: '{input_text[:20]}...' -> '{cleaned}'")
        else:
            print(f" Test {i+1} FAILED: '{input_text[:20]}...' -> '{cleaned}' (expected: '{expected}')")
            all_passed = False

    if all_passed:
        print("\n✅ _clean_ai_response tests passed!")
        return True
    else:
        print("\n❌ _clean_ai_response tests failed!")
        return False


def main():
    """Run all parsing tests."""
    print("=" * 60)
    print("Running Consolidated Parsing Tests")
    print("=" * 60)

    all_passed = True

    # Run all tests
    if not test_attribute_error_fix():
        all_passed = False
    if not test_new_list_format():
        all_passed = False
    if not test_valid_json_responses():
        all_passed = False
    if not test_clean_ai_response():
        all_passed = False

    print("\n" + "=" * 60)
    if all_passed:
        print("✅ ALL PARSING TESTS PASSED!")
    else:
        print("❌ SOME TESTS FAILED!")
    print("=" * 60)

    return all_passed


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)