#!/usr/bin/env python3
"""
Test script for the Daily Journal Prompt Generator.

This script tests basic functionality without making actual API calls.
"""

import json
import os
import sys
from unittest.mock import Mock, patch

# Add the current directory to the path so the project modules can be imported.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# Mock response for testing (new list format)
MOCK_AI_RESPONSE = '''[
    "Describe a place from your childhood that no longer exists. What made it special? What sounds, smells, and textures do you remember?",
    "Write a letter to your future self 10 years from now. What hopes, fears, and questions do you want to share?",
    "Imagine you wake up with a new superpower that only works on Tuesdays. What is it and how do you use it?",
    "Describe a meal that represents your cultural heritage. Who taught you to make it? What memories are tied to it?",
    "Write about a time you got lost, literally or metaphorically. What did you discover along the way?",
    "Create a dialogue between your current self and your teenage self. What would you talk about?"
]'''
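
# Minimal helper sketch, not wired into the test suite below: the Mock class imported
# above can stand in for an OpenAI-style chat-completions client, so response handling
# can be exercised without a real API call. The shape mimicked here
# (chat.completions.create(...).choices[0].message.content) follows the OpenAI Python
# client; whether generate_prompts.py calls it exactly this way is an assumption, and
# unittest.mock.patch could instead swap the real client inside that module.
def build_mock_ai_client(response_text=MOCK_AI_RESPONSE):
    """Return a Mock that mimics a chat client whose completion contains response_text."""
    client = Mock()
    completion = Mock()
    completion.choices = [Mock(message=Mock(content=response_text))]
    client.chat.completions.create.return_value = completion
    return client

# Example usage of the sketch above:
#   raw = build_mock_ai_client().chat.completions.create().choices[0].message.content
#   prompts = json.loads(raw)  # parses into the six prompts in MOCK_AI_RESPONSE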


def test_file_structure():
    """Test that required files exist."""
    print("Testing file structure...")

    required_files = [
        "ds_prompt.txt",
        "historic_prompts.json",
        ".env",
        "example.env",
    ]

    all_exist = True
    for file in required_files:
        if os.path.exists(file):
            print(f" ✓ {file} exists")
        else:
            print(f" ✗ {file} missing")
            all_exist = False

    return all_exist


def test_json_parsing():
    """Test that historic_prompts.json is valid JSON."""
    print("\nTesting JSON parsing...")

    try:
        with open("historic_prompts.json", "r") as f:
            data = json.load(f)

        if isinstance(data, list):
            print(f" ✓ historic_prompts.json is valid JSON (contains {len(data)} prompts)")
            return True
        else:
            print(" ✗ historic_prompts.json is not a list")
            return False

    except json.JSONDecodeError as e:
        print(f" ✗ historic_prompts.json is not valid JSON: {e}")
        return False
    except FileNotFoundError:
        print(" ✗ historic_prompts.json not found")
        return False
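
# For reference: the list check above implies that historic_prompts.json is a flat JSON
# array of prompt strings, e.g. ["First prompt ...", "Second prompt ..."]. That shape is
# inferred from this test, not from a documented schema.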


def test_prompt_template():
    """Test that the prompt template is readable."""
    print("\nTesting prompt template...")

    try:
        with open("ds_prompt.txt", "r") as f:
            content = f.read()

        if len(content) > 0:
            print(f" ✓ ds_prompt.txt is readable ({len(content)} characters)")

            # Check for key phrases (the template is now configurable, so just check for basic structure).
            key_phrases = ["writing prompts", "characters", "JSON array"]
            found_phrases = []
            for phrase in key_phrases:
                if phrase.lower() in content.lower():
                    found_phrases.append(phrase)

            if found_phrases:
                print(f" ✓ Contains key phrases: {', '.join(found_phrases)}")
            else:
                print(" ⚠ Missing some expected key phrases")

            return True
        else:
            print(" ✗ ds_prompt.txt is empty")
            return False

    except FileNotFoundError:
        print(" ✗ ds_prompt.txt not found")
        return False


def test_mock_ai_response():
    """Test parsing of the mock AI response."""
    print("\nTesting AI response parsing...")

    try:
        # Test JSON parsing.
        data = json.loads(MOCK_AI_RESPONSE)

        # Check structure - the response should be a list.
        if not isinstance(data, list):
            print(f" ✗ Mock response is not a list, got {type(data)}")
            return False

        if len(data) != 6:
            print(f" ✗ Mock response has {len(data)} items, expected 6")
            return False

        print(" ✓ Mock response parsed successfully")
        print(" ✓ Contains all 6 expected prompts")

        # Check prompt lengths.
        for i, prompt in enumerate(data):
            if len(prompt) < 50:
                print(f" ⚠ Prompt {i} is very short ({len(prompt)} characters)")

        return True

    except json.JSONDecodeError as e:
        print(f" ✗ Failed to parse mock JSON: {e}")
        return False


def test_environment_file():
    """Test that the environment file has an API key."""
    print("\nTesting environment file...")

    try:
        with open(".env", "r") as f:
            content = f.read()

        # Check for an API key entry.
        if "DEEPSEEK_API_KEY=" in content or "OPENAI_API_KEY=" in content:
            print(" ✓ .env file contains API key configuration")

            # Check whether it is the example key or a real key.
            if "sk-your-actual-api-key" in content or "sk-something" in content:
                print(" ⚠ .env appears to contain example API key (needs real key)")
            else:
                print(" ✓ .env appears to contain real API key")

            return True
        else:
            print(" ✗ .env missing API key configuration")
            return False

    except FileNotFoundError:
        print(" ✗ .env file not found")
        return False
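
# For reference: the checks above imply a .env layout along these lines (placeholder
# value only; OPENAI_API_KEY= is accepted as an alternative variable name):
#   DEEPSEEK_API_KEY=sk-your-actual-api-key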


def test_requirements_file():
    """Test that requirements.txt exists and has the expected packages."""
    print("\nTesting requirements file...")

    if os.path.exists("requirements.txt"):
        try:
            with open("requirements.txt", "r") as f:
                content = f.read()

            expected_packages = ["openai", "python-dotenv"]
            found_packages = []

            for package in expected_packages:
                if package in content:
                    found_packages.append(package)

            if found_packages:
                print(f" ✓ requirements.txt contains: {', '.join(found_packages)}")

                missing = [p for p in expected_packages if p not in found_packages]
                if missing:
                    print(f" ⚠ Missing packages: {', '.join(missing)}")

                return True
            else:
                print(" ✗ requirements.txt missing expected packages")
                return False

        except Exception as e:
            print(f" ✗ Error reading requirements.txt: {e}")
            return False
    else:
        print(" ✗ requirements.txt not found")
        return False


def test_python_scripts():
    """Test that the Python scripts are syntactically valid."""
    print("\nTesting Python scripts...")

    scripts_to_test = ["generate_prompts.py", "simple_generate.py"]
    all_valid = True

    for script in scripts_to_test:
        if os.path.exists(script):
            try:
                with open(script, "r") as f:
                    # Try to compile the script without executing it.
                    compile(f.read(), script, 'exec')
                print(f" ✓ {script} is valid Python")
            except SyntaxError as e:
                print(f" ✗ {script} has syntax error: {e}")
                all_valid = False
            except Exception as e:
                print(f" ✗ Error testing {script}: {e}")
                all_valid = False
        else:
            print(f" ✗ {script} not found")
            all_valid = False

    return all_valid


def main():
    """Run all tests."""
    print("=" * 60)
    print("DAILY JOURNAL PROMPT GENERATOR - TEST SUITE")
    print("=" * 60)

    tests = [
        ("File Structure", test_file_structure),
        ("JSON Parsing", test_json_parsing),
        ("Prompt Template", test_prompt_template),
        ("Mock AI Response", test_mock_ai_response),
        ("Environment File", test_environment_file),
        ("Requirements File", test_requirements_file),
        ("Python Scripts", test_python_scripts),
    ]

    results = []

    for test_name, test_func in tests:
        try:
            result = test_func()
            results.append((test_name, result))
        except Exception as e:
            print(f" ✗ {test_name} failed with exception: {e}")
            results.append((test_name, False))

    # Summary
    print("\n" + "=" * 60)
    print("TEST SUMMARY")
    print("=" * 60)

    passed = sum(1 for _, result in results if result)
    total = len(results)

    for test_name, result in results:
        status = "✓ PASS" if result else "✗ FAIL"
        print(f"{status}: {test_name}")

    print(f"\nTotal: {passed}/{total} tests passed")

    if passed == total:
        print("\n✅ All tests passed! The project is ready to use.")
        print("\nNext steps:")
        print("1. Make sure your .env file has a real API key")
        print("2. Install dependencies: pip install -r requirements.txt")
        print("3. Run: python generate_prompts.py --interactive")
    else:
        print(f"\n⚠ {total - passed} test(s) failed. Please fix the issues above.")

    return passed == total


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
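
# Usage note: run this script directly (e.g. `python test_setup.py`; the filename is an
# assumption, use whatever name the file is saved under). It exits with status 0 when
# every check passes and 1 otherwise.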