Compare commits
1 commit
master...fastapi_at

| Author | SHA1 | Date |
|---|---|---|
| | b96e0e0008 | |

README.md
@@ -62,7 +62,6 @@ A Python tool that uses OpenAI-compatible AI endpoints to generate creative writ
daily-journal-prompt/
├── README.md            # This documentation
├── generate_prompts.py  # Main Python script with rich interface
├── simple_generate.py   # Lightweight version without rich dependency
├── run.sh               # Convenience bash script
├── test_project.py      # Test suite for the project
├── requirements.txt     # Python dependencies
@@ -78,7 +77,6 @@ daily-journal-prompt/
### File Descriptions

- **generate_prompts.py**: Main Python script with interactive mode, rich formatting, and full features
- **simple_generate.py**: Lightweight version without rich dependency for basic usage
- **run.sh**: Convenience bash script for easy execution
- **test_project.py**: Test suite to verify project setup
- **requirements.txt**: Python dependencies (openai, python-dotenv, rich)
@@ -102,8 +100,6 @@ chmod +x run.sh
# Interactive mode with rich interface
./run.sh --interactive

# Simple version without rich dependency
./run.sh --simple

# Show statistics
./run.sh --stats
@@ -132,8 +128,6 @@ python generate_prompts.py --interactive
# Show statistics
python generate_prompts.py --stats

# Simple version (no rich dependency needed)
python simple_generate.py
```
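
A minimal `.env` sketch (illustrative values; these are the variables the scripts actually read):

```env
# Either key works; DEEPSEEK_API_KEY is checked first
DEEPSEEK_API_KEY=sk-xxxxxxxx
# OPENAI_API_KEY=sk-xxxxxxxx
API_BASE_URL=https://api.deepseek.com
MODEL=deepseek-chat
```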

### Testing Your Setup

generate_prompts.py
@@ -234,16 +234,6 @@ class JournalPromptGenerator:
        self.historic_prompts = updated_prompts
        self._save_historic_prompts()

    def _prepare_prompt(self) -> str:
        """Prepare the full prompt with historic context."""
        # Format historic prompts for the AI
        if self.historic_prompts:
            historic_context = json.dumps(self.historic_prompts, indent=2)
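            # The history is appended as a JSON array beneath the template so the
            # model can see, and avoid repeating, earlier prompts.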
            full_prompt = f"{self.prompt_template}\n\nPrevious prompts:\n{historic_context}"
        else:
            full_prompt = self.prompt_template

        return full_prompt

    def _parse_ai_response(self, response_content: str) -> List[str]:
        """

run.sh
@@ -35,7 +35,6 @@ fi

# Parse command line arguments
INTERACTIVE=false
SIMPLE=false
STATS=false
POOL_STATS=false
FILL_POOL=false
@@ -47,10 +46,6 @@ while [[ $# -gt 0 ]]; do
        INTERACTIVE=true
        shift
        ;;
    -s | --simple)
        SIMPLE=true
        shift
        ;;
    --stats)
        STATS=true
        shift
@@ -80,7 +75,6 @@ if [ "$HELP" = true ]; then
    echo ""
    echo "Options:"
    echo "  -i, --interactive  Run in interactive mode (with rich interface)"
    echo "  -s, --simple       Run simple version (no rich dependency)"
    echo "  --stats            Show prompt history statistics"
    echo "  --pool-stats       Show prompt pool statistics"
    echo "  --fill-pool        Fill prompt pool using AI (makes API call)"
@@ -89,7 +83,6 @@ if [ "$HELP" = true ]; then
    echo "Examples:"
    echo "  ./run.sh              # Draw prompts from pool (default)"
    echo "  ./run.sh -i           # Interactive mode"
    echo "  ./run.sh -s           # Simple version"
    echo "  ./run.sh --stats      # Show history statistics"
    echo "  ./run.sh --pool-stats # Show pool statistics"
    echo "  ./run.sh --fill-pool  # Fill prompt pool using AI"
@@ -108,9 +101,6 @@ elif [ "$FILL_POOL" = true ]; then
elif [ "$INTERACTIVE" = true ]; then
    echo "🎮 Starting interactive mode..."
    python3 generate_prompts.py --interactive
elif [ "$SIMPLE" = true ]; then
    echo "⚡ Running simple version..."
    python3 simple_generate.py
else
    echo "✨ Drawing prompts from pool..."
    python3 generate_prompts.py

simple_generate.py (deleted)
@@ -1,319 +0,0 @@
#!/usr/bin/env python3
"""
Simple Daily Journal Prompt Generator
A lightweight version without rich dependency.
"""

import os
import json
import sys
import argparse
import configparser
from datetime import datetime
from typing import List, Dict, Any

from openai import OpenAI
from dotenv import load_dotenv


class SimplePromptGenerator:
    """Simple version without rich dependency."""

    def __init__(self, config_path: str = ".env"):
        """Initialize the generator with configuration."""
        self.config_path = config_path
        self.client = None
        self.historic_prompts = []
        self.pool_prompts = []
        self.prompt_template = ""
        self.settings = {}

        # Load configuration
        self._load_config()
        self._load_settings()

        # Load data files
        self._load_prompt_template()
        self._load_historic_prompts()
        self._load_pool_prompts()

    def _load_config(self):
        """Load configuration from environment file."""
        load_dotenv(self.config_path)

        # Get API key
        self.api_key = os.getenv("DEEPSEEK_API_KEY") or os.getenv("OPENAI_API_KEY")
        if not self.api_key:
            print("Error: No API key found in .env file")
            print("Please add DEEPSEEK_API_KEY or OPENAI_API_KEY to your .env file")
            sys.exit(1)

        # Get API base URL (default to DeepSeek)
        self.base_url = os.getenv("API_BASE_URL", "https://api.deepseek.com")

        # Get model (default to deepseek-chat)
        self.model = os.getenv("MODEL", "deepseek-chat")

        # Initialize OpenAI client
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )

    def _load_settings(self):
        """Load settings from settings.cfg configuration file."""
        config = configparser.ConfigParser()

        # Set default values
        self.settings = {
            'min_length': 500,
            'max_length': 1000,
            'num_prompts': 6
        }

        try:
            config.read('settings.cfg')

            if 'prompts' in config:
                prompts_section = config['prompts']

                # Load min_length
                if 'min_length' in prompts_section:
                    self.settings['min_length'] = int(prompts_section['min_length'])

                # Load max_length
                if 'max_length' in prompts_section:
                    self.settings['max_length'] = int(prompts_section['max_length'])

                # Load num_prompts
                if 'num_prompts' in prompts_section:
                    self.settings['num_prompts'] = int(prompts_section['num_prompts'])

        except FileNotFoundError:
            print("Warning: settings.cfg not found, using default values")
        except ValueError as e:
            print(f"Warning: Invalid value in settings.cfg: {e}, using default values")
        except Exception as e:
            print(f"Warning: Error reading settings.cfg: {e}, using default values")
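
    # Example settings.cfg matching the defaults above (illustrative):
    #   [prompts]
    #   min_length = 500
    #   max_length = 1000
    #   num_prompts = 6
    # Note: ConfigParser.read() skips a missing file instead of raising
    # FileNotFoundError, so that except branch above is effectively unreachable.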

    def _load_prompt_template(self):
        """Load the prompt template from ds_prompt.txt and update with config values."""
        try:
            with open("ds_prompt.txt", "r") as f:
                template = f.read()

            # Replace hardcoded values with config values
            template = template.replace(
                "between 500 and 1000 characters",
                f"between {self.settings['min_length']} and {self.settings['max_length']} characters"
            )

            # Replace the number of prompts (6) with config value
            template = template.replace(
                "Please generate 6 writing prompts",
                f"Please generate {self.settings['num_prompts']} writing prompts"
            )
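
            # e.g. with num_prompts = 8 in settings.cfg, "Please generate 6 writing
            # prompts" becomes "Please generate 8 writing prompts" (illustrative).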

            self.prompt_template = template
        except FileNotFoundError:
            print("Error: ds_prompt.txt not found")
            sys.exit(1)

    def _load_historic_prompts(self):
        """Load historic prompts from JSON file."""
        try:
            with open("historic_prompts.json", "r") as f:
                self.historic_prompts = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            print("Warning: Starting with empty prompt history")
            self.historic_prompts = []

    def _save_historic_prompts(self):
        """Save historic prompts to JSON file (keeping only last 60)."""
        # Keep only the last 60 prompts
        if len(self.historic_prompts) > 60:
            self.historic_prompts = self.historic_prompts[-60:]

        with open("historic_prompts.json", "w") as f:
            json.dump(self.historic_prompts, f, indent=2)
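
    # historic_prompts.json is a flat JSON array (it defaults to [] above); the
    # element shape is not visible in this diff, so it is left unspecified here.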

    def _prepare_prompt(self) -> str:
        """Prepare the full prompt with historic context."""
        if self.historic_prompts:
            historic_context = json.dumps(self.historic_prompts, indent=2)
            full_prompt = f"{self.prompt_template}\n\nPrevious prompts:\n{historic_context}"
        else:
            full_prompt = self.prompt_template

        return full_prompt

    def _parse_ai_response(self, response_content: str) -> List[str]:
        """Parse the AI response to extract new prompts."""
        try:
            # Try to parse as JSON
            data = json.loads(response_content)
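            # Expected shapes (handled below):
            #   new list format: ["prompt one", "prompt two", ...]
            #   old dict format: {"newprompt0": "...", "newprompt1": "..."}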

            # Check if data is a list (new format)
            if isinstance(data, list):
                # Return the list of prompt strings directly
                # Ensure we have the correct number of prompts
                if len(data) >= self.settings['num_prompts']:
                    return data[:self.settings['num_prompts']]
                else:
                    print(f"Warning: AI returned {len(data)} prompts, expected {self.settings['num_prompts']}")
                    return data
            elif isinstance(data, dict):
                # Fallback for old format: dictionary with newprompt0, newprompt1, etc.
                print("Warning: AI returned dictionary format, expected list format")
                new_prompts = []
                for i in range(self.settings['num_prompts']):
                    key = f"newprompt{i}"
                    if key in data:
                        new_prompts.append(data[key])
                return new_prompts
            else:
                print(f"Warning: AI returned unexpected data type: {type(data)}")
                return []

        except json.JSONDecodeError:
            # If not valid JSON, try to extract prompts from text
            print("Warning: AI response is not valid JSON, attempting to extract prompts...")

            # Look for patterns in the text
            lines = response_content.strip().split('\n')
            new_prompts = []

            for i, line in enumerate(lines[:self.settings['num_prompts']]):
                line = line.strip()
                if line and len(line) > 50:
                    new_prompts.append(line)

            return new_prompts

    def generate_prompts(self) -> List[str]:
        """Generate new journal prompts using AI."""
        print("\nGenerating new journal prompts...")

        # Prepare the prompt
        full_prompt = self._prepare_prompt()

        try:
            # Call the AI API
            print("Calling AI API...")
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": "You are a creative writing assistant that generates journal prompts. Always respond with valid JSON."},
                    {"role": "user", "content": full_prompt}
                ],
                temperature=0.7,
                max_tokens=2000
            )
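
            # OpenAI-compatible SDKs expose the generated text on the first
            # choice's message content.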
            response_content = response.choices[0].message.content

        except Exception as e:
            print(f"Error calling AI API: {e}")
            return []

        # Parse the response
        new_prompts = self._parse_ai_response(response_content)

        if not new_prompts:
            print("Error: Could not parse any prompts from AI response")
            return []

        # Note: Prompts are NOT added to historic_prompts here
        # They will be added only when the user chooses one

        return new_prompts

    def display_prompts(self, prompts: List[Dict[str, str]]):
        """Display generated prompts in a simple format."""
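        # NOTE: generate_prompts() returns List[str], but this method reads each
        # item as a single-key dict; passing raw strings here raises
        # AttributeError ("'str' object has no attribute 'keys'").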
        print("\n" + "="*60)
        print("✨ NEW JOURNAL PROMPTS GENERATED ✨")
        print("="*60 + "\n")

        for i, prompt_dict in enumerate(prompts, 1):
            # Extract prompt text
            prompt_key = list(prompt_dict.keys())[0]
            prompt_text = prompt_dict[prompt_key]

            print(f"Prompt #{i}:")
            print("-" * 40)
            print(prompt_text)
            print("-" * 40 + "\n")

    def show_history_stats(self):
        """Show statistics about prompt history."""
        total_prompts = len(self.historic_prompts)

        print("\nPrompt History Statistics:")
        print("-" * 30)
        print(f"Total prompts in history: {total_prompts}")
        print("History capacity: 60 prompts")
        print(f"Available slots: {max(0, 60 - total_prompts)}")

    def save_prompt_to_file(self, prompt_dict: Dict[str, str], filename: str = None):
        """Save a prompt to a text file."""
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]

        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"journal_prompt_{timestamp}.txt"

        with open(filename, "w") as f:
            f.write(f"Journal Prompt - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write("="*50 + "\n\n")
            f.write(prompt_text)
            f.write("\n\n" + "="*50 + "\n")
            f.write("Happy writing! ✍️\n")

        print(f"Prompt saved to {filename}")


def main():
    """Main entry point for the simple script."""
    parser = argparse.ArgumentParser(description="Generate journal prompts using AI (simple version)")
    parser.add_argument(
        "--stats", "-s",
        action="store_true",
        help="Show history statistics"
    )
    parser.add_argument(
        "--save", "-S",
        type=int,
        help="Save a specific prompt number to file"
    )
    parser.add_argument(
        "--config", "-c",
        default=".env",
        help="Path to configuration file (default: .env)"
    )

    args = parser.parse_args()

    # Initialize generator
    generator = SimplePromptGenerator(config_path=args.config)

    if args.stats:
        generator.show_history_stats()
    else:
        # Generate prompts
        new_prompts = generator.generate_prompts()
        if new_prompts:
            generator.display_prompts(new_prompts)

            # Save specific prompt if requested
            if args.save:
                prompt_num = args.save
                if 1 <= prompt_num <= len(new_prompts):
                    generator.save_prompt_to_file(new_prompts[prompt_num - 1])
                else:
                    print(f"Error: Prompt number must be between 1 and {len(new_prompts)}")


if __name__ == "__main__":
    main()

test_final_fix.py (deleted)
@@ -1,55 +0,0 @@
#!/usr/bin/env python3
"""
Test to demonstrate the fix for the AttributeError when API returns list instead of dict.
"""

import json
from generate_prompts import JournalPromptGenerator


def test_original_error_case():
    """Test the exact error case: API returns a list instead of a dict."""

    print("Testing the original error case: API returns list instead of dict")
    print("="*60)

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Simulate API returning a list (which could happen with null/malformed data)
    list_response = json.dumps([])  # Empty list

    print("\n1. Testing with empty list []:")
    try:
        result = generator._parse_ai_response(list_response)
        print(f"   Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f"   ERROR: AttributeError occurred: {e}")
    except Exception as e:
        print(f"   Other error: {type(e).__name__}: {e}")

    # Test with list containing dictionaries (another possible malformed response)
    list_with_dicts = json.dumps([
        {"some_key": "some value"},
        {"another_key": "another value"}
    ])

    print("\n2. Testing with list of dictionaries:")
    try:
        result = generator._parse_ai_response(list_with_dicts)
        print(f"   Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f"   ERROR: AttributeError occurred: {e}")
    except Exception as e:
        print(f"   Other error: {type(e).__name__}: {e}")

    # Test with None/null data (worst case)
    print("\n3. Testing with None/null data (simulated):")
    # We can't directly test None since json.loads would fail, but our code
    # handles the case where data might be None after parsing

    print("\n" + "="*60)
    print("Test complete! The fix prevents AttributeError for list responses.")


if __name__ == "__main__":
    test_original_error_case()

test_new_format.py (deleted)
@@ -1,91 +0,0 @@
#!/usr/bin/env python3
"""
Test the new format where AI returns a list and keys are generated locally.
"""

import json
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from generate_prompts import JournalPromptGenerator


def test_new_format():
    """Test the new format where AI returns a list and keys are generated locally."""

    print("Testing new format: AI returns list, keys generated locally")
    print("="*60)

    # Create a mock generator
    generator = JournalPromptGenerator(config_path=".env")

    # Create a mock AI response in the new list format
    mock_ai_response = [
        "Write about a childhood memory that still makes you smile.",
        "Describe your perfect day from start to finish.",
        "What is something you've been putting off and why?",
        "Imagine you could have a conversation with any historical figure.",
        "Write a letter to your future self one year from now.",
        "Describe a place that feels like home to you."
    ]

    # Convert to JSON string
    json_response = json.dumps(mock_ai_response)

    print("\n1. Testing _parse_ai_response with list format:")
    result = generator._parse_ai_response(json_response)
    print(f"   Result type: {type(result)}")
    print(f"   Number of prompts: {len(result)}")
    print(f"   First prompt: {result[0][:50]}...")

    # Verify it's a list of strings
    assert isinstance(result, list), "Result should be a list"
    assert all(isinstance(prompt, str) for prompt in result), "All items should be strings"

    print("\n2. Testing add_prompts_to_pool with list of strings:")

    # Get initial pool size
    initial_pool_size = len(generator.pool_prompts)
    print(f"   Initial pool size: {initial_pool_size}")

    # Add prompts to pool
    generator.add_prompts_to_pool(result)

    # Check new pool size
    new_pool_size = len(generator.pool_prompts)
    print(f"   New pool size: {new_pool_size}")
    print(f"   Added {new_pool_size - initial_pool_size} prompts")

    # Check that prompts in pool have keys
    print("\n3. Checking that prompts in pool have generated keys:")
    for i, prompt_dict in enumerate(generator.pool_prompts[-len(result):]):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        print(f"   Prompt {i+1}: Key='{prompt_key}', Text='{prompt_text[:30]}...'")
        assert prompt_key.startswith("poolprompt"), f"Key should start with 'poolprompt', got '{prompt_key}'"

    print("\n4. Testing draw_prompts_from_pool:")
    drawn_prompts = generator.draw_prompts_from_pool(count=2)
    print(f"   Drawn {len(drawn_prompts)} prompts from pool")
    print(f"   Pool size after drawing: {len(generator.pool_prompts)}")

    # Check drawn prompts have keys
    for i, prompt_dict in enumerate(drawn_prompts):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        print(f"   Drawn prompt {i+1}: Key='{prompt_key}', Text='{prompt_text[:30]}...'")

    print("\n" + "="*60)
    print("✅ All tests passed! New format works correctly.")
    print("\nSummary:")
    print("- AI returns prompts as a JSON list (no keys)")
    print("- _parse_ai_response returns List[str]")
    print("- add_prompts_to_pool generates keys locally (poolprompt000, poolprompt001, etc.)")
    print("- draw_prompts_from_pool returns List[Dict[str, str]] with generated keys")

    return True


if __name__ == "__main__":
    test_new_format()

test_parsing.py (new file, 230 lines)
@@ -0,0 +1,230 @@
#!/usr/bin/env python3
"""
Consolidated test file for parsing AI responses and format handling.
Combines tests from:
- test_final_fix.py (AttributeError fix for list responses)
- test_new_format.py (new list format with locally generated keys)
- test_valid_response.py (valid JSON response handling)
"""

import json
import sys
import os

# Add the current directory to the Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from generate_prompts import JournalPromptGenerator


def test_attribute_error_fix():
    """Test the fix for AttributeError when API returns list instead of dict."""
    print("\n=== Test: AttributeError fix for list responses ===")

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Test with empty list []
    list_response = json.dumps([])  # Empty list
    print("\n1. Testing with empty list []:")
    try:
        result = generator._parse_ai_response(list_response)
        print(f"   Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f"   ERROR: AttributeError occurred: {e}")
        return False
    except Exception as e:
        print(f"   Other error: {type(e).__name__}: {e}")
        return False

    # Test with list containing dictionaries
    list_with_dicts = json.dumps([
        {"some_key": "some value"},
        {"another_key": "another value"}
    ])

    print("\n2. Testing with list of dictionaries:")
    try:
        result = generator._parse_ai_response(list_with_dicts)
        print(f"   Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f"   ERROR: AttributeError occurred: {e}")
        return False
    except Exception as e:
        print(f"   Other error: {type(e).__name__}: {e}")
        return False

    print("\n✅ AttributeError fix tests passed!")
    return True


def test_new_list_format():
    """Test the new format where AI returns a list and keys are generated locally."""
    print("\n=== Test: New list format with locally generated keys ===")

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Create a mock AI response in the new list format
    mock_ai_response = [
        "Write about a childhood memory that still makes you smile.",
        "Describe your perfect day from start to finish.",
        "What is something you've been putting off and why?",
        "Imagine you could have a conversation with any historical figure.",
        "Write a letter to your future self one year from now.",
        "Describe a place that feels like home to you."
    ]

    # Convert to JSON string
    json_response = json.dumps(mock_ai_response)

    print("\n1. Testing _parse_ai_response with list format:")
    result = generator._parse_ai_response(json_response)
    print(f"   Result type: {type(result)}")
    print(f"   Number of prompts: {len(result)}")
    print(f"   First prompt: {result[0][:50]}...")

    # Verify it's a list of strings
    assert isinstance(result, list), "Result should be a list"
    assert all(isinstance(prompt, str) for prompt in result), "All items should be strings"

    print("\n2. Testing add_prompts_to_pool with list of strings:")

    # Get initial pool size
    initial_pool_size = len(generator.pool_prompts)
    print(f"   Initial pool size: {initial_pool_size}")

    # Add prompts to pool
    generator.add_prompts_to_pool(result)

    # Check new pool size
    new_pool_size = len(generator.pool_prompts)
    print(f"   New pool size: {new_pool_size}")
    print(f"   Added {new_pool_size - initial_pool_size} prompts")

    print("\n✅ New list format tests passed!")
    return True


def test_valid_json_responses():
    """Test with valid JSON responses in various formats."""
    print("\n=== Test: Valid JSON response handling ===")

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Create a valid response with 4 prompts as a list (new format)
    valid_response = [
        "Write about a time when you felt truly at peace.",
        "Describe your ideal morning routine in detail.",
        "What are three things you're grateful for today?",
        "Reflect on a recent challenge and what you learned from it."
    ]

    # Convert to JSON string
    json_response = json.dumps(valid_response)

    print("\n1. Testing with valid JSON response (list format):")
    result = generator._parse_ai_response(json_response)
    print(f"   Number of prompts extracted: {len(result)}")
    print(f"   Type of result: {type(result)}")

    for i, prompt_text in enumerate(result):
        print(f"   Prompt {i+1}: {prompt_text[:50]}...")

    # Test with backticks
    print("\n2. Testing with valid JSON response with backticks:")
    backticks_response = f"```json\n{json_response}\n```"
    result = generator._parse_ai_response(backticks_response)
    print(f"   Number of prompts extracted: {len(result)}")

    # Test with "json" prefix
    print("\n3. Testing with valid JSON response with 'json' prefix:")
    json_prefix_response = f"json\n{json_response}"
    result = generator._parse_ai_response(json_prefix_response)
    print(f"   Number of prompts extracted: {len(result)}")

    # Test fallback for old dictionary format
    print("\n4. Testing fallback for old dictionary format:")
    old_format_response = {
        "newprompt0": "Write about a time when you felt truly at peace.",
        "newprompt1": "Describe your ideal morning routine in detail.",
        "newprompt2": "What are three things you're grateful for today?",
        "newprompt3": "Reflect on a recent challenge and what you learned from it."
    }
    json_old_response = json.dumps(old_format_response)
    result = generator._parse_ai_response(json_old_response)
    print(f"   Number of prompts extracted: {len(result)}")

    print("\n✅ Valid JSON response tests passed!")
    return True


def test_clean_ai_response():
    """Test the _clean_ai_response method."""
    print("\n=== Test: _clean_ai_response method ===")

    generator = JournalPromptGenerator()

    # Test cases
    test_cases = [
        ("```json\n[1, 2, 3]\n```", "[1, 2, 3]"),
        ("```\n[1, 2, 3]\n```", "[1, 2, 3]"),
        ("json\n[1, 2, 3]", "[1, 2, 3]"),
        ("JSON\n[1, 2, 3]", "[1, 2, 3]"),
        ("  [1, 2, 3]  ", "[1, 2, 3]"),
        ("```json\n{\"a\": 1}\n```", "{\"a\": 1}"),
    ]
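    # The cases above pin down the _clean_ai_response contract: strip ``` fences
    # (with or without a "json" tag), a bare "json"/"JSON" prefix line, and
    # surrounding whitespace, leaving only the raw JSON payload.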

    all_passed = True
    for i, (input_text, expected) in enumerate(test_cases):
        cleaned = generator._clean_ai_response(input_text)
        if cleaned == expected:
            print(f"   Test {i+1} passed: '{input_text[:20]}...' -> '{cleaned}'")
        else:
            print(f"   Test {i+1} FAILED: '{input_text[:20]}...' -> '{cleaned}' (expected: '{expected}')")
            all_passed = False

    if all_passed:
        print("\n✅ _clean_ai_response tests passed!")
        return True
    else:
        print("\n❌ _clean_ai_response tests failed!")
        return False


def main():
    """Run all parsing tests."""
    print("=" * 60)
    print("Running Consolidated Parsing Tests")
    print("=" * 60)

    all_passed = True

    # Run all tests
    if not test_attribute_error_fix():
        all_passed = False

    if not test_new_list_format():
        all_passed = False

    if not test_valid_json_responses():
        all_passed = False

    if not test_clean_ai_response():
        all_passed = False

    print("\n" + "=" * 60)
    if all_passed:
        print("✅ ALL PARSING TESTS PASSED!")
    else:
        print("❌ SOME TESTS FAILED!")
    print("=" * 60)

    return all_passed


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)

test_project.py
@@ -201,7 +201,7 @@ def test_python_scripts():
    """Test that Python scripts are syntactically valid."""
    print("\nTesting Python scripts...")

-    scripts_to_test = ["generate_prompts.py", "simple_generate.py"]
+    scripts_to_test = ["generate_prompts.py"]
    all_valid = True

    for script in scripts_to_test:

test_valid_response.py (deleted)
@@ -1,65 +0,0 @@
#!/usr/bin/env python3
"""
Test the error handling with a valid response.
"""

import sys
import os
import json
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from generate_prompts import JournalPromptGenerator


def test_valid_response():
    """Test with a valid JSON response."""

    # Create a mock generator
    generator = JournalPromptGenerator(config_path=".env")

    # Create a valid response with 4 prompts as a list (new format)
    valid_response = [
        "Write about a time when you felt truly at peace.",
        "Describe your ideal morning routine in detail.",
        "What are three things you're grateful for today?",
        "Reflect on a recent challenge and what you learned from it."
    ]

    # Convert to JSON string
    json_response = json.dumps(valid_response)

    print("\n=== Test: Valid JSON response (list format) ===")
    result = generator._parse_ai_response(json_response)
    print(f"Number of prompts extracted: {len(result)}")
    print(f"Type of result: {type(result)}")

    for i, prompt_text in enumerate(result):
        print(f"Prompt {i+1}: {prompt_text[:50]}...")

    # Test with backticks
    print("\n=== Test: Valid JSON response with backticks ===")
    backticks_response = f"```json\n{json_response}\n```"
    result = generator._parse_ai_response(backticks_response)
    print(f"Number of prompts extracted: {len(result)}")

    # Test with "json" prefix
    print("\n=== Test: Valid JSON response with 'json' prefix ===")
    json_prefix_response = f"json\n{json_response}"
    result = generator._parse_ai_response(json_prefix_response)
    print(f"Number of prompts extracted: {len(result)}")

    # Test fallback for old dictionary format
    print("\n=== Test: Fallback for old dictionary format ===")
    old_format_response = {
        "newprompt0": "Write about a time when you felt truly at peace.",
        "newprompt1": "Describe your ideal morning routine in detail.",
        "newprompt2": "What are three things you're grateful for today?",
        "newprompt3": "Reflect on a recent challenge and what you learned from it."
    }
    json_old_response = json.dumps(old_format_response)
    result = generator._parse_ai_response(json_old_response)
    print(f"Number of prompts extracted: {len(result)}")


if __name__ == "__main__":
    test_valid_response()