more reliability in processing malformed content, but exception introduced
@@ -331,6 +331,13 @@ class JournalPromptGenerator:
                     }
                     new_prompts.append(prompt_obj)
 
+            # If no prompts were found in the JSON, provide debug info
+            if not new_prompts:
+                self.console.print("\n[yellow]Warning: JSON parsed successfully but no prompts found with expected keys[/yellow]")
+                self.console.print(f"[yellow]Expected keys: newprompt0 to newprompt{self.settings['num_prompts']-1}[/yellow]")
+                self.console.print(f"[yellow]Keys found in JSON: {list(data.keys())}[/yellow]")
+                self.console.print(f"[yellow]Full JSON data: {json.dumps(data, indent=2)}[/yellow]")
+
             return new_prompts
 
         except json.JSONDecodeError:
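For context on the keys the new warnings reference: the parser expects the model to return one flat JSON object keyed newprompt0 through newprompt{num_prompts-1}. Below is a minimal sketch of a well-formed payload and the extraction step; the key names come from the diff, but the prompt text and the loop are illustrative, not the project's exact parser code:

    import json

    # Illustrative payload for num_prompts = 2 (key names per the diff above;
    # the prompt text is made up).
    payload = '{"newprompt0": "Describe a small win from today.", "newprompt1": "What drained your energy this week?"}'
    data = json.loads(payload)

    # Build one {key: text} dict per prompt, mirroring the prompt_obj dicts
    # that new_prompts.append(prompt_obj) accumulates in the diff.
    new_prompts = [{key: data[key]} for key in sorted(data) if key.startswith("newprompt")]
    assert len(new_prompts) == 2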
@@ -352,11 +359,43 @@ class JournalPromptGenerator:
                 }
                 new_prompts.append(prompt_obj)
 
-        # If still no prompts could be parsed, dump the full payload for debugging
+        # If still no prompts could be parsed, provide detailed debug information
         if not new_prompts:
-            self.console.print("[red]Error: Could not extract any prompts from AI response[/red]")
-            self.console.print("[red]Full payload dump for debugging:[/red]")
+            self.console.print("\n[red]ERROR: Could not extract any prompts from AI response[/red]")
+            self.console.print("[red]" + "="*60 + "[/red]")
+            self.console.print("[bold red]DEBUG INFORMATION:[/bold red]")
+            self.console.print("[red]" + "="*60 + "[/red]")
+
+            # Show response metadata
+            self.console.print(f"[yellow]Response length: {len(response_content)} characters[/yellow]")
+            self.console.print(f"[yellow]Expected number of prompts: {self.settings['num_prompts']}[/yellow]")
+
+            # Show first 500 characters of response
+            preview = response_content[:500]
+            if len(response_content) > 500:
+                preview += "..."
+            self.console.print(f"[yellow]Response preview (first 500 chars):[/yellow]")
+            self.console.print(f"[yellow]{preview}[/yellow]")
+
+            # Show cleaned content analysis
+            self.console.print(f"[yellow]Cleaned content length: {len(cleaned_content)} characters[/yellow]")
+            self.console.print(f"[yellow]Cleaned content preview: {cleaned_content[:200]}...[/yellow]")
+
+            # Show line analysis
+            self.console.print(f"[yellow]Number of lines in response: {len(lines)}[/yellow]")
+            self.console.print(f"[yellow]First 5 lines:[/yellow]")
+            for i, line in enumerate(lines[:5]):
+                self.console.print(f"[yellow] Line {i+1}: {line[:100]}{'...' if len(line) > 100 else ''}[/yellow]")
+
+            # Show JSON parsing attempt details
+            self.console.print(f"[yellow]JSON parsing attempted on cleaned content:[/yellow]")
+            self.console.print(f"[yellow] Cleaned content starts with: {cleaned_content[:50]}...[/yellow]")
+
+            # Show full payload for debugging
+            self.console.print("\n[bold red]FULL PAYLOAD DUMP:[/bold red]")
+            self.console.print("[red]" + "="*60 + "[/red]")
             self.console.print(f"[red]{response_content}[/red]")
+            self.console.print("[red]" + "="*60 + "[/red]")
 
         return new_prompts
 
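A caveat about the preview lines above: interpolating raw model output into console.print() inside [yellow]...[/yellow] markup lets stray square brackets in the response be parsed as Rich tags, and an unmatched closing tag raises rich.errors.MarkupError. That is one plausible source of the exception the commit title owns up to, though the diff itself does not confirm it. A sketch of the safer pattern, using Rich's standard escape helper:

    from rich.console import Console
    from rich.markup import escape  # standard Rich helper

    console = Console()
    raw = 'model output with stray [brackets] and a bogus [/close] tag'

    # console.print(f"[yellow]{raw}[/yellow]") would raise MarkupError on the
    # unmatched [/close]; escape() neutralises the brackets first.
    console.print(f"[yellow]{escape(raw)}[/yellow]")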
@@ -432,9 +471,8 @@ class JournalPromptGenerator:
 
         except Exception as e:
             self.console.print(f"[red]Error calling AI API: {e}[/red]")
-            self.console.print(f"[yellow]Full response content for debugging:[/yellow]")
-            self.console.print(f"[yellow]{response_content}[/yellow]")
-
+            self.console.print(f"[yellow]Full prompt sent to API (first 500 chars):[/yellow]")
+            self.console.print(f"[yellow]{full_prompt[:500]}...[/yellow]")
             return []
 
         # Parse the response
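Worth spelling out why swapping response_content for full_prompt in this handler is more than cosmetic: a name first assigned inside the try block is still unbound if the call raises, so referencing it in the except clause fails with UnboundLocalError and masks the original error. Whether that exact failure motivated the change is an inference; a minimal illustration with hypothetical names:

    def fetch():
        # Stand-in for the API call; raises before returning anything
        raise ConnectionError("API unreachable")

    def old_style():
        try:
            response_content = fetch()  # never assigned: fetch() raises first
            return response_content
        except Exception as e:
            print(f"Error calling AI API: {e}")
            return response_content  # UnboundLocalError, masking the real error

    try:
        old_style()
    except UnboundLocalError as exc:
        print(f"debug output itself crashed: {exc}")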
@@ -1,5 +1 @@
-[
-  {
-    "poolprompt004": "Recall a promise you made to yourself long ago\u2014something significant you vowed to do, be, or avoid. It might have been written down, solemnly sworn, or just a quiet internal pact. Have you kept it? If so, describe the journey of that fidelity. What did it cost you, and what did it give you? If not, explore the moment or the gradual process of breaking that promise. Was it a betrayal or a necessary evolution? Write a letter to your past self about that promise, explaining the current state of affairs with compassion and honesty."
-  }
-]
+[]
test_valid_response.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+"""
+Test the error handling with a valid response.
+"""
+
+import sys
+import os
+import json
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+from generate_prompts import JournalPromptGenerator
+
+
+def test_valid_response():
+    """Test with a valid JSON response."""
+
+    # Create a mock generator
+    generator = JournalPromptGenerator(config_path=".env")
+
+    # Create a valid response with 4 prompts (default num_prompts from settings)
+    valid_response = {
+        "newprompt0": "Write about a time when you felt truly at peace.",
+        "newprompt1": "Describe your ideal morning routine in detail.",
+        "newprompt2": "What are three things you're grateful for today?",
+        "newprompt3": "Reflect on a recent challenge and what you learned from it."
+    }
+
+    # Convert to JSON string
+    json_response = json.dumps(valid_response)
+
+    print("\n=== Test: Valid JSON response ===")
+    result = generator._parse_ai_response(json_response)
+    print(f"Number of prompts extracted: {len(result)}")
+
+    for i, prompt_dict in enumerate(result):
+        prompt_key = list(prompt_dict.keys())[0]
+        prompt_text = prompt_dict[prompt_key]
+        print(f"Prompt {i+1}: {prompt_text[:50]}...")
+
+    # Test with backticks
+    print("\n=== Test: Valid JSON response with backticks ===")
+    backticks_response = f"```json\n{json_response}\n```"
+    result = generator._parse_ai_response(backticks_response)
+    print(f"Number of prompts extracted: {len(result)}")
+
+    # Test with "json" prefix
+    print("\n=== Test: Valid JSON response with 'json' prefix ===")
+    json_prefix_response = f"json\n{json_response}"
+    result = generator._parse_ai_response(json_prefix_response)
+    print(f"Number of prompts extracted: {len(result)}")
+
+if __name__ == "__main__":
+    test_valid_response()
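Assuming generate_prompts.py and a readable .env sit alongside the new test (its constructor call above passes config_path=".env"), the script runs directly:

    python test_valid_response.py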