#!/usr/bin/env python3
"""
Daily Journal Prompt Generator
A tool that uses AI to generate creative writing prompts for daily journaling.
"""
import os
import json
import sys
import argparse
import configparser
from typing import List, Dict, Any, Optional
from openai import OpenAI
from dotenv import load_dotenv
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.prompt import Prompt, Confirm
from rich.progress import Progress, SpinnerColumn, TextColumn
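
# Data files used by this tool (paths are relative to the working directory, as
# implied by the open() calls below):
#   ds_prompt.txt          - prompt template sent to the model
#   ds_feedback.txt        - template for theme-word (feedback) analysis
#   settings.cfg           - prompt length / count / pool-size settings
#   prompts_pool.json      - cached prompts waiting to be drawn
#   prompts_historic.json  - cyclic buffer of the last 60 used prompts
#   feedback_words.json    - current theme words with user-assigned weights
#   feedback_historic.json - cyclic buffer of the last 30 theme words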


class JournalPromptGenerator:
    """Main class for generating journal prompts using AI."""

    def __init__(self, config_path: str = ".env"):
        """Initialize the generator with configuration."""
        self.console = Console()
        self.config_path = config_path
        self.client = None
        self.historic_prompts = []
        self.pool_prompts = []
        self.feedback_words = []
        self.feedback_historic = []
        self.prompt_template = ""
        self.settings = {}
        # Load configuration
        self._load_config()
        self._load_settings()
        # Load data files
        self._load_prompt_template()
        self._load_historic_prompts()
        self._load_pool_prompts()
        self._load_feedback_words()
        self._load_feedback_historic()
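
    # Illustrative .env contents (keys taken from _load_config below; the API key
    # value is a placeholder, not a real credential):
    #
    #   DEEPSEEK_API_KEY=your-key-here
    #   API_BASE_URL=https://api.deepseek.com
    #   MODEL=deepseek-chat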

    def _load_config(self):
        """Load configuration from environment file."""
        load_dotenv(self.config_path)
        # Get API key
        self.api_key = os.getenv("DEEPSEEK_API_KEY") or os.getenv("OPENAI_API_KEY")
        if not self.api_key:
            self.console.print("[red]Error: No API key found in .env file[/red]")
            self.console.print("Please add DEEPSEEK_API_KEY or OPENAI_API_KEY to your .env file")
            sys.exit(1)
        # Get API base URL (default to DeepSeek)
        self.base_url = os.getenv("API_BASE_URL", "https://api.deepseek.com")
        # Get model (default to deepseek-chat)
        self.model = os.getenv("MODEL", "deepseek-chat")
        # Initialize OpenAI client
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )
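
    # Illustrative settings.cfg layout that _load_settings expects; the values shown
    # mirror the in-code defaults and may differ in a real config:
    #
    #   [prompts]
    #   min_length = 500
    #   max_length = 1000
    #   num_prompts = 6
    #
    #   [prefetch]
    #   cached_pool_volume = 20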

    def _load_settings(self):
        """Load settings from the settings.cfg configuration file."""
        config = configparser.ConfigParser()
        # Set default values
        self.settings = {
            'min_length': 500,
            'max_length': 1000,
            'num_prompts': 6,
            'cached_pool_volume': 20  # Default value
        }
        try:
            # configparser.read() silently skips missing files and returns the list
            # of files it actually parsed, so check that instead of catching
            # FileNotFoundError (which it never raises).
            read_files = config.read('settings.cfg')
            if not read_files:
                self.console.print("[yellow]Warning: settings.cfg not found, using default values[/yellow]")
                return
            if 'prompts' in config:
                prompts_section = config['prompts']
                # Load min_length
                if 'min_length' in prompts_section:
                    self.settings['min_length'] = int(prompts_section['min_length'])
                # Load max_length
                if 'max_length' in prompts_section:
                    self.settings['max_length'] = int(prompts_section['max_length'])
                # Load num_prompts
                if 'num_prompts' in prompts_section:
                    self.settings['num_prompts'] = int(prompts_section['num_prompts'])
            # Load cached_pool_volume from the prefetch section
            if 'prefetch' in config:
                prefetch_section = config['prefetch']
                if 'cached_pool_volume' in prefetch_section:
                    self.settings['cached_pool_volume'] = int(prefetch_section['cached_pool_volume'])
        except ValueError as e:
            self.console.print(f"[yellow]Warning: Invalid value in settings.cfg: {e}, using default values[/yellow]")
        except Exception as e:
            self.console.print(f"[yellow]Warning: Error reading settings.cfg: {e}, using default values[/yellow]")

    def _load_prompt_template(self):
        """Load the prompt template from ds_prompt.txt."""
        try:
            with open("ds_prompt.txt", "r") as f:
                self.prompt_template = f.read()
        except FileNotFoundError:
            self.console.print("[red]Error: ds_prompt.txt not found[/red]")
            sys.exit(1)

    def _load_historic_prompts(self):
        """Load historic prompts from JSON file."""
        try:
            with open("prompts_historic.json", "r") as f:
                self.historic_prompts = json.load(f)
        except FileNotFoundError:
            self.console.print("[yellow]Warning: prompts_historic.json not found, starting with empty history[/yellow]")
            self.historic_prompts = []
        except json.JSONDecodeError:
            self.console.print("[yellow]Warning: prompts_historic.json is corrupted, starting with empty history[/yellow]")
            self.historic_prompts = []

    def _save_historic_prompts(self):
        """Save historic prompts to JSON file (keeping only the first 60)."""
        # Keep only the first 60 prompts (newest are at the beginning)
        if len(self.historic_prompts) > 60:
            self.historic_prompts = self.historic_prompts[:60]
        with open("prompts_historic.json", "w") as f:
            json.dump(self.historic_prompts, f, indent=2)

    def _load_pool_prompts(self):
        """Load pool prompts from JSON file."""
        try:
            with open("prompts_pool.json", "r") as f:
                self.pool_prompts = json.load(f)
        except FileNotFoundError:
            self.console.print("[yellow]Warning: prompts_pool.json not found, starting with empty pool[/yellow]")
            self.pool_prompts = []
        except json.JSONDecodeError:
            self.console.print("[yellow]Warning: prompts_pool.json is corrupted, starting with empty pool[/yellow]")
            self.pool_prompts = []

    def _load_feedback_words(self):
        """Load feedback words from JSON file."""
        try:
            with open("feedback_words.json", "r") as f:
                self.feedback_words = json.load(f)
        except FileNotFoundError:
            self.console.print("[yellow]Warning: feedback_words.json not found, starting with empty feedback words[/yellow]")
            self.feedback_words = []
        except json.JSONDecodeError:
            self.console.print("[yellow]Warning: feedback_words.json is corrupted, starting with empty feedback words[/yellow]")
            self.feedback_words = []

    def _load_feedback_historic(self):
        """Load historic feedback words from JSON file."""
        try:
            with open("feedback_historic.json", "r") as f:
                self.feedback_historic = json.load(f)
        except FileNotFoundError:
            self.console.print("[yellow]Warning: feedback_historic.json not found, starting with empty feedback history[/yellow]")
            self.feedback_historic = []
        except json.JSONDecodeError:
            self.console.print("[yellow]Warning: feedback_historic.json is corrupted, starting with empty feedback history[/yellow]")
            self.feedback_historic = []

    def _save_feedback_words(self):
        """Save feedback words to JSON file."""
        with open("feedback_words.json", "w") as f:
            json.dump(self.feedback_words, f, indent=2)

    def _save_feedback_historic(self):
        """Save historic feedback words to JSON file (keeping only the first 30)."""
        # Keep only the first 30 feedback words (newest are at the beginning)
        if len(self.feedback_historic) > 30:
            self.feedback_historic = self.feedback_historic[:30]
        with open("feedback_historic.json", "w") as f:
            json.dump(self.feedback_historic, f, indent=2)

    def _save_pool_prompts(self):
        """Save pool prompts to JSON file."""
        with open("prompts_pool.json", "w") as f:
            json.dump(self.pool_prompts, f, indent=2)
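
    # Shapes of the JSON data files, as implied by the load/save and update code
    # above and below (examples are illustrative):
    #
    #   prompts_pool.json      -> ["<prompt text>", "<prompt text>", ...]
    #   prompts_historic.json  -> [{"prompt00": "<text>"}, {"prompt01": "<text>"}, ...]
    #   feedback_words.json    -> [{"feedback00": "<word>", "weight": 3}, ...]
    #   feedback_historic.json -> [{"feedback00": "<word>"}, {"feedback01": "<word>"}, ...]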

    def add_prompts_to_pool(self, prompts: List[str]):
        """Add generated prompts to the pool."""
        # Simply extend the pool with the new prompts (no keys)
        self.pool_prompts.extend(prompts)
        self._save_pool_prompts()
        self.console.print(f"[green]Added {len(prompts)} prompts to pool[/green]")

    def draw_prompts_from_pool(self, count: Optional[int] = None) -> List[str]:
        """Draw prompts from the pool (removes them from the pool)."""
        if count is None:
            count = self.settings['num_prompts']
        if len(self.pool_prompts) < count:
            self.console.print(f"[yellow]Warning: Pool only has {len(self.pool_prompts)} prompts, requested {count}[/yellow]")
            count = len(self.pool_prompts)
        if count == 0:
            self.console.print("[red]Error: Pool is empty[/red]")
            return []
        # Draw prompts from the beginning of the pool
        drawn_prompts = self.pool_prompts[:count]
        self.pool_prompts = self.pool_prompts[count:]
        # Save the updated pool
        self._save_pool_prompts()
        return drawn_prompts

    def add_prompt_to_history(self, prompt_text: str):
        """
        Add a single prompt to the historic prompts cyclic buffer.
        The new prompt becomes prompt00, all others shift down, and prompt59 is discarded.
        """
        # Create the new prompt object
        new_prompt = {
            "prompt00": prompt_text
        }
        # Shift all existing prompts down by one position.
        # We'll create a new list starting with the new prompt.
        updated_prompts = [new_prompt]
        # Add all existing prompts, shifting their numbers down by one
        for i, prompt_dict in enumerate(self.historic_prompts):
            if i >= 59:  # We only keep 60 prompts total (00-59)
                break
            # Get the prompt text (without shadowing the prompt_text parameter)
            prompt_key = list(prompt_dict.keys())[0]
            existing_text = prompt_dict[prompt_key]
            # Create the prompt with its new number (shifted down by one)
            new_prompt_key = f"prompt{i+1:02d}"
            updated_prompts.append({
                new_prompt_key: existing_text
            })
        self.historic_prompts = updated_prompts
        self._save_historic_prompts()
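
    # Illustration of the shift performed above (texts unchanged, keys renumbered):
    #   before: [{"prompt00": "A"}, {"prompt01": "B"}, ...]
    #   after : [{"prompt00": "<new>"}, {"prompt01": "A"}, {"prompt02": "B"}, ...]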

    def add_feedback_words_to_history(self):
        """
        Add current feedback words to the historic feedback words cyclic buffer.
        The 6 new feedback words become feedback00-feedback05, all others shift down,
        and the oldest six (feedback24-feedback29) are discarded (keeping only 30 items total).
        """
        # Extract just the words from the current feedback words.
        # Current feedback_words structure: [{"feedback00": "word", "weight": 3}, ...]
        new_feedback_words = []
        for i, feedback_item in enumerate(self.feedback_words):
            # Get the word from the feedback item (key is feedback00, feedback01, etc.)
            feedback_key = f"feedback{i:02d}"
            if feedback_key in feedback_item:
                word = feedback_item[feedback_key]
                # Create a new feedback word object with just the word (no weight)
                new_feedback_words.append({
                    feedback_key: word
                })
        # If we don't have 6 feedback words, we can't add them to history
        if len(new_feedback_words) != 6:
            self.console.print(f"[yellow]Warning: Expected 6 feedback words, got {len(new_feedback_words)}. Not adding to history.[/yellow]")
            return
        # Shift all existing feedback words down by 6 positions.
        # Start the new list with a copy of the 6 new feedback words.
        updated_feedback_historic = list(new_feedback_words)
        # Add all existing feedback words, shifting their numbers down by 6
        for i, feedback_dict in enumerate(self.feedback_historic):
            if i >= 24:  # We only keep 30 feedback words total (00-29), and we've already added 6
                break
            # Get the feedback word
            feedback_key = list(feedback_dict.keys())[0]
            word = feedback_dict[feedback_key]
            # Create the feedback word with its new number (shifted down by 6)
            new_feedback_key = f"feedback{i+6:02d}"
            updated_feedback_historic.append({
                new_feedback_key: word
            })
        self.feedback_historic = updated_feedback_historic
        self._save_feedback_historic()
        self.console.print("[green]Added 6 feedback words to history[/green]")

    def _parse_ai_response(self, response_content: str) -> List[str]:
        """
        Parse the AI response to extract new prompts.
        Expected format: JSON list/array of prompt strings.
        Handles DeepSeek API responses that may include backticks and a leading "json" string.
        """
        # First, try to clean up the response content
        cleaned_content = self._clean_ai_response(response_content)
        try:
            # Try to parse as JSON
            data = json.loads(cleaned_content)
            # Check if data is a list
            if isinstance(data, list):
                # Return the list of prompt strings directly,
                # ensuring we have the correct number of prompts
                if len(data) >= self.settings['num_prompts']:
                    return data[:self.settings['num_prompts']]
                else:
                    self.console.print(f"[yellow]Warning: AI returned {len(data)} prompts, expected {self.settings['num_prompts']}[/yellow]")
                    return data
            elif isinstance(data, dict):
                # Fallback for old format: dictionary with newprompt0, newprompt1, etc.
                self.console.print("[yellow]Warning: AI returned dictionary format, expected list format[/yellow]")
                new_prompts = []
                for i in range(self.settings['num_prompts']):
                    key = f"newprompt{i}"
                    if key in data:
                        new_prompts.append(data[key])
                return new_prompts
            else:
                self.console.print(f"[yellow]Warning: AI returned unexpected data type: {type(data)}[/yellow]")
                return []
        except json.JSONDecodeError:
            # If not valid JSON, try to extract prompts from the text
            self.console.print("[yellow]Warning: AI response is not valid JSON, attempting to extract prompts...[/yellow]")
            self.console.print("[yellow]Full response content for debugging:[/yellow]")
            self.console.print(f"[yellow]{response_content}[/yellow]")
            self.console.print(f"[yellow]Cleaned content: {cleaned_content}[/yellow]")
            # Look for patterns in the text
            lines = response_content.strip().split('\n')
            new_prompts = []
            for line in lines[:self.settings['num_prompts']]:  # Examine at most the first N lines
                line = line.strip()
                if line and len(line) > 50:  # Reasonable minimum length for a prompt
                    new_prompts.append(line)
            # If still no prompts could be parsed, provide detailed debug information
            if not new_prompts:
                self.console.print("\n[red]ERROR: Could not extract any prompts from AI response[/red]")
                self.console.print("[red]" + "=" * 60 + "[/red]")
                self.console.print("[bold red]DEBUG INFORMATION:[/bold red]")
                self.console.print("[red]" + "=" * 60 + "[/red]")
                # Show response metadata
                self.console.print(f"[yellow]Response length: {len(response_content)} characters[/yellow]")
                self.console.print(f"[yellow]Expected number of prompts: {self.settings['num_prompts']}[/yellow]")
                # Show the first 500 characters of the response
                preview = response_content[:500]
                if len(response_content) > 500:
                    preview += "..."
                self.console.print("[yellow]Response preview (first 500 chars):[/yellow]")
                self.console.print(f"[yellow]{preview}[/yellow]")
                # Show cleaned content analysis
                self.console.print(f"[yellow]Cleaned content length: {len(cleaned_content)} characters[/yellow]")
                self.console.print(f"[yellow]Cleaned content preview: {cleaned_content[:200]}...[/yellow]")
                # Show line analysis
                self.console.print(f"[yellow]Number of lines in response: {len(lines)}[/yellow]")
                self.console.print("[yellow]First 5 lines:[/yellow]")
                for i, line in enumerate(lines[:5]):
                    self.console.print(f"[yellow]  Line {i+1}: {line[:100]}{'...' if len(line) > 100 else ''}[/yellow]")
                # Show JSON parsing attempt details
                self.console.print("[yellow]JSON parsing attempted on cleaned content:[/yellow]")
                self.console.print(f"[yellow]  Cleaned content starts with: {cleaned_content[:50]}...[/yellow]")
                # Show the full payload for debugging
                self.console.print("\n[bold red]FULL PAYLOAD DUMP:[/bold red]")
                self.console.print("[red]" + "=" * 60 + "[/red]")
                self.console.print(f"[red]{response_content}[/red]")
                self.console.print("[red]" + "=" * 60 + "[/red]")
            return new_prompts

    def _clean_ai_response(self, response_content: str) -> str:
        """
        Clean up AI response content to handle common formatting issues from the DeepSeek API.
        Handles:
        1. Leading/trailing backticks (```json ... ```)
        2. A leading "json" string on its own line
        3. Extra whitespace and newlines
        """
        content = response_content.strip()
        # Remove leading/trailing backticks (```json ... ```)
        if content.startswith('```'):
            # Find the first newline after the opening backticks
            lines = content.split('\n')
            if len(lines) > 1:
                # Check if the first line contains "json" or another language specifier
                first_line = lines[0].strip()
                if 'json' in first_line.lower() or first_line == '```':
                    # Remove the first line (```json or ```)
                    content = '\n'.join(lines[1:])
            # Remove trailing backticks if present
            if content.endswith('```'):
                content = content[:-3].rstrip()
        # Remove a leading "json" string on its own line (case-insensitive)
        lines = content.split('\n')
        if len(lines) > 0:
            first_line = lines[0].strip().lower()
            if first_line == 'json':
                content = '\n'.join(lines[1:])
        # Also handle the case where "json" is at the beginning of the first line
        # but not the entire line (e.g., "json\n{...}")
        content = content.strip()
        if content.lower().startswith('json\n'):
            content = content[4:].strip()
        return content.strip()
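
    # Example of the cleanup above (illustrative raw model output):
    #
    #   ```json
    #   ["First prompt text ...", "Second prompt text ..."]
    #   ```
    #
    # becomes:
    #
    #   ["First prompt text ...", "Second prompt text ..."]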

    def generate_specific_number_of_prompts(self, count: int) -> List[str]:
        """Generate a specific number of journal prompts using AI."""
        self.console.print(f"\n[cyan]Generating {count} new journal prompts...[/cyan]")
        # Prepare the prompt with the specific count
        full_prompt = self._prepare_prompt_with_count(count)
        # Show progress
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            transient=True,
        ) as progress:
            progress.add_task("Calling AI API...", total=None)
            try:
                # Call the AI API
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=[
                        {"role": "system", "content": "You are a creative writing assistant that generates journal prompts. Always respond with valid JSON."},
                        {"role": "user", "content": full_prompt}
                    ],
                    temperature=0.7,
                    max_tokens=2000
                )
                response_content = response.choices[0].message.content
            except Exception as e:
                self.console.print(f"[red]Error calling AI API: {e}[/red]")
                self.console.print("[yellow]Full prompt sent to API (first 500 chars):[/yellow]")
                self.console.print(f"[yellow]{full_prompt[:500]}...[/yellow]")
                return []
        # Parse the response
        new_prompts = self._parse_ai_response_with_count(response_content, count)
        if not new_prompts:
            self.console.print("[red]Error: Could not parse any prompts from AI response[/red]")
            return []
        return new_prompts

    def _prepare_prompt_with_count(self, count: int) -> str:
        """Prepare the full prompt with historic context and a specific count."""
        # Start with the base template
        template = self.prompt_template
        # Add the instruction for the specific number of prompts
        # (this is added here because it is being removed from ds_prompt.txt)
        prompt_instruction = f"Please generate {count} writing prompts, each between {self.settings['min_length']} and {self.settings['max_length']} characters."
        # Format historic prompts for the AI
        if self.historic_prompts:
            historic_context = json.dumps(self.historic_prompts, indent=2)
            full_prompt = f"{template}\n\n{prompt_instruction}\n\nPrevious prompts:\n{historic_context}"
        else:
            full_prompt = f"{template}\n\n{prompt_instruction}"
        # Add feedback words if available
        if self.feedback_words:
            feedback_context = json.dumps(self.feedback_words, indent=2)
            full_prompt = f"{full_prompt}\n\nFeedback words:\n{feedback_context}"
        return full_prompt

    def _parse_ai_response_with_count(self, response_content: str, expected_count: int) -> List[str]:
        """
        Parse the AI response to extract new prompts with a specific expected count.
        """
        # First, try to clean up the response content
        cleaned_content = self._clean_ai_response(response_content)
        try:
            # Try to parse as JSON
            data = json.loads(cleaned_content)
            # Check if data is a list
            if isinstance(data, list):
                # Return the list of prompt strings directly,
                # ensuring we have the correct number of prompts
                if len(data) >= expected_count:
                    return data[:expected_count]
                else:
                    self.console.print(f"[yellow]Warning: AI returned {len(data)} prompts, expected {expected_count}[/yellow]")
                    return data
            elif isinstance(data, dict):
                # Fallback for old format: dictionary with newprompt0, newprompt1, etc.
                self.console.print("[yellow]Warning: AI returned dictionary format, expected list format[/yellow]")
                new_prompts = []
                for i in range(expected_count):
                    key = f"newprompt{i}"
                    if key in data:
                        new_prompts.append(data[key])
                return new_prompts
            else:
                self.console.print(f"[yellow]Warning: AI returned unexpected data type: {type(data)}[/yellow]")
                return []
        except json.JSONDecodeError:
            # If not valid JSON, try to extract prompts from the text
            self.console.print("[yellow]Warning: AI response is not valid JSON, attempting to extract prompts...[/yellow]")
            # Look for patterns in the text
            lines = response_content.strip().split('\n')
            new_prompts = []
            for line in lines[:expected_count]:  # Examine at most the first N lines
                line = line.strip()
                if line and len(line) > 50:  # Reasonable minimum length for a prompt
                    new_prompts.append(line)
            return new_prompts

    def fill_pool_to_target(self) -> int:
        """Fill the prompt pool to reach the cached_pool_volume target with a single API call."""
        target_volume = self.settings['cached_pool_volume']
        current_pool_size = len(self.pool_prompts)
        if current_pool_size >= target_volume:
            self.console.print(f"[green]Pool already has {current_pool_size} prompts, target is {target_volume}[/green]")
            return 0
        prompts_needed = target_volume - current_pool_size
        self.console.print(f"[cyan]Current pool size: {current_pool_size}[/cyan]")
        self.console.print(f"[cyan]Target pool size: {target_volume}[/cyan]")
        self.console.print(f"[cyan]Prompts needed: {prompts_needed}[/cyan]")
        # Make a single API call to generate exactly the number of prompts needed
        self.console.print(f"\n[cyan]Making single API call to generate {prompts_needed} prompts...[/cyan]")
        new_prompts = self.generate_specific_number_of_prompts(prompts_needed)
        if new_prompts:
            # Add all generated prompts to the pool
            self.pool_prompts.extend(new_prompts)
            total_added = len(new_prompts)
            self.console.print(f"[green]Added {total_added} prompts to pool[/green]")
            # Save the updated pool
            self._save_pool_prompts()
            return total_added
        else:
            self.console.print("[red]Failed to generate prompts[/red]")
            return 0
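
    # Worked example: with the default cached_pool_volume of 20 and, say, 14 prompts
    # already cached, a single API call requests 20 - 14 = 6 new prompts.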

    def generate_theme_feedback_words(self) -> List[str]:
        """Generate 6 theme feedback words using AI based on historic prompts."""
        self.console.print("\n[cyan]Generating theme feedback words based on historic prompts...[/cyan]")
        # Load the feedback prompt template
        try:
            with open("ds_feedback.txt", "r") as f:
                feedback_template = f.read()
        except FileNotFoundError:
            self.console.print("[red]Error: ds_feedback.txt not found[/red]")
            return []
        # Prepare the full prompt with historic context and feedback words
        if self.historic_prompts:
            historic_context = json.dumps(self.historic_prompts, indent=2)
            full_prompt = f"{feedback_template}\n\nPrevious prompts:\n{historic_context}"
            # Add current feedback words if available (with weights)
            if self.feedback_words:
                feedback_context = json.dumps(self.feedback_words, indent=2)
                full_prompt = f"{full_prompt}\n\nCurrent feedback themes (with weights):\n{feedback_context}"
            # Add historic feedback words if available (just words, no weights)
            if self.feedback_historic:
                feedback_historic_context = json.dumps(self.feedback_historic, indent=2)
                full_prompt = f"{full_prompt}\n\nHistoric feedback themes (just words):\n{feedback_historic_context}"
        else:
            self.console.print("[yellow]Warning: No historic prompts available for feedback analysis[/yellow]")
            return []
        # Show progress
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            transient=True,
        ) as progress:
            progress.add_task("Calling AI API for theme analysis...", total=None)
            try:
                # Call the AI API
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=[
                        {"role": "system", "content": "You are a creative writing assistant that analyzes writing prompts. Always respond with valid JSON."},
                        {"role": "user", "content": full_prompt}
                    ],
                    temperature=0.7,
                    max_tokens=1000
                )
                response_content = response.choices[0].message.content
            except Exception as e:
                self.console.print(f"[red]Error calling AI API: {e}[/red]")
                self.console.print("[yellow]Full prompt sent to API (first 500 chars):[/yellow]")
                self.console.print(f"[yellow]{full_prompt[:500]}...[/yellow]")
                return []
        # Parse the response to get 6 theme words
        theme_words = self._parse_theme_words_response(response_content)
        if not theme_words or len(theme_words) != 6:
            self.console.print(f"[red]Error: Expected 6 theme words, got {len(theme_words) if theme_words else 0}[/red]")
            return []
        return theme_words
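
    # Note: ds_feedback.txt is assumed to instruct the model to return a JSON list
    # of exactly 6 lowercase theme words, which is what _parse_theme_words_response
    # below expects.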

    def _parse_theme_words_response(self, response_content: str) -> List[str]:
        """
        Parse the AI response to extract 6 theme words.
        Expected format: JSON list of 6 lowercase words.
        """
        # First, try to clean up the response content
        cleaned_content = self._clean_ai_response(response_content)
        try:
            # Try to parse as JSON
            data = json.loads(cleaned_content)
            # Check if data is a list
            if isinstance(data, list):
                # Ensure all items are strings and lowercase them
                theme_words = []
                for word in data:
                    if isinstance(word, str):
                        theme_words.append(word.lower().strip())
                    else:
                        theme_words.append(str(word).lower().strip())
                return theme_words
            else:
                self.console.print(f"[yellow]Warning: AI returned unexpected data type: {type(data)}[/yellow]")
                return []
        except json.JSONDecodeError:
            # If not valid JSON, try to extract words from the text
            self.console.print("[yellow]Warning: AI response is not valid JSON, attempting to extract theme words...[/yellow]")
            # Look for patterns in the text
            lines = response_content.strip().split('\n')
            theme_words = []
            for line in lines:
                line = line.strip()
                if line and len(line) < 50:  # Theme words should be short
                    # Try to extract words (lowercase, no punctuation)
                    words = [w.lower().strip('.,;:!?()[]{}"\'') for w in line.split()]
                    theme_words.extend(words)
                if len(theme_words) >= 6:
                    break
            return theme_words[:6]

    def collect_feedback_ratings(self, theme_words: List[str]) -> List[Dict[str, Any]]:
        """Collect user ratings (0-6) for each theme word and return structured feedback."""
        self.console.print("\n[bold]Please rate each theme word from 0 to 6:[/bold]")
        self.console.print("[dim]0 = Not relevant, 6 = Very relevant[/dim]\n")
        feedback_items = []
        for i, word in enumerate(theme_words):
            while True:
                try:
                    rating = Prompt.ask(
                        f"[bold]Word {i+1}: {word}[/bold]",
                        choices=[str(x) for x in range(0, 7)],  # 0-6 inclusive
                        default="3"
                    )
                    rating_int = int(rating)
                    if 0 <= rating_int <= 6:
                        # Create feedback item with key (feedback00, feedback01, etc.)
                        feedback_key = f"feedback{i:02d}"
                        feedback_items.append({
                            feedback_key: word,
                            "weight": rating_int
                        })
                        break
                    else:
                        self.console.print("[yellow]Please enter a number between 0 and 6[/yellow]")
                except ValueError:
                    self.console.print("[yellow]Please enter a valid number[/yellow]")
        return feedback_items

    def update_feedback_words(self, new_feedback_items: List[Dict[str, Any]]):
        """Update feedback words with new ratings."""
        # Replace existing feedback words with the new ones
        self.feedback_words = new_feedback_items
        self._save_feedback_words()
        self.console.print(f"[green]Updated feedback words with {len(new_feedback_items)} items[/green]")
        # Also add the new feedback words to the historic buffer
        self.add_feedback_words_to_history()

    def display_prompts(self, prompts: List[str]):
        """Display generated prompts in a nice format."""
        self.console.print("\n" + "=" * 60)
        self.console.print("[bold green]✨ READING FROM POOL ✨[/bold green]")
        self.console.print("=" * 60 + "\n")
        for i, prompt_text in enumerate(prompts, 1):
            # Create a panel for each prompt with UI numbering
            panel = Panel(
                f"[cyan]{prompt_text}[/cyan]",
                title=f"[bold]Prompt #{i}[/bold]",
                border_style="blue",
                padding=(1, 2)
            )
            self.console.print(panel)
            self.console.print()  # Empty line between prompts

    def show_combined_stats(self):
        """Show combined statistics about both the prompt pool and the history."""
        # Pool statistics
        total_pool_prompts = len(self.pool_prompts)
        pool_table = Table(title="Prompt Pool Statistics")
        pool_table.add_column("Metric", style="cyan")
        pool_table.add_column("Value", style="green")
        pool_table.add_row("Prompts in pool", str(total_pool_prompts))
        pool_table.add_row("Prompts per session", str(self.settings['num_prompts']))
        pool_table.add_row("Target pool size", str(self.settings['cached_pool_volume']))
        pool_table.add_row("Available sessions", str(total_pool_prompts // self.settings['num_prompts']))
        # History statistics
        total_history_prompts = len(self.historic_prompts)
        history_table = Table(title="Prompt History Statistics")
        history_table.add_column("Metric", style="cyan")
        history_table.add_column("Value", style="green")
        history_table.add_row("Total prompts in history", str(total_history_prompts))
        history_table.add_row("History capacity", "60 prompts")
        history_table.add_row("Available slots", str(max(0, 60 - total_history_prompts)))
        # Display both tables
        self.console.print(pool_table)
        self.console.print()  # Empty line between tables
        self.console.print(history_table)

    def interactive_mode(self):
        """Run in interactive mode with user prompts."""
        self.console.print(Panel.fit(
            "[bold]Daily Journal Prompt Generator[/bold]\n"
            "Generate creative writing prompts for your journal practice",
            border_style="green"
        ))
        while True:
            # Display the most recent prompt from history if available
            if self.historic_prompts:
                most_recent_prompt = self.historic_prompts[0]  # prompt00 is always first
                prompt_key = list(most_recent_prompt.keys())[0]
                prompt_text = most_recent_prompt[prompt_key]
                self.console.print("\n" + "=" * 60)
                self.console.print("[bold cyan]📝 CURRENT PROMPT 📝[/bold cyan]")
                self.console.print("=" * 60)
                self.console.print(f"\n[cyan]{prompt_text}[/cyan]\n")
                self.console.print("=" * 60 + "\n")
            self.console.print("\n[bold]Options:[/bold]")
            self.console.print("1. Draw prompts from pool (no API call)")
            self.console.print("2. Fill prompt pool using API")
            self.console.print("3. View combined statistics")
            self.console.print("4. Generate and rate theme feedback words")
            self.console.print("5. Exit")
            choice = Prompt.ask("\nEnter your choice", choices=["1", "2", "3", "4", "5"], default="1")
            if choice == "1":
                # Draw prompts from the pool
                drawn_prompts = self.draw_prompts_from_pool()
                if drawn_prompts:
                    self.display_prompts(drawn_prompts)
                    # Ask which prompt to add to history
                    prompt_num = Prompt.ask(
                        "\nWhich prompt number would you like to add to history?",
                        choices=[str(i) for i in range(1, len(drawn_prompts) + 1)],
                        default="1"
                    )
                    prompt_idx = int(prompt_num) - 1
                    prompt_text = drawn_prompts[prompt_idx]
                    # Add the chosen prompt to the historic prompts cyclic buffer
                    self.add_prompt_to_history(prompt_text)
                    self.console.print("[green]Prompt added to history as prompt00[/green]")
            elif choice == "2":
                # Fill the prompt pool to target volume using the API
                total_added = self.fill_pool_to_target()
                if total_added > 0:
                    self.console.print(f"[green]Successfully added {total_added} prompts to pool[/green]")
                else:
                    self.console.print("[yellow]No prompts were added to pool[/yellow]")
            elif choice == "3":
                self.show_combined_stats()
            elif choice == "4":
                # Generate and rate theme feedback words
                theme_words = self.generate_theme_feedback_words()
                if theme_words:
                    feedback_items = self.collect_feedback_ratings(theme_words)
                    self.update_feedback_words(feedback_items)
                else:
                    self.console.print("[yellow]No theme words were generated[/yellow]")
            elif choice == "5":
                self.console.print("[green]Goodbye! Happy journaling! 📓[/green]")
                break


def main():
    """Main entry point for the script."""
    parser = argparse.ArgumentParser(description="Generate journal prompts using AI")
    parser.add_argument(
        "--interactive", "-i",
        action="store_true",
        help="Run in interactive mode"
    )
    parser.add_argument(
        "--config", "-c",
        default=".env",
        help="Path to configuration file (default: .env)"
    )
    parser.add_argument(
        "--stats", "-s",
        action="store_true",
        help="Show combined statistics (pool and history)"
    )
    parser.add_argument(
        "--fill-pool", "-f",
        action="store_true",
        help="Fill prompt pool using API"
    )
    args = parser.parse_args()
    # Initialize the generator
    generator = JournalPromptGenerator(config_path=args.config)
    if args.stats:
        generator.show_combined_stats()
    elif args.fill_pool:
        # Fill the prompt pool to target volume using the API
        total_added = generator.fill_pool_to_target()
        if total_added > 0:
            generator.console.print(f"[green]Successfully added {total_added} prompts to pool[/green]")
        else:
            generator.console.print("[yellow]No prompts were added to pool[/yellow]")
    elif args.interactive:
        generator.interactive_mode()
    else:
        # Default: draw prompts from the pool (no API call)
        drawn_prompts = generator.draw_prompts_from_pool()
        if drawn_prompts:
            generator.display_prompts(drawn_prompts)
        generator.console.print("[yellow]Note: These prompts were drawn from the pool. Use --fill-pool to add more prompts.[/yellow]")


if __name__ == "__main__":
    main()