#!/usr/bin/env python3 """ Daily Journal Prompt Generator A tool that uses AI to generate creative writing prompts for daily journaling. """ import os import json import sys import argparse import configparser from datetime import datetime from typing import List, Dict, Any, Optional from pathlib import Path from openai import OpenAI from dotenv import load_dotenv from rich.console import Console from rich.panel import Panel from rich.table import Table from rich.prompt import Prompt, Confirm from rich.progress import Progress, SpinnerColumn, TextColumn class JournalPromptGenerator: """Main class for generating journal prompts using AI.""" def __init__(self, config_path: str = ".env"): """Initialize the generator with configuration.""" self.console = Console() self.config_path = config_path self.client = None self.historic_prompts = [] self.prompt_template = "" self.settings = {} # Load configuration self._load_config() self._load_settings() # Load data files self._load_prompt_template() self._load_historic_prompts() def _load_config(self): """Load configuration from environment file.""" load_dotenv(self.config_path) # Get API key self.api_key = os.getenv("DEEPSEEK_API_KEY") or os.getenv("OPENAI_API_KEY") if not self.api_key: self.console.print("[red]Error: No API key found in .env file[/red]") self.console.print("Please add DEEPSEEK_API_KEY or OPENAI_API_KEY to your .env file") sys.exit(1) # Get API base URL (default to DeepSeek) self.base_url = os.getenv("API_BASE_URL", "https://api.deepseek.com") # Get model (default to deepseek-chat) self.model = os.getenv("MODEL", "deepseek-chat") # Initialize OpenAI client self.client = OpenAI( api_key=self.api_key, base_url=self.base_url ) def _load_settings(self): """Load settings from settings.cfg configuration file.""" config = configparser.ConfigParser() # Set default values self.settings = { 'min_length': 500, 'max_length': 1000, 'num_prompts': 6 } try: config.read('settings.cfg') if 'prompts' in config: prompts_section = config['prompts'] # Load min_length if 'min_length' in prompts_section: self.settings['min_length'] = int(prompts_section['min_length']) # Load max_length if 'max_length' in prompts_section: self.settings['max_length'] = int(prompts_section['max_length']) # Load num_prompts if 'num_prompts' in prompts_section: self.settings['num_prompts'] = int(prompts_section['num_prompts']) except FileNotFoundError: self.console.print("[yellow]Warning: settings.cfg not found, using default values[/yellow]") except ValueError as e: self.console.print(f"[yellow]Warning: Invalid value in settings.cfg: {e}, using default values[/yellow]") except Exception as e: self.console.print(f"[yellow]Warning: Error reading settings.cfg: {e}, using default values[/yellow]") def _load_prompt_template(self): """Load the prompt template from ds_prompt.txt and update with config values.""" try: with open("ds_prompt.txt", "r") as f: template = f.read() # Replace hardcoded values with config values template = template.replace( "between 500 and 1000 characters", f"between {self.settings['min_length']} and {self.settings['max_length']} characters" ) # Replace the number of prompts (6) with config value template = template.replace( "Please generate 6 writing prompts", f"Please generate {self.settings['num_prompts']} writing prompts" ) self.prompt_template = template except FileNotFoundError: self.console.print("[red]Error: ds_prompt.txt not found[/red]") sys.exit(1) def _load_historic_prompts(self): """Load historic prompts from JSON file.""" try: with open("historic_prompts.json", "r") as f: self.historic_prompts = json.load(f) except FileNotFoundError: self.console.print("[yellow]Warning: historic_prompts.json not found, starting with empty history[/yellow]") self.historic_prompts = [] except json.JSONDecodeError: self.console.print("[yellow]Warning: historic_prompts.json is corrupted, starting with empty history[/yellow]") self.historic_prompts = [] def _save_historic_prompts(self): """Save historic prompts to JSON file (keeping only first 60).""" # Keep only the first 60 prompts (newest are at the beginning) if len(self.historic_prompts) > 60: self.historic_prompts = self.historic_prompts[:60] with open("historic_prompts.json", "w") as f: json.dump(self.historic_prompts, f, indent=2) def _renumber_prompts(self): """Renumber all prompts to maintain prompt00-prompt59 range.""" renumbered_prompts = [] for i, prompt_dict in enumerate(self.historic_prompts): # Get the prompt text from the first key in the dictionary prompt_key = list(prompt_dict.keys())[0] prompt_text = prompt_dict[prompt_key] # Create new prompt with correct numbering new_prompt_key = f"prompt{i:02d}" renumbered_prompts.append({ new_prompt_key: prompt_text }) self.historic_prompts = renumbered_prompts def add_prompt_to_history(self, prompt_text: str): """ Add a single prompt to the historic prompts cyclic buffer. The new prompt becomes prompt00, all others shift down, and prompt59 is discarded. """ # Create the new prompt object new_prompt = { "prompt00": prompt_text } # Shift all existing prompts down by one position # We'll create a new list starting with the new prompt updated_prompts = [new_prompt] # Add all existing prompts, shifting their numbers down by one for i, prompt_dict in enumerate(self.historic_prompts): if i >= 59: # We only keep 60 prompts total (00-59) break # Get the prompt text prompt_key = list(prompt_dict.keys())[0] prompt_text = prompt_dict[prompt_key] # Create prompt with new number (shifted down by one) new_prompt_key = f"prompt{i+1:02d}" updated_prompts.append({ new_prompt_key: prompt_text }) self.historic_prompts = updated_prompts self._save_historic_prompts() def _prepare_prompt(self) -> str: """Prepare the full prompt with historic context.""" # Format historic prompts for the AI if self.historic_prompts: historic_context = json.dumps(self.historic_prompts, indent=2) full_prompt = f"{self.prompt_template}\n\nPrevious prompts:\n{historic_context}" else: full_prompt = self.prompt_template return full_prompt def _parse_ai_response(self, response_content: str) -> List[Dict[str, str]]: """ Parse the AI response to extract new prompts. Expected format: JSON array with keys "newprompt0" to "newpromptN" where N = num_prompts-1 """ try: # Try to parse as JSON data = json.loads(response_content) # Convert to list of prompt dictionaries new_prompts = [] for i in range(self.settings['num_prompts']): key = f"newprompt{i}" if key in data: prompt_text = data[key] prompt_obj = { f"prompt{len(self.historic_prompts) + i:02d}": prompt_text } new_prompts.append(prompt_obj) return new_prompts except json.JSONDecodeError: # If not valid JSON, try to extract prompts from text self.console.print("[yellow]Warning: AI response is not valid JSON, attempting to extract prompts...[/yellow]") self.console.print(f"[yellow]Full response content for debugging:[/yellow]") self.console.print(f"[yellow]{response_content}[/yellow]") # Look for patterns in the text lines = response_content.strip().split('\n') new_prompts = [] for i, line in enumerate(lines[:self.settings['num_prompts']]): # Take first N non-empty lines line = line.strip() if line and len(line) > 50: # Reasonable minimum length for a prompt prompt_obj = { f"prompt{len(self.historic_prompts) + i:02d}": line } new_prompts.append(prompt_obj) return new_prompts def generate_prompts(self) -> List[Dict[str, str]]: """Generate new journal prompts using AI.""" self.console.print("\n[cyan]Generating new journal prompts...[/cyan]") # Prepare the prompt full_prompt = self._prepare_prompt() # Show progress with Progress( SpinnerColumn(), TextColumn("[progress.description]{task.description}"), transient=True, ) as progress: task = progress.add_task("Calling AI API...", total=None) try: # Call the AI API response = self.client.chat.completions.create( model=self.model, messages=[ {"role": "system", "content": "You are a creative writing assistant that generates journal prompts. Always respond with valid JSON."}, {"role": "user", "content": full_prompt} ], temperature=0.7, max_tokens=2000 ) response_content = response.choices[0].message.content except Exception as e: self.console.print(f"[red]Error calling AI API: {e}[/red]") return [] # Parse the response new_prompts = self._parse_ai_response(response_content) if not new_prompts: self.console.print("[red]Error: Could not parse any prompts from AI response[/red]") return [] # Note: Prompts are NOT added to historic_prompts here # They will be added only when the user chooses one in interactive mode # via the add_prompt_to_history() method return new_prompts def display_prompts(self, prompts: List[Dict[str, str]]): """Display generated prompts in a nice format.""" self.console.print("\n" + "="*60) self.console.print("[bold green]✨ NEW JOURNAL PROMPTS GENERATED ✨[/bold green]") self.console.print("="*60 + "\n") for i, prompt_dict in enumerate(prompts, 1): # Extract prompt text (key is like "prompt60", "prompt61", etc.) prompt_key = list(prompt_dict.keys())[0] prompt_text = prompt_dict[prompt_key] # Create a panel for each prompt panel = Panel( f"[cyan]{prompt_text}[/cyan]", title=f"[bold]Prompt #{i}[/bold]", border_style="blue", padding=(1, 2) ) self.console.print(panel) self.console.print() # Empty line between prompts def show_history_stats(self): """Show statistics about prompt history.""" total_prompts = len(self.historic_prompts) table = Table(title="Prompt History Statistics") table.add_column("Metric", style="cyan") table.add_column("Value", style="green") table.add_row("Total prompts in history", str(total_prompts)) table.add_row("History capacity", "60 prompts") table.add_row("Available slots", str(max(0, 60 - total_prompts))) self.console.print(table) def interactive_mode(self): """Run in interactive mode with user prompts.""" self.console.print(Panel.fit( "[bold]Daily Journal Prompt Generator[/bold]\n" "Generate creative writing prompts for your journal practice", border_style="green" )) while True: self.console.print("\n[bold]Options:[/bold]") self.console.print("1. Generate new prompts") self.console.print("2. View history statistics") self.console.print("3. Exit") choice = Prompt.ask("\nEnter your choice", choices=["1", "2", "3"], default="1") if choice == "1": new_prompts = self.generate_prompts() if new_prompts: self.display_prompts(new_prompts) # Ask if user wants to save a prompt if Confirm.ask("\nWould you like to save one of these prompts to a file?"): prompt_num = Prompt.ask( "Which prompt number would you like to save?", choices=[str(i) for i in range(1, len(new_prompts) + 1)], default="1" ) prompt_idx = int(prompt_num) - 1 prompt_dict = new_prompts[prompt_idx] prompt_key = list(prompt_dict.keys())[0] prompt_text = prompt_dict[prompt_key] # Save to file timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") filename = f"journal_prompt_{timestamp}.txt" with open(filename, "w") as f: f.write(f"Journal Prompt - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n") f.write("="*50 + "\n\n") f.write(prompt_text) f.write("\n\n" + "="*50 + "\n") f.write("Happy writing! ✍️\n") self.console.print(f"[green]Prompt saved to {filename}[/green]") # Add the chosen prompt to historic prompts cyclic buffer self.add_prompt_to_history(prompt_text) self.console.print(f"[green]Prompt added to history as prompt00[/green]") elif choice == "2": self.show_history_stats() elif choice == "3": self.console.print("[green]Goodbye! Happy journaling! 📓[/green]") break def main(): """Main entry point for the script.""" parser = argparse.ArgumentParser(description="Generate journal prompts using AI") parser.add_argument( "--interactive", "-i", action="store_true", help="Run in interactive mode" ) parser.add_argument( "--config", "-c", default=".env", help="Path to configuration file (default: .env)" ) parser.add_argument( "--stats", "-s", action="store_true", help="Show history statistics" ) args = parser.parse_args() # Initialize generator generator = JournalPromptGenerator(config_path=args.config) if args.stats: generator.show_history_stats() elif args.interactive: generator.interactive_mode() else: # Default: generate and display prompts new_prompts = generator.generate_prompts() if new_prompts: generator.display_prompts(new_prompts) if __name__ == "__main__": main()