#!/usr/bin/env python3
"""
Simple Daily Journal Prompt Generator
A lightweight version without rich dependency.
"""

import os
import json
import sys
import argparse
from datetime import datetime
from typing import List, Dict, Any

from openai import OpenAI
from dotenv import load_dotenv

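# Example .env file read by SimplePromptGenerator._load_config() below.
# The variable names come from the os.getenv() calls; the values shown are
# illustrative placeholders only, not real credentials:
#
#   DEEPSEEK_API_KEY=sk-...        # or OPENAI_API_KEY
#   API_BASE_URL=https://api.deepseek.com
#   MODEL=deepseek-chat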
class SimplePromptGenerator:
    """Simple version without rich dependency."""

    def __init__(self, config_path: str = ".env"):
        """Initialize the generator with configuration."""
        self.config_path = config_path
        self.client = None
        self.historic_prompts = []
        self.prompt_template = ""

        # Load configuration
        self._load_config()

        # Load data files
        self._load_prompt_template()
        self._load_historic_prompts()

    def _load_config(self):
        """Load configuration from environment file."""
        load_dotenv(self.config_path)

        # Get API key
        self.api_key = os.getenv("DEEPSEEK_API_KEY") or os.getenv("OPENAI_API_KEY")
        if not self.api_key:
            print("Error: No API key found in .env file")
            print("Please add DEEPSEEK_API_KEY or OPENAI_API_KEY to your .env file")
            sys.exit(1)

        # Get API base URL (default to DeepSeek)
        self.base_url = os.getenv("API_BASE_URL", "https://api.deepseek.com")

        # Get model (default to deepseek-chat)
        self.model = os.getenv("MODEL", "deepseek-chat")

        # Initialize OpenAI client
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )

    def _load_prompt_template(self):
        """Load the prompt template from ds_prompt.txt."""
        try:
            with open("ds_prompt.txt", "r") as f:
                self.prompt_template = f.read()
        except FileNotFoundError:
            print("Error: ds_prompt.txt not found")
            sys.exit(1)

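    # historic_prompts.json is expected to hold a JSON list of single-key
    # objects, as produced by _parse_ai_response() below. A sketch of the
    # assumed layout (the prompt texts are illustrative only):
    #
    #   [
    #     {"prompt00": "Write about a place where you felt completely at ease..."},
    #     {"prompt01": "Describe a conversation you wish you could redo..."}
    #   ]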
    def _load_historic_prompts(self):
        """Load historic prompts from JSON file."""
        try:
            with open("historic_prompts.json", "r") as f:
                self.historic_prompts = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            print("Warning: Starting with empty prompt history")
            self.historic_prompts = []

    def _save_historic_prompts(self):
        """Save historic prompts to JSON file (keeping only last 60)."""
        # Keep only the last 60 prompts
        if len(self.historic_prompts) > 60:
            self.historic_prompts = self.historic_prompts[-60:]

        with open("historic_prompts.json", "w") as f:
            json.dump(self.historic_prompts, f, indent=2)

    def _prepare_prompt(self) -> str:
        """Prepare the full prompt with historic context."""
        if self.historic_prompts:
            historic_context = json.dumps(self.historic_prompts, indent=2)
            full_prompt = f"{self.prompt_template}\n\nPrevious prompts:\n{historic_context}"
        else:
            full_prompt = self.prompt_template

        return full_prompt

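    # The model is instructed (via the system message in generate_prompts) to
    # reply with JSON. The parser below assumes keys "newprompt0" through
    # "newprompt5"; a well-formed response is expected to look roughly like:
    #
    #   {"newprompt0": "...", "newprompt1": "...", ..., "newprompt5": "..."}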
    def _parse_ai_response(self, response_content: str) -> List[Dict[str, str]]:
        """Parse the AI response to extract new prompts."""
        try:
            # Try to parse as JSON
            data = json.loads(response_content)

            # Convert to list of prompt dictionaries
            new_prompts = []
            for i in range(6):
                key = f"newprompt{i}"
                if key in data:
                    prompt_text = data[key]
                    prompt_obj = {
                        f"prompt{len(self.historic_prompts) + i:02d}": prompt_text
                    }
                    new_prompts.append(prompt_obj)

            return new_prompts

        except json.JSONDecodeError:
            # If not valid JSON, try to extract prompts from text
            print("Warning: AI response is not valid JSON, attempting to extract prompts...")

            # Look for patterns in the text
            lines = response_content.strip().split('\n')
            new_prompts = []

            for i, line in enumerate(lines[:6]):
                line = line.strip()
                if line and len(line) > 50:
                    prompt_obj = {
                        f"prompt{len(self.historic_prompts) + i:02d}": line
                    }
                    new_prompts.append(prompt_obj)

            return new_prompts

    def generate_prompts(self) -> List[Dict[str, str]]:
        """Generate new journal prompts using AI."""
        print("\nGenerating new journal prompts...")

        # Prepare the prompt
        full_prompt = self._prepare_prompt()

        try:
            # Call the AI API
            print("Calling AI API...")
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": "You are a creative writing assistant that generates journal prompts. Always respond with valid JSON."},
                    {"role": "user", "content": full_prompt}
                ],
                temperature=0.7,
                max_tokens=2000
            )

            response_content = response.choices[0].message.content

        except Exception as e:
            print(f"Error calling AI API: {e}")
            return []

        # Parse the response
        new_prompts = self._parse_ai_response(response_content)

        if not new_prompts:
            print("Error: Could not parse any prompts from AI response")
            return []

        # Add to historic prompts
        self.historic_prompts.extend(new_prompts)

        # Save updated history
        self._save_historic_prompts()

        return new_prompts

    def display_prompts(self, prompts: List[Dict[str, str]]):
        """Display generated prompts in a simple format."""
        print("\n" + "="*60)
        print("✨ NEW JOURNAL PROMPTS GENERATED ✨")
        print("="*60 + "\n")

        for i, prompt_dict in enumerate(prompts, 1):
            # Extract prompt text
            prompt_key = list(prompt_dict.keys())[0]
            prompt_text = prompt_dict[prompt_key]

            print(f"Prompt #{i}:")
            print("-" * 40)
            print(prompt_text)
            print("-" * 40 + "\n")

    def show_history_stats(self):
        """Show statistics about prompt history."""
        total_prompts = len(self.historic_prompts)

        print("\nPrompt History Statistics:")
        print("-" * 30)
        print(f"Total prompts in history: {total_prompts}")
        print("History capacity: 60 prompts")
        print(f"Available slots: {max(0, 60 - total_prompts)}")

    def save_prompt_to_file(self, prompt_dict: Dict[str, str], filename: str = None):
        """Save a prompt to a text file."""
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]

        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"journal_prompt_{timestamp}.txt"

        with open(filename, "w") as f:
            f.write(f"Journal Prompt - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write("="*50 + "\n\n")
            f.write(prompt_text)
            f.write("\n\n" + "="*50 + "\n")
            f.write("Happy writing! ✍️\n")

        print(f"Prompt saved to {filename}")

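# Example command-line usage, based on the argparse flags defined below
# (the script filename is an assumption; use the actual file name):
#
#   python simple_prompt_generator.py              # generate and display prompts
#   python simple_prompt_generator.py --stats      # show history statistics
#   python simple_prompt_generator.py --save 2     # also save prompt #2 to a file
#   python simple_prompt_generator.py -c other.env # use an alternate config file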
def main():
    """Main entry point for the simple script."""
    parser = argparse.ArgumentParser(description="Generate journal prompts using AI (simple version)")
    parser.add_argument(
        "--stats", "-s",
        action="store_true",
        help="Show history statistics"
    )
    parser.add_argument(
        "--save", "-S",
        type=int,
        help="Save a specific prompt number to file (1-6)"
    )
    parser.add_argument(
        "--config", "-c",
        default=".env",
        help="Path to configuration file (default: .env)"
    )

    args = parser.parse_args()

    # Initialize generator
    generator = SimplePromptGenerator(config_path=args.config)

    if args.stats:
        generator.show_history_stats()
    else:
        # Generate prompts
        new_prompts = generator.generate_prompts()
        if new_prompts:
            generator.display_prompts(new_prompts)

            # Save specific prompt if requested
            if args.save:
                prompt_num = args.save
                if 1 <= prompt_num <= len(new_prompts):
                    generator.save_prompt_to_file(new_prompts[prompt_num - 1])
                else:
                    print(f"Error: Prompt number must be between 1 and {len(new_prompts)}")


if __name__ == "__main__":
    main()