non-building checkpoint 1
This commit is contained in:
257
test_backend.py
Normal file
257
test_backend.py
Normal file
@@ -0,0 +1,257 @@
|
||||
#!/usr/bin/env python3
"""
Test script to verify the backend API structure.
"""

import sys
import os

# Add backend to path
# so that the `app.*` package imports below resolve when this script is run
# from the repository root (backend/ sits next to this file).
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'backend'))
def test_imports():
    """Test that all required modules can be imported.

    Imports each backend module one at a time, printing a check mark after
    each success so the first failing import is easy to identify.

    Returns:
        bool: True if every module imported, False on any import failure.
    """
    print("Testing imports...")

    try:
        from app.core.config import settings
        print("✓ Config module imported successfully")

        from app.core.logging import setup_logging
        print("✓ Logging module imported successfully")

        from app.services.data_service import DataService
        print("✓ DataService imported successfully")

        from app.services.ai_service import AIService
        print("✓ AIService imported successfully")

        from app.services.prompt_service import PromptService
        print("✓ PromptService imported successfully")

        from app.models.prompt import PromptResponse, PoolStatsResponse
        print("✓ Models imported successfully")

        from app.api.v1.api import api_router
        print("✓ API router imported successfully")

        return True

    except ImportError as e:
        # Missing module or package — the expected failure mode for an
        # incomplete checkout or unconfigured sys.path.
        print(f"✗ Import error: {e}")
        return False
    except Exception as e:
        # Any other error raised at import time (e.g. config validation).
        print(f"✗ Error: {e}")
        return False
def test_config():
    """Verify the application settings load and expose the expected fields.

    Returns:
        bool: True when every settings attribute is readable, False otherwise.
    """
    print("\nTesting configuration...")

    try:
        from app.core.config import settings

        # (display label, settings attribute) pairs, printed in this order.
        fields = [
            ("Project name", "PROJECT_NAME"),
            ("Version", "VERSION"),
            ("Debug mode", "DEBUG"),
            ("Environment", "ENVIRONMENT"),
            ("Host", "HOST"),
            ("Port", "PORT"),
            ("Min prompt length", "MIN_PROMPT_LENGTH"),
            ("Max prompt length", "MAX_PROMPT_LENGTH"),
            ("Prompts per session", "NUM_PROMPTS_PER_SESSION"),
            ("Cached pool volume", "CACHED_POOL_VOLUME"),
        ]
        # getattr raises AttributeError for a missing field, exactly like the
        # direct attribute access it replaces; the except below reports it.
        for label, attr in fields:
            print(f"✓ {label}: {getattr(settings, attr)}")

        return True

    except Exception as e:
        print(f"✗ Configuration error: {e}")
        return False
def test_data_service():
    """Test DataService initialization and check its expected data files.

    Missing data files or a missing data directory only produce warnings
    (they are legitimate for a fresh installation); only an exception —
    typically a failed import or constructor error — counts as a failure.

    Returns:
        bool: True when DataService constructs cleanly, False on error.
    """
    print("\nTesting DataService...")

    try:
        from app.services.data_service import DataService

        data_service = DataService()
        print("✓ DataService initialized successfully")

        # Fix: dropped the redundant local `import os` — os is already
        # imported at module level.
        # NOTE(review): this resolves data/ relative to the PARENT of this
        # script's directory, while sys.path adds ./backend — confirm the
        # data directory really lives one level up.
        data_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
        if os.path.exists(data_dir):
            print(f"✓ Data directory exists: {data_dir}")

            # Files the backend is expected to read at runtime.
            required_files = [
                'prompts_historic.json',
                'prompts_pool.json',
                'feedback_words.json',
                'feedback_historic.json',
                'ds_prompt.txt',
                'ds_feedback.txt',
                'settings.cfg'
            ]

            for file in required_files:
                file_path = os.path.join(data_dir, file)
                if os.path.exists(file_path):
                    print(f"✓ {file} exists")
                else:
                    print(f"⚠ {file} not found (this may be OK for new installations)")
        else:
            print(f"⚠ Data directory not found: {data_dir}")

        return True

    except Exception as e:
        print(f"✗ DataService error: {e}")
        return False
def test_models():
    """Exercise each Pydantic response model with a representative payload.

    Returns:
        bool: True when every model constructs without a validation error.
    """
    print("\nTesting Pydantic models...")

    try:
        from app.models.prompt import (
            PromptResponse,
            PoolStatsResponse,
            HistoryStatsResponse,
            FeedbackWord
        )

        # Construct each model once; a validation failure raises and is
        # reported by the except clause below.
        sample_prompt = PromptResponse(key="prompt00", text="Test prompt text", position=0)
        print("✓ PromptResponse model works")

        sample_pool = PoolStatsResponse(
            total_prompts=10,
            prompts_per_session=6,
            target_pool_size=20,
            available_sessions=1,
            needs_refill=True,
        )
        print("✓ PoolStatsResponse model works")

        sample_history = HistoryStatsResponse(
            total_prompts=5,
            history_capacity=60,
            available_slots=55,
            is_full=False,
        )
        print("✓ HistoryStatsResponse model works")

        sample_word = FeedbackWord(key="feedback00", word="creativity", weight=5)
        print("✓ FeedbackWord model works")

        return True

    except Exception as e:
        print(f"✗ Models error: {e}")
        return False
def test_api_structure():
    """Assemble the FastAPI app and check the expected routes are mounted.

    Returns:
        bool: True when the router mounts cleanly; missing individual routes
        only produce warnings.
    """
    print("\nTesting API structure...")

    try:
        from fastapi import FastAPI
        from app.api.v1.api import api_router

        app = FastAPI()
        app.include_router(api_router, prefix="/api/v1")

        # Every path exposed by the assembled application.
        routes = [r.path for r in app.routes if hasattr(r, 'path')]

        expected_routes = [
            '/api/v1/prompts/draw',
            '/api/v1/prompts/fill-pool',
            '/api/v1/prompts/stats',
            '/api/v1/prompts/history/stats',
            '/api/v1/prompts/history',
            '/api/v1/prompts/select/{prompt_index}',
            '/api/v1/feedback/generate',
            '/api/v1/feedback/rate',
            '/api/v1/feedback/current',
            '/api/v1/feedback/history'
        ]

        print("✓ API router integrated successfully")
        print(f"✓ Found {len(routes)} routes")

        # Substring match so trailing slashes or extra path segments on the
        # registered route do not cause a false miss.
        for expected_route in expected_routes:
            if any(expected_route in path for path in routes):
                print(f"✓ Route found: {expected_route}")
            else:
                print(f"⚠ Route not found: {expected_route}")

        return True

    except Exception as e:
        print(f"✗ API structure error: {e}")
        return False
def main():
    """Run every backend structure check and print a pass/fail summary.

    Returns:
        bool: True when all checks passed, False if any failed.
    """
    banner = "=" * 60
    print(banner)
    print("Daily Journal Prompt Generator - Backend API Test")
    print(banner)

    tests = [
        ("Imports", test_imports),
        ("Configuration", test_config),
        ("Data Service", test_data_service),
        ("Models", test_models),
        ("API Structure", test_api_structure),
    ]

    # Run each check under its own heading, recording (name, passed) pairs.
    results = []
    for name, check in tests:
        print(f"\n{name}:")
        print("-" * 40)
        results.append((name, check()))

    print("\n" + banner)
    print("Test Summary:")
    print(banner)

    for name, passed in results:
        print(f"{name:20} {'✓ PASS' if passed else '✗ FAIL'}")
    all_passed = all(passed for _, passed in results)

    print("\n" + banner)
    if all_passed:
        print("All tests passed! 🎉")
        print("Backend API structure is ready.")
    else:
        print("Some tests failed. Please check the errors above.")

    return all_passed
if __name__ == "__main__":
    # Exit status mirrors the overall result so shells/CI can consume it.
    sys.exit(0 if main() else 1)
Reference in New Issue
Block a user