Refactor cockpit to use DockerTmuxController pattern
Based on claude-code-tools TmuxCLIController, this refactor: - Adds DockerTmuxController class for robust tmux session management - Implements send_keys() with configurable delay_enter - Implements capture_pane() for output retrieval - Implements wait_for_prompt() for pattern-based completion detection - Implements wait_for_idle() for content-hash-based idle detection - Implements wait_for_shell_prompt() for shell prompt detection Also includes workflow improvements: - Pre-task git snapshot before agent execution - Post-task commit protocol in agent guidelines Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
201
examples/demo_concurrent_tasks.py
Normal file
201
examples/demo_concurrent_tasks.py
Normal file
@@ -0,0 +1,201 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Demo: Concurrent Task Management with Responsive Dispatcher
|
||||
|
||||
This demonstrates:
|
||||
1. Dispatching multiple tasks concurrently
|
||||
2. Non-blocking task dispatch (returns immediately)
|
||||
3. Monitoring multiple jobs independently
|
||||
4. Live status updates without blocking
|
||||
5. Pretty-printed feedback
|
||||
"""
|
||||
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
# Add lib to path
|
||||
lib_path = Path(__file__).parent.parent / "lib"
|
||||
sys.path.insert(0, str(lib_path))
|
||||
|
||||
from dispatcher_enhancements import EnhancedDispatcher
|
||||
from cli_feedback import Colors, ProgressBar
|
||||
|
||||
|
||||
def demo_concurrent_dispatch():
    """Demo 1: Dispatch multiple tasks quickly"""
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 1: Concurrent Task Dispatch ==={Colors.RESET}\n")
    print("Dispatching 5 tasks across different projects...\n")

    enhanced = EnhancedDispatcher()

    # (project, task description) pairs to fan out across projects.
    work_items = [
        ("overbits", "fix the login button and add dark mode"),
        ("musica", "analyze audio waveform and optimize performance"),
        ("dss", "verify digital signature chain of custody"),
        ("librechat", "implement chat message search functionality"),
        ("admin", "update all system dependencies and run security scan"),
    ]

    dispatched = []
    t0 = time.time()

    # Fire off every task; dispatch_and_report returns immediately.
    for project_name, description in work_items:
        print(f"Dispatching: {project_name}")
        new_job_id, _status = enhanced.dispatch_and_report(
            project_name, description, show_details=False, show_feedback=False
        )
        print(f" → {new_job_id}")
        dispatched.append((new_job_id, project_name))

    total_elapsed = time.time() - t0

    print(f"\n{Colors.GREEN}✓ All tasks dispatched in {total_elapsed:.2f}s{Colors.RESET}")
    print(f" (No blocking - all jobs are running concurrently)\n")

    return enhanced, dispatched
|
||||
|
||||
|
||||
def demo_status_polling(enhanced, job_ids):
    """Demo 2: Poll status without blocking"""
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 2: Non-Blocking Status Polling ==={Colors.RESET}\n")
    print("Checking status of all jobs (instantly, no blocking):\n")

    for current_id, _project in job_ids:
        # Bypass the cache so each poll reflects the live job state.
        job_status = enhanced.dispatcher.get_status(current_id, use_cache=False)
        if job_status:
            enhanced.feedback.show_status_line(job_status)
    print()
|
||||
|
||||
|
||||
def demo_concurrent_monitoring(enhanced, job_ids):
    """Demo 3: Monitor multiple jobs independently"""
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 3: Independent Job Monitoring ==={Colors.RESET}\n")

    print("Simulating job execution with progress updates:\n")

    # Synthetic (percent, message) milestones used for every job.
    stages = [
        (5, "Initializing agent"),
        (10, "Setting up environment"),
        (25, "Loading dependencies"),
        (50, "Processing request"),
        (75, "Applying changes"),
        (90, "Running validation"),
    ]

    for current_id, _project in job_ids:
        # Drive the job through every intermediate milestone...
        for percent, note in stages:
            enhanced.dispatcher.update_status(current_id, "running", percent, note)
        # ...then mark it finished.
        enhanced.dispatcher.update_status(
            current_id, "completed", 100, "Task completed successfully", exit_code=0
        )

    all_jobs = enhanced.dispatcher.list_jobs()
    print(f"All jobs updated. Current status:\n")

    # Display at most the first five jobs.
    for entry in all_jobs[:5]:
        enhanced.feedback.show_status(entry)
|
||||
|
||||
|
||||
def demo_list_all_jobs(enhanced):
    """Demo 4: List all jobs"""
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 4: List All Jobs ==={Colors.RESET}\n")
    # Delegate the rendering entirely to the dispatcher's summary view.
    enhanced.show_jobs_summary()
|
||||
|
||||
|
||||
def demo_concurrent_summary(enhanced):
    """Demo 5: Show concurrent job summary"""
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 5: Concurrent Job Summary ==={Colors.RESET}\n")
    # Delegate to the dispatcher's concurrency-oriented summary view.
    enhanced.show_concurrent_summary()
|
||||
|
||||
|
||||
def demo_performance_metrics():
    """Demo 6: Measure dispatch and status-retrieval performance.

    Times 100 task dispatches, then 1000 cached and 1000 fresh status
    reads, and prints throughput figures.
    """
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 6: Performance Metrics ==={Colors.RESET}\n")

    # NOTE(review): "ResponseiveDispatcher" looks like a typo for
    # "ResponsiveDispatcher", but it must match the class actually declared
    # in responsive_dispatcher.py — confirm before renaming.
    from responsive_dispatcher import ResponseiveDispatcher

    print("Dispatch Performance (100 tasks):\n")

    dispatcher = ResponseiveDispatcher()

    # Time 100 dispatches. perf_counter is monotonic and higher-resolution
    # than time.time(), so it is the right clock for benchmarking.
    start = time.perf_counter()
    for i in range(100):
        dispatcher.dispatch_task(f"proj{i % 5}", f"task_{i}")
    elapsed = time.perf_counter() - start

    avg_dispatch_time = (elapsed * 1000) / 100  # ms per dispatch
    # Guard against a zero-duration measurement on a very fast machine.
    throughput = 100 / elapsed if elapsed > 0 else float("inf")
    print(f" Total time: {elapsed:.3f}s")
    print(f" Tasks: 100")
    print(f" Avg per task: {avg_dispatch_time:.2f}ms")
    print(f" Throughput: {throughput:.0f} tasks/second")

    # Status retrieval performance
    jobs = dispatcher.list_jobs()
    job_id = jobs[0]["id"] if jobs else None

    if job_id:
        print(f"\nStatus Retrieval Performance:\n")

        # Cached reads
        start = time.perf_counter()
        for _ in range(1000):
            dispatcher.get_status(job_id, use_cache=True)
        cached_time = time.perf_counter() - start

        # Fresh reads
        start = time.perf_counter()
        for _ in range(1000):
            dispatcher.get_status(job_id, use_cache=False)
        fresh_time = time.perf_counter() - start

        # BUG FIX: per-read time is total/1000 seconds; converting to
        # microseconds multiplies by 1e6, i.e. total * 1000 overall. The
        # original printed `total/1000*1000` (the raw total in seconds)
        # labelled as microseconds.
        print(f" Cached reads (1000x): {cached_time * 1000:.2f}ms ({cached_time * 1000:.2f}µs each)")
        print(f" Fresh reads (1000x): {fresh_time * 1000:.2f}ms ({fresh_time * 1000:.2f}µs each)")
|
||||
|
||||
|
||||
def main():
    """Run all demos; return 0 on success, 1 on any failure."""
    print(f"\n{Colors.BOLD}{Colors.CYAN}Luzia Responsive Dispatcher - Live Demo{Colors.RESET}")
    print(f"{Colors.GRAY}Non-blocking task dispatch with concurrent management{Colors.RESET}")

    try:
        enhanced, job_ids = demo_concurrent_dispatch()   # Demo 1: concurrent dispatch
        demo_status_polling(enhanced, job_ids)           # Demo 2: status polling
        demo_concurrent_monitoring(enhanced, job_ids)    # Demo 3: concurrent monitoring
        demo_list_all_jobs(enhanced)                     # Demo 4: list jobs
        demo_concurrent_summary(enhanced)                # Demo 5: concurrent summary
        demo_performance_metrics()                       # Demo 6: performance metrics

        print(f"\n{Colors.GREEN}{Colors.BOLD}✓ Demo completed successfully!{Colors.RESET}\n")
        return 0
    except Exception as e:
        print(f"\n{Colors.RED}✗ Demo failed: {e}{Colors.RESET}\n")
        import traceback

        traceback.print_exc()
        return 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate the demo's exit code to the shell.
    sys.exit(main())
|
||||
310
examples/prompt_engineering_demo.py
Normal file
310
examples/prompt_engineering_demo.py
Normal file
@@ -0,0 +1,310 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Prompt Engineering Demo Script
|
||||
|
||||
Demonstrates the advanced prompt engineering techniques implemented in Luzia.
|
||||
|
||||
Run with: python3 examples/prompt_engineering_demo.py
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Add lib to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "lib"))
|
||||
|
||||
from prompt_techniques import (
|
||||
TaskType, PromptStrategy, PromptEngineer,
|
||||
ChainOfThoughtEngine, FewShotExampleBuilder, RoleBasedPrompting,
|
||||
TaskSpecificPatterns
|
||||
)
|
||||
from prompt_integration import (
|
||||
PromptIntegrationEngine, ComplexityAdaptivePrompting,
|
||||
DomainSpecificAugmentor
|
||||
)
|
||||
|
||||
|
||||
def demo_chain_of_thought():
    """Show how a Chain-of-Thought prompt is generated for a complex task."""
    print("\n" + "=" * 80)
    print("DEMO 1: Chain-of-Thought Prompting")
    print("=" * 80)

    sample_task = (
        "Implement a distributed caching layer for database queries "
        "with TTL-based invalidation"
    )

    print(f"\nOriginal Task:\n{sample_task}\n")

    print("CoT Prompt (Complexity 3):")
    print("-" * 40)
    # Complexity level 3 selects the mid-depth reasoning template.
    print(ChainOfThoughtEngine.generate_cot_prompt(sample_task, complexity=3))
|
||||
|
||||
|
||||
def demo_few_shot():
    """Show few-shot example construction for implementation tasks."""
    print("\n" + "=" * 80)
    print("DEMO 2: Few-Shot Learning")
    print("=" * 80)

    example_builder = FewShotExampleBuilder()
    shots = example_builder.build_examples_for_task(TaskType.IMPLEMENTATION, num_examples=2)

    print("\nFew-Shot Examples for IMPLEMENTATION tasks:")
    print("-" * 40)
    print(example_builder.format_examples_for_prompt(shots))
|
||||
|
||||
|
||||
def demo_role_based():
    """Print a truncated role prompt for three representative task types."""
    print("\n" + "=" * 80)
    print("DEMO 3: Role-Based Prompting")
    print("=" * 80)

    for kind in (TaskType.DEBUGGING, TaskType.ANALYSIS, TaskType.IMPLEMENTATION):
        print(f"\n{kind.value.upper()}:")
        print("-" * 40)
        # Show only the first 200 characters of each role prompt.
        print(RoleBasedPrompting.get_role_prompt(kind)[:200] + "...")
|
||||
|
||||
|
||||
def demo_task_patterns():
    """Show the analysis and debugging task-specific prompt patterns."""
    print("\n" + "=" * 80)
    print("DEMO 4: Task-Specific Patterns")
    print("=" * 80)

    pattern_lib = TaskSpecificPatterns()

    # Analysis pattern: topic plus a list of focus dimensions.
    print("\nAnalysis Pattern:")
    print("-" * 40)
    analysis_text = pattern_lib.get_analysis_pattern(
        "Database Performance",
        ["Query optimization", "Index efficiency", "Cache effectiveness"]
    )
    print(analysis_text[:300] + "...")

    # Debugging pattern: symptom plus the affected component.
    print("\n\nDebugging Pattern:")
    print("-" * 40)
    debug_text = pattern_lib.get_debugging_pattern(
        "Intermittent 503 errors under high load",
        "API Gateway"
    )
    print(debug_text[:300] + "...")
|
||||
|
||||
|
||||
def demo_complexity_detection():
    """Run complexity estimation over tasks of increasing difficulty."""
    print("\n" + "=" * 80)
    print("DEMO 5: Complexity Adaptation")
    print("=" * 80)

    # Each entry pairs a task with the level a human reviewer would expect.
    samples = [
        ("Fix typo in README", "Simple"),
        ("Add logging to error handler", "Basic"),
        ("Implement rate limiting for API", "Moderate"),
        ("Refactor authentication system with concurrent access and security considerations", "Complex"),
        ("Design and implement distributed transaction system with encryption, failover, and performance optimization", "Very Complex")
    ]

    print("\nTask Complexity Detection:")
    print("-" * 40)

    for task, expected_level in samples:
        score = ComplexityAdaptivePrompting.estimate_complexity(task, TaskType.IMPLEMENTATION)
        chosen = ComplexityAdaptivePrompting.get_prompting_strategies(score)

        print(f"\nTask: {task}")
        print(f"Expected: {expected_level} | Detected Complexity: {score}/5")
        print(f"Strategies: {len(chosen)} - {', '.join(s.value for s in chosen[:3])}")
|
||||
|
||||
|
||||
def demo_integration_engine():
    """Walk the full integration engine through three contrasting tasks."""
    print("\n" + "=" * 80)
    print("DEMO 6: Full Prompt Integration Engine")
    print("=" * 80)

    # The engine is configured per project.
    project_config = {
        "name": "luzia",
        "path": "/opt/server-agents/orchestrator",
        "focus": "Self-improving orchestrator for multi-project coordination"
    }
    engine = PromptIntegrationEngine(project_config)

    # Case 1: a simple implementation task.
    task1 = "Add request rate limiting to the API endpoint"
    augmented1, meta1 = engine.augment_for_task(
        task1,
        task_type=TaskType.IMPLEMENTATION,
        domain="backend"
    )

    print(f"\nTask 1: {task1}")
    print(f"Complexity: {meta1['complexity']}/5")
    print(f"Strategies: {meta1['strategies']}")
    print(f"Augmentation Ratio: {meta1['final_token_estimate'] / len(task1.split()):.1f}x")
    print("\nAugmented Prompt (first 400 chars):")
    print("-" * 40)
    print(augmented1[:400] + "...\n")

    # Case 2: a complex multi-line debugging task.
    task2 = """Debug intermittent race condition in async event handler that only manifests under high concurrent load.
The issue causes occasional data corruption in shared state and we need to identify the synchronization issue and fix it."""

    augmented2, meta2 = engine.augment_for_task(
        task2,
        task_type=TaskType.DEBUGGING,
        domain="backend"
    )

    print(f"Task 2: {task2[:80]}...")
    print(f"Complexity: {meta2['complexity']}/5")
    print(f"Strategies: {meta2['strategies']}")
    print(f"Augmentation Ratio: {meta2['final_token_estimate'] / len(task2.split()):.1f}x")

    # Case 3: a security analysis task with prior context supplied.
    task3 = "Analyze security implications of the current token storage approach"
    augmented3, meta3 = engine.augment_for_task(
        task3,
        task_type=TaskType.ANALYSIS,
        domain="crypto",
        context={
            "previous_results": {
                "current_approach": "JWT stored in localStorage",
                "threat_model": "Browser-based XSS attacks"
            },
            "blockers": ["Need to decide on alternative storage mechanism"]
        }
    )

    print(f"\n\nTask 3: {task3}")
    print(f"Complexity: {meta3['complexity']}/5")
    print(f"Strategies: {meta3['strategies']}")
    print(f"Augmentation Ratio: {meta3['final_token_estimate'] / len(task3.split()):.1f}x")
|
||||
|
||||
|
||||
def demo_domain_contexts():
    """Print a short summary of each domain's augmentation context."""
    print("\n" + "=" * 80)
    print("DEMO 7: Domain-Specific Contexts")
    print("=" * 80)

    for domain in ("backend", "frontend", "crypto", "devops", "research", "orchestration"):
        # Each domain carries a focus, priority list and best-practice list.
        ctx = DomainSpecificAugmentor.get_domain_context(domain)
        print(f"\n{domain.upper()}:")
        print(f" Focus: {ctx['focus']}")
        print(f" Priorities: {', '.join(ctx['priorities'][:2])}")
        print(f" Best Practices: {ctx['best_practices'][0]}")
|
||||
|
||||
|
||||
def demo_context_continuation():
    """Show how previous results/state/blockers feed a continuation task."""
    print("\n" + "=" * 80)
    print("DEMO 8: Task Continuation with Previous Context")
    print("=" * 80)

    engine = PromptIntegrationEngine({
        "name": "luzia",
        "path": "/opt/server-agents/orchestrator",
        "focus": "Self-improving orchestrator"
    })

    # Step 1: the initial planning task, with no prior context.
    initial_task = "Design a caching strategy for frequently accessed user profiles"
    print(f"\nInitial Task: {initial_task}")

    _first_prompt, first_meta = engine.augment_for_task(
        initial_task,
        task_type=TaskType.PLANNING,
        domain="backend"
    )
    print(f"Initial complexity: {first_meta['complexity']}")

    # Step 2: carry results, current state, and blockers into the follow-up.
    carried_context = {
        "previous_results": {
            "chosen_strategy": "Redis with TTL-based invalidation",
            "estimated_hit_rate": "85%",
            "cache_size": "~500MB per instance"
        },
        "state": {
            "implementation_status": "Completed caching layer",
            "current_focus": "Optimizing invalidation strategy"
        },
        "blockers": [
            "Need to handle cache stampede on popular profiles",
            "Invalidation latency causing stale data"
        ]
    }

    continuation_task = "Continue: optimize cache invalidation to prevent stampede and reduce staleness"
    print(f"\nContinuation Task: {continuation_task}")

    _second_prompt, second_meta = engine.augment_for_task(
        continuation_task,
        task_type=TaskType.IMPLEMENTATION,
        domain="backend",
        context=carried_context
    )

    print(f"Continuation complexity: {second_meta['complexity']}")
    print(f"Context included: {bool(carried_context)}")
    print("\nAugmented Prompt includes:")
    print(" ✓ System Instructions")
    print(" ✓ Role-Based Prompting (Senior Engineer)")
    print(" ✓ Domain Context (Backend best practices)")
    print(" ✓ Task Continuation (Previous results, current state, blockers)")
    print(" ✓ Task-Specific Pattern (Implementation)")
|
||||
|
||||
|
||||
def main():
    """Run every demonstration in sequence, isolating failures per demo."""
    print("\n" + "█"*80)
    print("█ LUZIA ADVANCED PROMPT ENGINEERING DEMONSTRATIONS")
    print("█"*80)

    demos = [
        ("Chain-of-Thought Prompting", demo_chain_of_thought),
        ("Few-Shot Learning", demo_few_shot),
        ("Role-Based Prompting", demo_role_based),
        ("Task-Specific Patterns", demo_task_patterns),
        ("Complexity Adaptation", demo_complexity_detection),
        ("Full Integration Engine", demo_integration_engine),
        ("Domain-Specific Contexts", demo_domain_contexts),
        ("Task Continuation", demo_context_continuation),
    ]

    # One failing demo must not abort the rest; report and continue.
    # (Fixed: the loop index from enumerate(demos, 1) was never used.)
    for name, demo_func in demos:
        try:
            demo_func()
        except Exception as e:
            print(f"\n[ERROR in {name}]: {e}")
            import traceback
            traceback.print_exc()

    print("\n" + "█"*80)
    print("█ DEMONSTRATIONS COMPLETE")
    print("█"*80)
    print("\nKey Takeaways:")
    print("1. Chain-of-Thought breaks complex reasoning into steps")
    print("2. Few-Shot examples improve understanding of task patterns")
    print("3. Role-based prompting sets appropriate expertise level")
    print("4. Complexity adaptation optimizes strategy selection")
    print("5. Domain-specific contexts apply relevant best practices")
    print("6. Task continuation preserves state across multi-step work")
    print("\nIntegrate into Luzia with:")
    print(" from prompt_integration import PromptIntegrationEngine")
    print(" engine = PromptIntegrationEngine(project_config)")
    print(" augmented, metadata = engine.augment_for_task(task, task_type, domain)")
    print()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point; demos handle their own errors, so no exit code.
    main()
|
||||
316
examples/status_integration_example.py
Normal file
316
examples/status_integration_example.py
Normal file
@@ -0,0 +1,316 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Luzia Status System - Integration Example
|
||||
|
||||
This example demonstrates how to integrate the status publishing system
|
||||
into your orchestrator code. Each section shows the 7 key integration points.
|
||||
|
||||
Copy these patterns into your existing code wherever you dispatch tasks,
|
||||
monitor progress, or handle completion/failures.
|
||||
"""
|
||||
|
||||
import time
|
||||
import uuid
|
||||
import asyncio
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
# Setup logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Import the sync wrapper (works in both async and sync contexts)
|
||||
from luzia_status_sync_wrapper import get_sync_publisher
|
||||
|
||||
|
||||
class TaskDispatcherWithStatus:
    """Example task dispatcher with integrated status publishing."""

    def __init__(self):
        # Sync wrapper is safe from both sync and async call sites.
        self.publisher = get_sync_publisher()

    def dispatch_task(self, project: str, description: str, estimated_duration: int = 600):
        """
        Integration point 1: publish "task started".

        Call wherever your dispatcher creates a new task; returns the
        generated task id.
        """
        task_id = f"{project}-{uuid.uuid4().hex[:8]}"

        logger.info(f"Dispatching task: {task_id}")

        # PUBLISHING POINT #1: task started
        self.publisher.publish_task_started(
            task_id=task_id,
            project=project,
            description=description,
            estimated_duration_seconds=estimated_duration
        )
        return task_id

    def monitor_task_progress(self, task_id: str, project: str, total_steps: int = 4):
        """
        Integration points 2 & 5: progress updates and time-budget warnings.

        Belongs in the main task execution loop.
        """
        started_at = time.time()
        phase_names = ["Analyzing", "Processing", "Validating", "Finalizing"]

        for step_num, step_name in enumerate(phase_names, 1):
            logger.info(f" Step {step_num}/{total_steps}: {step_name}")

            time.sleep(2)  # stand-in for real work

            elapsed = int(time.time() - started_at)
            progress = int((step_num / total_steps) * 100)

            # PUBLISHING POINT #2: progress update
            # (publish every ~30 seconds or at significant milestones)
            self.publisher.publish_progress(
                task_id=task_id,
                progress_percent=progress,
                current_step=step_num,
                total_steps=total_steps,
                current_step_name=step_name,
                elapsed_seconds=elapsed,
                estimated_remaining_seconds=int((600 - elapsed) * (100 - progress) / 100)
            )

            # PUBLISHING POINT #5: warn once past 80% of the 600s budget
            # and with less than two minutes remaining.
            if elapsed > 480:
                remaining = int(600 - elapsed)
                if remaining < 120:
                    self.publisher.publish_warning(
                        task_id=task_id,
                        warning_type="DURATION_EXCEEDED",
                        message=f"Task approaching time limit: {remaining}s remaining",
                        current_step=step_num,
                        total_steps=total_steps,
                        current_step_name=step_name,
                        elapsed_seconds=elapsed,
                        progress_percent=progress,
                        recommendation="May need optimization or extension"
                    )

    def complete_task(self, task_id: str, project: str, elapsed_secs: int, findings: list):
        """
        Integration point 3: publish "task completed" on success.
        """
        logger.info(f"Task completed: {task_id}")

        # PUBLISHING POINT #3: task completed
        self.publisher.publish_task_completed(
            task_id=task_id,
            elapsed_seconds=elapsed_secs,
            findings_count=len(findings),
            recommendations_count=1,  # number of recommendations
            status="APPROVED"  # or NEEDS_WORK, REJECTED
        )

    def fail_task(self, task_id: str, error: str, elapsed_secs: int, retry_count: int):
        """
        Integration point 6: publish "task failed" from the error handler.
        """
        logger.error(f"Task failed: {task_id}")

        # PUBLISHING POINT #6: task failed
        self.publisher.publish_task_failed(
            task_id=task_id,
            error=error,
            elapsed_seconds=elapsed_secs,
            retry_count=retry_count,
            retriable=retry_count < 5  # still eligible for retry?
        )
|
||||
|
||||
|
||||
class QueueManagerWithStatus:
    """Example queue manager with integrated status publishing."""

    def __init__(self):
        self.publisher = get_sync_publisher()
        self.queue = []

    def queue_task(self, task_id: str, project: str, description: str, reason: str, wait_estimate: int):
        """
        Integration point 4: publish "task queued".

        Call from the queue manager when a task is parked instead of run.
        """
        position = len(self.queue) + 1

        logger.info(f"Queuing task: {task_id} (position {position})")

        # PUBLISHING POINT #4: announce before appending, so the
        # "tasks ahead" snapshot excludes the task itself.
        self.publisher.publish_task_queued(
            task_id=task_id,
            project=project,
            description=description,
            reason=reason,
            queue_position=position,
            queue_ahead=[entry['id'] for entry in self.queue],
            estimated_wait_seconds=wait_estimate
        )

        self.queue.append({
            'id': task_id,
            'project': project,
            'description': description
        })
|
||||
|
||||
|
||||
class SystemMonitorWithStatus:
    """Example system monitor with integrated status publishing."""

    def __init__(self):
        self.publisher = get_sync_publisher()

    def check_system_health(self):
        """
        Integration point 7: publish system alerts on resource pressure.

        Raises ImportError when psutil is not installed; callers may
        catch and skip the check.
        """
        import psutil  # optional dependency, imported lazily on purpose

        # Memory above 80% -> warning-severity alert.
        mem_used = psutil.virtual_memory().percent
        if mem_used > 80:
            # PUBLISHING POINT #7: system alert
            self.publisher.publish_system_alert(
                alert_type="RESOURCE_WARNING",
                message=f"Memory usage at {mem_used}%",
                recommendation="New tasks will be queued until memory is freed",
                severity="warning"
            )

        # Disk above 90% -> critical-severity alert.
        disk_used = psutil.disk_usage('/').percent
        if disk_used > 90:
            self.publisher.publish_system_alert(
                alert_type="DISK_CRITICAL",
                message=f"Disk usage at {disk_used}%",
                recommendation="Immediate cleanup required",
                severity="critical"
            )
|
||||
|
||||
|
||||
# Example usage
|
||||
def example_task_lifecycle():
    """
    Demonstrate the complete task lifecycle with status publishing.

    Covers integration points 1 (started), 2 (progress), 5 (warnings),
    3 (completed) and 6 (failed).
    """
    dispatcher = TaskDispatcherWithStatus()
    # Fixed: the original also built QueueManagerWithStatus and
    # SystemMonitorWithStatus here but never used them; those flows are
    # demonstrated by example_queue_management() and
    # example_system_monitoring(), and each construction triggers an
    # unnecessary publisher setup.

    # Point 1: dispatch a task
    task_id = dispatcher.dispatch_task(
        project="musica",
        description="Fix audio synthesis engine",
        estimated_duration=600
    )

    try:
        # Points 2 & 5: monitor progress (with warnings)
        dispatcher.monitor_task_progress(task_id, "musica")

        # Point 3: complete the task
        dispatcher.complete_task(
            task_id=task_id,
            project="musica",
            elapsed_secs=615,
            findings=["Issue A", "Issue B"]
        )

    except Exception as e:
        # Point 6: handle failures
        dispatcher.fail_task(
            task_id=task_id,
            error=str(e),
            elapsed_secs=300,
            retry_count=1
        )
|
||||
|
||||
|
||||
def example_queue_management():
    """Demonstrate queuing with status publishing (integration point 4)."""
    manager = QueueManagerWithStatus()

    # Queue a task, as would happen when the system is busy.
    manager.queue_task(
        task_id="admin-code-001",
        project="admin",
        description="Code review and cleanup",
        reason="System resource limit reached",
        wait_estimate=300
    )
|
||||
|
||||
|
||||
def example_system_monitoring():
    """Demonstrate system monitoring with alerts (integration point 7)."""
    health_monitor = SystemMonitorWithStatus()

    try:
        health_monitor.check_system_health()
    except ImportError:
        # psutil is optional; skip the health check when it is missing.
        logger.warning("psutil not available, skipping system check")
|
||||
|
||||
|
||||
# Integration Points Summary
|
||||
"""
|
||||
To integrate into your orchestrator, add the following 7 calls:
|
||||
|
||||
1. Task Dispatcher (when creating task):
|
||||
publisher.publish_task_started(task_id, project, description, estimated_duration)
|
||||
|
||||
2. Progress Loop (every 30 seconds):
|
||||
publisher.publish_progress(task_id, progress_percent, current_step, total_steps,
|
||||
current_step_name, elapsed_seconds, estimated_remaining)
|
||||
|
||||
3. Task Completion (when task succeeds):
|
||||
publisher.publish_task_completed(task_id, elapsed_seconds, findings_count, status)
|
||||
|
||||
4. Queue Manager (when queueing task):
|
||||
publisher.publish_task_queued(task_id, project, description, reason,
|
||||
queue_position, queue_ahead, wait_estimate)
|
||||
|
||||
5. Resource Monitor (when warning threshold exceeded):
|
||||
publisher.publish_warning(task_id, warning_type, message, current_step,
|
||||
total_steps, current_step_name, elapsed_seconds,
|
||||
progress_percent, recommendation)
|
||||
|
||||
6. Error Handler (when task fails):
|
||||
publisher.publish_task_failed(task_id, error, elapsed_seconds,
|
||||
retry_count, retriable)
|
||||
|
||||
7. System Monitor (on health issues):
|
||||
publisher.publish_system_alert(alert_type, message, recommendation, severity)
|
||||
|
||||
Each call is idempotent and safe to use in production.
|
||||
"""
|
||||
|
||||
if __name__ == "__main__":
    print("\n" + "=" * 60)
    print("LUZIA STATUS INTEGRATION EXAMPLES")
    print("=" * 60)

    # Run each example under its numbered heading.
    for heading, runner in (
        ("\n1. Task Lifecycle Example:", example_task_lifecycle),
        ("\n2. Queue Management Example:", example_queue_management),
        ("\n3. System Monitoring Example:", example_system_monitoring),
    ):
        print(heading)
        print("-" * 60)
        runner()

    print("\n" + "=" * 60)
    print("Integration complete - status events published")
    print("=" * 60)
|
||||
Reference in New Issue
Block a user