Refactor cockpit to use DockerTmuxController pattern
Based on claude-code-tools TmuxCLIController, this refactor:

- Added DockerTmuxController class for robust tmux session management
- Implements send_keys() with configurable delay_enter
- Implements capture_pane() for output retrieval
- Implements wait_for_prompt() for pattern-based completion detection
- Implements wait_for_idle() for content-hash-based idle detection
- Implements wait_for_shell_prompt() for shell prompt detection

Also includes workflow improvements:

- Pre-task git snapshot before agent execution
- Post-task commit protocol in agent guidelines

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
201
examples/demo_concurrent_tasks.py
Normal file
201
examples/demo_concurrent_tasks.py
Normal file
@@ -0,0 +1,201 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Demo: Concurrent Task Management with Responsive Dispatcher
|
||||
|
||||
This demonstrates:
|
||||
1. Dispatching multiple tasks concurrently
|
||||
2. Non-blocking task dispatch (returns immediately)
|
||||
3. Monitoring multiple jobs independently
|
||||
4. Live status updates without blocking
|
||||
5. Pretty-printed feedback
|
||||
"""
|
||||
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
# Add lib to path
|
||||
lib_path = Path(__file__).parent.parent / "lib"
|
||||
sys.path.insert(0, str(lib_path))
|
||||
|
||||
from dispatcher_enhancements import EnhancedDispatcher
|
||||
from cli_feedback import Colors, ProgressBar
|
||||
|
||||
|
||||
def demo_concurrent_dispatch():
    """Demo 1: dispatch a batch of tasks back-to-back and time the whole batch.

    Returns the dispatcher instance and a list of (job_id, project) pairs
    so later demos can poll the same jobs.
    """
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 1: Concurrent Task Dispatch ==={Colors.RESET}\n")
    print("Dispatching 5 tasks across different projects...\n")

    enhanced = EnhancedDispatcher()

    # (project, task description) pairs to fan out.
    work_items = [
        ("overbits", "fix the login button and add dark mode"),
        ("musica", "analyze audio waveform and optimize performance"),
        ("dss", "verify digital signature chain of custody"),
        ("librechat", "implement chat message search functionality"),
        ("admin", "update all system dependencies and run security scan"),
    ]

    dispatched = []
    t0 = time.time()

    # Fire off every task; dispatch_and_report returns immediately per task.
    for project, task in work_items:
        print(f"Dispatching: {project}")
        job_id, status = enhanced.dispatch_and_report(
            project, task, show_details=False, show_feedback=False
        )
        print(f" → {job_id}")
        dispatched.append((job_id, project))

    elapsed = time.time() - t0

    print(f"\n{Colors.GREEN}✓ All tasks dispatched in {elapsed:.2f}s{Colors.RESET}")
    print(f" (No blocking - all jobs are running concurrently)\n")

    return enhanced, dispatched
|
||||
|
||||
|
||||
def demo_status_polling(enhanced, job_ids):
    """Demo 2: read the current status of each job without blocking.

    Fetches a fresh (uncached) status per job and prints one line for each
    job that has a status record.
    """
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 2: Non-Blocking Status Polling ==={Colors.RESET}\n")
    print("Checking status of all jobs (instantly, no blocking):\n")

    for job_id, _project in job_ids:
        # use_cache=False forces a fresh read rather than a cached snapshot.
        current = enhanced.dispatcher.get_status(job_id, use_cache=False)
        if not current:
            continue
        enhanced.feedback.show_status_line(current)
    print()
|
||||
|
||||
|
||||
def demo_concurrent_monitoring(enhanced, job_ids):
    """Demo 3: drive each job through simulated progress stages.

    Every job is walked through the same sequence of (percent, message)
    updates, then marked completed; finally the first five jobs are shown.
    """
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 3: Independent Job Monitoring ==={Colors.RESET}\n")

    # Simulate progress updates
    print("Simulating job execution with progress updates:\n")

    stages = (
        (5, "Initializing agent"),
        (10, "Setting up environment"),
        (25, "Loading dependencies"),
        (50, "Processing request"),
        (75, "Applying changes"),
        (90, "Running validation"),
    )

    for job_id, _project in job_ids:
        # Walk the job through each intermediate stage...
        for pct, msg in stages:
            enhanced.dispatcher.update_status(job_id, "running", pct, msg)
        # ...then mark it done with a success exit code.
        enhanced.dispatcher.update_status(
            job_id, "completed", 100, "Task completed successfully", exit_code=0
        )

    # Now display all jobs
    jobs = enhanced.dispatcher.list_jobs()
    print(f"All jobs updated. Current status:\n")

    for job in jobs[:5]:  # Show first 5
        enhanced.feedback.show_status(job)
|
||||
|
||||
|
||||
def demo_list_all_jobs(enhanced):
    """Demo 4: print the dispatcher's summary of all known jobs."""
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 4: List All Jobs ==={Colors.RESET}\n")
    enhanced.show_jobs_summary()
|
||||
|
||||
|
||||
def demo_concurrent_summary(enhanced):
    """Demo 5: print the dispatcher's concurrent-job summary view."""
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 5: Concurrent Job Summary ==={Colors.RESET}\n")
    enhanced.show_concurrent_summary()
|
||||
|
||||
|
||||
def demo_performance_metrics():
    """Demo 6: measure dispatch and status-retrieval throughput.

    Times 100 dispatches, then (if at least one job exists) 1000 cached
    status reads versus 1000 fresh status reads, reporting totals and
    per-call latency.
    """
    print(f"\n{Colors.BOLD}{Colors.CYAN}=== Demo 6: Performance Metrics ==={Colors.RESET}\n")

    # NOTE(review): spelling "ResponseiveDispatcher" looks like a typo for
    # "ResponsiveDispatcher" — it must match the class name declared in
    # responsive_dispatcher.py; confirm before renaming.
    from responsive_dispatcher import ResponseiveDispatcher

    print("Dispatch Performance (100 tasks):\n")

    dispatcher = ResponseiveDispatcher()

    # Time 100 dispatches
    start = time.time()
    for i in range(100):
        dispatcher.dispatch_task(f"proj{i % 5}", f"task_{i}")
    elapsed = time.time() - start

    avg_dispatch_time = (elapsed * 1000) / 100  # ms per dispatch
    print(f" Total time: {elapsed:.3f}s")
    print(" Tasks: 100")
    print(f" Avg per task: {avg_dispatch_time:.2f}ms")
    print(f" Throughput: {100 / elapsed:.0f} tasks/second")

    # Status retrieval performance
    jobs = dispatcher.list_jobs()
    job_id = jobs[0]["id"] if jobs else None

    if job_id:
        print("\nStatus Retrieval Performance:\n")

        # Cached reads
        start = time.time()
        for _ in range(1000):
            dispatcher.get_status(job_id, use_cache=True)
        cached_time = time.time() - start

        # Fresh reads
        start = time.time()
        for _ in range(1000):
            dispatcher.get_status(job_id, use_cache=False)
        fresh_time = time.time() - start

        # BUG FIX: per-call time is total/1000 seconds; converting to µs
        # needs *1e6. The previous code multiplied by 1000, which printed
        # per-call *milliseconds* mislabeled as µs (off by 1000x).
        print(f" Cached reads (1000x): {cached_time * 1000:.2f}ms ({cached_time / 1000 * 1e6:.2f}µs each)")
        print(f" Fresh reads (1000x): {fresh_time * 1000:.2f}ms ({fresh_time / 1000 * 1e6:.2f}µs each)")
|
||||
|
||||
|
||||
def main():
    """Run every demo in sequence; return 0 on success, 1 on any failure."""
    print(f"\n{Colors.BOLD}{Colors.CYAN}Luzia Responsive Dispatcher - Live Demo{Colors.RESET}")
    print(f"{Colors.GRAY}Non-blocking task dispatch with concurrent management{Colors.RESET}")

    try:
        # Demo 1 produces the dispatcher and job list the later demos reuse.
        enhanced, job_ids = demo_concurrent_dispatch()
        demo_status_polling(enhanced, job_ids)        # Demo 2
        demo_concurrent_monitoring(enhanced, job_ids)  # Demo 3
        demo_list_all_jobs(enhanced)                   # Demo 4
        demo_concurrent_summary(enhanced)              # Demo 5
        demo_performance_metrics()                     # Demo 6

        print(f"\n{Colors.GREEN}{Colors.BOLD}✓ Demo completed successfully!{Colors.RESET}\n")
        return 0

    except Exception as e:
        # Any demo failure is reported with a traceback and a non-zero exit.
        print(f"\n{Colors.RED}✗ Demo failed: {e}{Colors.RESET}\n")
        import traceback

        traceback.print_exc()
        return 1


if __name__ == "__main__":
    sys.exit(main())
|
||||
Reference in New Issue
Block a user