Refactor cockpit to use DockerTmuxController pattern
Based on claude-code-tools TmuxCLIController, this refactor:
- Added DockerTmuxController class for robust tmux session management
- Implements send_keys() with configurable delay_enter
- Implements capture_pane() for output retrieval
- Implements wait_for_prompt() for pattern-based completion detection
- Implements wait_for_idle() for content-hash-based idle detection
- Implements wait_for_shell_prompt() for shell prompt detection

Also includes workflow improvements:
- Pre-task git snapshot before agent execution
- Post-task commit protocol in agent guidelines

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
QUICK_START.md
325 lines · new file
@@ -0,0 +1,325 @@
|
||||
# Luzia Status System - Quick Start Guide
|
||||
|
||||
**TL;DR:** The status system is deployed and ready. Add 3 lines of code to start publishing events.
|
||||
|
||||
---
|
||||
|
||||
## 30-Second Setup
|
||||
|
||||
### 1. Verify Installation
|
||||
```bash
|
||||
python3 -c "from luzia_status_sync_wrapper import get_sync_publisher; print('✓ Ready')"
|
||||
```
|
||||
|
||||
### 2. Add to Your Code
|
||||
|
||||
Copy this into your task dispatcher:
|
||||
|
||||
```python
|
||||
from luzia_status_sync_wrapper import get_sync_publisher
|
||||
|
||||
publisher = get_sync_publisher()
|
||||
|
||||
# When task starts
|
||||
publisher.publish_task_started(
|
||||
task_id="project-task123",
|
||||
project="myproject",
|
||||
description="What the task does",
|
||||
estimated_duration_seconds=600
|
||||
)
|
||||
|
||||
# When task completes
|
||||
publisher.publish_task_completed(
|
||||
task_id="project-task123",
|
||||
elapsed_seconds=615,
|
||||
findings_count=2,
|
||||
status="APPROVED"
|
||||
)
|
||||
|
||||
# When task fails
|
||||
publisher.publish_task_failed(
|
||||
task_id="project-task123",
|
||||
error=str(exception),
|
||||
elapsed_seconds=300,
|
||||
retry_count=1,
|
||||
retriable=True
|
||||
)
|
||||
```
|
||||
|
||||
### 3. Test It
|
||||
|
||||
```bash
|
||||
# Run example
|
||||
python3 /opt/server-agents/orchestrator/examples/status_integration_example.py
|
||||
|
||||
# Check status
|
||||
luzia status
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Common Tasks
|
||||
|
||||
### Task Just Started - Publish Event
|
||||
```python
|
||||
publisher.publish_task_started(
|
||||
task_id="my-task-123",
|
||||
project="admin",
|
||||
description="Code review and fixes",
|
||||
estimated_duration_seconds=600
|
||||
)
|
||||
```
|
||||
|
||||
### Task Progressing - Update Progress (Every 30 Seconds)
|
||||
```python
|
||||
publisher.publish_progress(
|
||||
task_id="my-task-123",
|
||||
progress_percent=50, # 0-100
|
||||
current_step=2, # Which step (1, 2, 3...)
|
||||
total_steps=4, # Total steps
|
||||
current_step_name="Processing", # Name of current step
|
||||
elapsed_seconds=300, # How long so far
|
||||
estimated_remaining_seconds=300 # Est time left
|
||||
)
|
||||
```
|
||||
|
||||
### Task Completed Successfully
|
||||
```python
|
||||
publisher.publish_task_completed(
|
||||
task_id="my-task-123",
|
||||
elapsed_seconds=600,
|
||||
findings_count=2, # Number of findings
|
||||
recommendations_count=1, # Number of recommendations
|
||||
status="APPROVED" # or "NEEDS_WORK", "REJECTED"
|
||||
)
|
||||
```
|
||||
|
||||
### Task Failed
|
||||
```python
|
||||
publisher.publish_task_failed(
|
||||
task_id="my-task-123",
|
||||
error="Connection timeout",
|
||||
elapsed_seconds=300,
|
||||
retry_count=1, # Which attempt failed
|
||||
retriable=True # Can it be retried?
|
||||
)
|
||||
```
|
||||
|
||||
### Task Warning (Time Running Out, etc.)
|
||||
```python
|
||||
publisher.publish_warning(
|
||||
task_id="my-task-123",
|
||||
warning_type="DURATION_EXCEEDED",
|
||||
message="Task approaching time limit",
|
||||
current_step=3,
|
||||
total_steps=4,
|
||||
current_step_name="Validating",
|
||||
elapsed_seconds=480,
|
||||
progress_percent=75,
|
||||
recommendation="May need optimization"
|
||||
)
|
||||
```
|
||||
|
||||
### Task Queued (System Busy)
|
||||
```python
|
||||
publisher.publish_task_queued(
|
||||
task_id="my-task-123",
|
||||
project="admin",
|
||||
description="Code review",
|
||||
reason="System resource limit",
|
||||
queue_position=3,
|
||||
queue_ahead=["task-100", "task-101"],
|
||||
estimated_wait_seconds=300
|
||||
)
|
||||
```
|
||||
|
||||
### System Alert
|
||||
```python
|
||||
publisher.publish_system_alert(
|
||||
alert_type="MEMORY_WARNING",
|
||||
message="Memory at 85%",
|
||||
recommendation="Queued tasks will wait",
|
||||
severity="warning" # or "critical"
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Check Status
|
||||
|
||||
```bash
|
||||
# Show dashboard
|
||||
luzia status
|
||||
|
||||
# Show only warnings/errors
|
||||
luzia status --alerts
|
||||
|
||||
# Show specific project
|
||||
luzia status --project admin
|
||||
|
||||
# Show last N updates
|
||||
luzia status --recent 10
|
||||
|
||||
# Export to JSON
|
||||
luzia status --export json
|
||||
# Creates: /tmp/luzia_status_20260109_120000.json
|
||||
|
||||
# Export to Markdown
|
||||
luzia status --export markdown
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Customize Behavior
|
||||
|
||||
Edit `/etc/luzia/status_config.toml`:
|
||||
|
||||
```toml
|
||||
[status_updates]
|
||||
verbosity = "normal" # quiet, normal, verbose
|
||||
progress_update_threshold_percent = 25 # Show at 25%, 50%, 75%
|
||||
progress_update_min_interval_seconds = 30 # Min time between updates
|
||||
|
||||
[display]
|
||||
use_colors = true
|
||||
use_emojis = true
|
||||
compact_format = true
|
||||
|
||||
[logging]
|
||||
enabled = true
|
||||
log_file = "/var/log/luzia/status.log"
|
||||
log_level = "INFO"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Test Integration
|
||||
|
||||
Run the full test suite:
|
||||
```bash
|
||||
cd /opt/server-agents/orchestrator/lib
|
||||
python3 test_status_integration.py
|
||||
```
|
||||
|
||||
All tests should pass (7/7).
|
||||
|
||||
---
|
||||
|
||||
## Files Deployed
|
||||
|
||||
- `/etc/luzia/status_config.toml` - Configuration
|
||||
- `/opt/server-agents/orchestrator/lib/luzia_status_*.py` - Core modules
|
||||
- `/opt/server-agents/orchestrator/lib/test_status_integration.py` - Tests
|
||||
- `/opt/server-agents/orchestrator/examples/status_integration_example.py` - Examples
|
||||
- `/opt/server-agents/orchestrator/LUZIA_STATUS_INTEGRATION.md` - Full docs
|
||||
- `/opt/server-agents/orchestrator/STATUS_DEPLOYMENT_COMPLETE.md` - Deployment info
|
||||
|
||||
---
|
||||
|
||||
## Complete Example
|
||||
|
||||
```python
|
||||
from luzia_status_sync_wrapper import get_sync_publisher
|
||||
import time
|
||||
|
||||
def run_task():
|
||||
publisher = get_sync_publisher()
|
||||
task_id = "admin-review-123"
|
||||
|
||||
# Task starts
|
||||
publisher.publish_task_started(
|
||||
task_id=task_id,
|
||||
project="admin",
|
||||
description="Code review",
|
||||
estimated_duration_seconds=600
|
||||
)
|
||||
|
||||
start = time.time()
|
||||
|
||||
try:
|
||||
# Step 1
|
||||
print("Step 1: Analyzing...")
|
||||
time.sleep(2)
|
||||
|
||||
publisher.publish_progress(
|
||||
task_id=task_id,
|
||||
progress_percent=25,
|
||||
current_step=1,
|
||||
total_steps=4,
|
||||
current_step_name="Analyzing",
|
||||
elapsed_seconds=int(time.time() - start),
|
||||
estimated_remaining_seconds=450
|
||||
)
|
||||
|
||||
# Step 2
|
||||
print("Step 2: Reviewing...")
|
||||
time.sleep(2)
|
||||
|
||||
publisher.publish_progress(
|
||||
task_id=task_id,
|
||||
progress_percent=50,
|
||||
current_step=2,
|
||||
total_steps=4,
|
||||
current_step_name="Reviewing",
|
||||
elapsed_seconds=int(time.time() - start),
|
||||
estimated_remaining_seconds=300
|
||||
)
|
||||
|
||||
# Step 3
|
||||
print("Step 3: Fixing...")
|
||||
time.sleep(2)
|
||||
|
||||
# Step 4
|
||||
print("Step 4: Testing...")
|
||||
time.sleep(2)
|
||||
|
||||
publisher.publish_progress(
|
||||
task_id=task_id,
|
||||
progress_percent=100,
|
||||
current_step=4,
|
||||
total_steps=4,
|
||||
current_step_name="Testing",
|
||||
elapsed_seconds=int(time.time() - start),
|
||||
estimated_remaining_seconds=0
|
||||
)
|
||||
|
||||
# Success
|
||||
elapsed = int(time.time() - start)
|
||||
publisher.publish_task_completed(
|
||||
task_id=task_id,
|
||||
elapsed_seconds=elapsed,
|
||||
findings_count=3,
|
||||
status="APPROVED"
|
||||
)
|
||||
|
||||
print("✓ Task completed successfully")
|
||||
|
||||
except Exception as e:
|
||||
elapsed = int(time.time() - start)
|
||||
publisher.publish_task_failed(
|
||||
task_id=task_id,
|
||||
error=str(e),
|
||||
elapsed_seconds=elapsed,
|
||||
retry_count=1,
|
||||
retriable=True
|
||||
)
|
||||
raise
|
||||
|
||||
if __name__ == "__main__":
|
||||
run_task()
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Need Help?
|
||||
|
||||
1. **Check tests:** `python3 /opt/server-agents/orchestrator/lib/test_status_integration.py`
|
||||
2. **See examples:** `/opt/server-agents/orchestrator/examples/status_integration_example.py`
|
||||
3. **Read full docs:** `/opt/server-agents/orchestrator/LUZIA_STATUS_INTEGRATION.md`
|
||||
4. **Check logs:** `tail -f /var/log/luzia/status.log`
|
||||
|
||||
---
|
||||
|
||||
**Status:** ✓ Ready for production use
|
||||
|
||||
Add the 3-line import and start publishing. It's that simple.
|
||||
Reference in New Issue
Block a user