luzia/lib/qa_learning_integration.py
admin ec33ac1936 Refactor cockpit to use DockerTmuxController pattern
Based on claude-code-tools TmuxCLIController, this refactor:

- Adds DockerTmuxController class for robust tmux session management
- Implements send_keys() with configurable delay_enter
- Implements capture_pane() for output retrieval
- Implements wait_for_prompt() for pattern-based completion detection
- Implements wait_for_idle() for content-hash-based idle detection
- Implements wait_for_shell_prompt() for shell prompt detection

Also includes workflow improvements:
- Pre-task git snapshot before agent execution
- Post-task commit protocol in agent guidelines

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-14 10:42:16 -03:00
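
The controller interface listed above suggests usage along these lines (a minimal sketch: the class and method names come from the commit message, while the constructor arguments and call sequence are assumptions):

    controller = DockerTmuxController(container="cockpit", session="agent")
    controller.send_keys("make test", delay_enter=0.5)   # type the command, then Enter
    controller.wait_for_idle()                           # content-hash-based idle detection
    output = controller.capture_pane()                   # retrieve the pane output
    controller.wait_for_shell_prompt()                   # confirm the shell prompt returned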

266 lines · 9.3 KiB · Python

#!/usr/bin/env python3
"""
QA Learning Integration - Connects QA validation with skill learning.

This module hooks into the QA validation process to automatically extract
and store learnings when QA passes, improving the system's decision-making
over time.

Workflow:
1. QA validation runs and generates results
2. Integration module captures success criteria
3. Skill learning system extracts learnings
4. Learnings are stored in the knowledge graph
5. Future tasks can get recommendations from stored learnings
"""

import json
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional

# Import our modules (siblings of this file)
sys.path.insert(0, str(Path(__file__).parent))

from skill_learning_engine import SkillLearningSystem
from qa_validator import QAValidator


class QALearningIntegrator:
    """Integrates QA validation results with skill learning."""

    def __init__(self):
        self.skill_system = SkillLearningSystem()
        self.qa_validator = QAValidator()
        self.integration_log: List[Dict[str, Any]] = []

    def run_qa_with_learning(
        self,
        task_context: Optional[Dict[str, Any]] = None,
        verbose: bool = False
    ) -> Dict[str, Any]:
        """
        Run QA validation and automatically extract learnings on success.

        Args:
            task_context: Optional context about the task being validated
            verbose: Verbose output

        Returns:
            Dict with QA results and learning extraction results
        """
        # 1. Run QA validation
        if verbose:
            print("[Learning] Running QA validation...")
        qa_results = self.qa_validator.validate_all()

        # 2. Extract learning if QA passed
        learning_result = {"extracted": False, "learning_id": None}
        if qa_results.get("passed", False):
            if verbose:
                print("[Learning] QA passed! Extracting learnings...")
            learning_result = self._extract_and_store_learning(
                qa_results, task_context, verbose)

        # 3. Log integration event
        self._log_event({
            "qa_passed": qa_results.get("passed", False),
            "learning_extracted": learning_result.get("extracted", False),
            "learning_id": learning_result.get("learning_id"),
            "task_context": task_context,
        })

        return {
            "qa_results": qa_results,
            "learning": learning_result,
            "timestamp": datetime.now().isoformat(),
        }

    def run_qa_and_sync_with_learning(
        self,
        sync: bool = True,
        verbose: bool = False
    ) -> Dict[str, Any]:
        """
        Run the full QA pipeline: validate, sync to the KG, and extract learnings.

        This is the recommended entry point for the full workflow.
        """
        if verbose:
            print("[Learning] Starting QA validation with learning integration...\n")

        # 1. Validate
        print("\n=== Luzia QA Validation ===\n")
        qa_results = self.qa_validator.validate_all()
        for category, passed in qa_results["results"].items():
            status = "[OK]" if passed else "[FAIL]"
            print(f"  {status} {category}")
        if qa_results["issues"]:
            print("\nErrors:")
            for issue in qa_results["issues"]:
                print(f"  [!] {issue['category']}: {issue['message']}")

        # 2. Sync if requested
        if sync and qa_results.get("passed", False):
            print("\n--- Syncing to Knowledge Graph ---")
            route_result = self.qa_validator.sync_routes_to_kg()
            if "error" in route_result:
                print(f"  Routes: Error - {route_result['error']}")
            else:
                print(f"  Routes: {route_result['added']} added, "
                      f"{route_result['updated']} updated")
            project_result = self.qa_validator.sync_projects_to_kg()
            if "error" in project_result:
                print(f"  Projects: Error - {project_result['error']}")
            else:
                print(f"  Projects: {project_result['added']} added, "
                      f"{project_result['updated']} updated")

        # 3. Extract learning
        print("\n--- Extracting Learnings ---")
        learning_result = self._extract_and_store_learning(qa_results, verbose=verbose)
        if learning_result["extracted"]:
            print(f"  Learning extracted: {learning_result['learning_id']}")
            print(f"  Skills identified: {learning_result['skills_count']}")
        else:
            print("  No learnings extracted (QA may have failed)")

        return {
            "qa_passed": qa_results.get("passed", False),
            "qa_results": qa_results,
            "learning": learning_result,
            "timestamp": datetime.now().isoformat(),
        }

    def _extract_and_store_learning(
        self,
        qa_results: Dict[str, Any],
        task_context: Optional[Dict[str, Any]] = None,
        verbose: bool = False
    ) -> Dict[str, Any]:
        """Extract and store a learning from QA results."""
        if not qa_results.get("passed", False):
            return {"extracted": False, "reason": "QA failed"}

        try:
            # Build task data from QA results
            task_data = {
                "task_id": f"qa_task_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
                "prompt": "QA Validation Pass - Code quality and documentation validated",
                "project": task_context.get("project", "general") if task_context else "general",
                "status": "success",
                "tools_used": self._extract_tools_from_qa(qa_results),
                "duration": 0.0,  # QA duration is not tracked here
                "result_summary": self._summarize_qa_results(qa_results),
                "qa_passed": True,
                "timestamp": datetime.now().isoformat(),
            }

            if verbose:
                print("[Learning] Processing QA results as a task...")

            # Process through the skill learning system
            result = self.skill_system.process_task_completion(task_data, qa_results)

            if verbose:
                print(f"[Learning] Extracted {result['skills_extracted']} skills")
                print(f"[Learning] Created learning: {result['learning_id']}")

            return {
                "extracted": result.get("learning_id") is not None,
                "learning_id": result.get("learning_id"),
                "skills_count": result.get("skills_extracted", 0),
                "details": result,
            }
        except Exception as e:
            if verbose:
                print(f"[Learning] Error extracting learning: {e}")
            return {
                "extracted": False,
                "error": str(e),
            }

    def _extract_tools_from_qa(self, qa_results: Dict[str, Any]) -> List[str]:
        """Extract the tools used during QA from results."""
        # QA currently uses a fixed toolset: code analysis, syntax checking,
        # and documentation validation.
        return ["CodeAnalysis", "SyntaxValidator", "DocumentationChecker"]

    def _summarize_qa_results(self, qa_results: Dict[str, Any]) -> str:
        """Summarize QA results as a string."""
        summary = qa_results.get("summary", {})
        return f"QA passed with {summary.get('info', 0)} info items, no errors or warnings"

    def _log_event(self, event: Dict[str, Any]) -> None:
        """Log an integration event."""
        self.integration_log.append({
            "timestamp": datetime.now().isoformat(),
            **event
        })

    def get_integration_stats(self) -> Dict[str, Any]:
        """Get statistics on learning integration."""
        if not self.integration_log:
            return {
                "total_events": 0,
                "qa_passed": 0,
                "learnings_extracted": 0,
                "extraction_rate": 0.0,
                "last_event": None,
            }

        events = len(self.integration_log)
        learnings = sum(1 for e in self.integration_log if e.get("learning_extracted"))
        qa_passes = sum(1 for e in self.integration_log if e.get("qa_passed"))

        return {
            "total_events": events,
            "qa_passed": qa_passes,
            "learnings_extracted": learnings,
            "extraction_rate": learnings / qa_passes if qa_passes > 0 else 0.0,
            "last_event": self.integration_log[-1]["timestamp"],
        }
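
    # Example: after two successful integrated runs in the same process,
    # get_integration_stats() returns something like (values illustrative):
    #   {"total_events": 2, "qa_passed": 2, "learnings_extracted": 2,
    #    "extraction_rate": 1.0, "last_event": "2026-01-14T10:42:16"}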


def run_integrated_qa(verbose: bool = False, sync: bool = True) -> int:
    """
    Run integrated QA with learning extraction.

    This is the main entry point, intended to replace the standard QA run.
    """
    integrator = QALearningIntegrator()
    result = integrator.run_qa_and_sync_with_learning(sync=sync, verbose=verbose)
    # Return an exit code suitable for CI: 0 on pass, 1 on failure
    return 0 if result["qa_passed"] else 1


# --- CLI ---
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="QA Learning Integration")
    parser.add_argument("--no-sync", dest="sync", action="store_false",
                        help="Skip syncing to KG (sync is on by default)")
    parser.add_argument("--verbose", "-v", action="store_true",
                        help="Verbose output")
    parser.add_argument("--stats", action="store_true",
                        help="Show integration statistics")
    args = parser.parse_args()

    integrator = QALearningIntegrator()
    if args.stats:
        stats = integrator.get_integration_stats()
        print("\n=== QA Learning Integration Statistics ===\n")
        for key, value in stats.items():
            print(f"  {key}: {value}")
    else:
        sys.exit(run_integrated_qa(verbose=args.verbose, sync=args.sync))
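
# Example invocations (a sketch; assumes running from the luzia/lib directory):
#   python3 qa_learning_integration.py -v           # validate, sync to KG, extract learnings
#   python3 qa_learning_integration.py --no-sync    # validate and learn, skip the KG sync
#   python3 qa_learning_integration.py --stats      # print integration statistics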