feat: Add TurnOrchestrator for multi-turn LLM simulation (addresses #156)

TurnOrchestrator: Coordinates multi-agent turn-based simulation
- Perspective switching with FOV layer updates
- Screenshot capture per agent per turn
- Pluggable LLM query callback
- SimulationStep/SimulationLog for full context capture
- JSON save/load with replay support

New demos:
- 2_integrated_demo.py: WorldGraph + action execution integration
- 3_multi_turn_demo.py: Complete multi-turn simulation with logging

Updated 1_multi_agent_demo.py with action parser/executor integration.

Tested with Qwen2.5-VL-32B: agents successfully navigate based on
WorldGraph descriptions and VLM visual input.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
John McCardle 2025-12-14 12:53:48 -05:00
commit de739037f0
4 changed files with 1039 additions and 0 deletions

View file

@@ -22,6 +22,9 @@ import base64
import os
import random
from action_parser import parse_action
from action_executor import ActionExecutor
# VLLM configuration
VLLM_URL = "http://192.168.1.100:8100/v1/chat/completions"
SCREENSHOT_DIR = "/tmp/vllm_multi_agent"
@@ -284,6 +287,9 @@ def run_demo():
# Setup scene
grid, fov_layer, agents, rat = setup_scene()
# Create action executor
executor = ActionExecutor(grid)
# Cycle through each agent's perspective
for i, agent in enumerate(agents):
print(f"\n{'='*70}")
@@ -319,6 +325,21 @@ def run_demo():
print(f"\n{agent.name}'s Response:\n{response}")
print()
# Parse and execute action
print(f"--- Action Execution ---")
action = parse_action(response)
print(f"Parsed action: {action.type.value} {action.args}")
result = executor.execute(agent, action)
if result.success:
print(f"SUCCESS: {result.message}")
if result.new_position:
# Update perspective after movement
switch_perspective(grid, fov_layer, agent)
mcrfpy.step(0.016)
else:
print(f"FAILED: {result.message}")
print("\n" + "=" * 70)
print("Multi-Agent Demo Complete")
print("=" * 70)