The Apptainer sandboxed agent server demonstrates how to run agents in isolated Apptainer containers using `ApptainerWorkspace`. Apptainer (formerly Singularity) is a container runtime designed for HPC environments that doesn't require root access, making it ideal for shared computing environments, university clusters, and systems where Docker is not available.
"""Example: run an agent in an isolated Apptainer container via ApptainerWorkspace."""

import os
import platform
import time

from pydantic import SecretStr

from openhands.sdk import (
    LLM,
    Conversation,
    RemoteConversation,
    get_logger,
)
from openhands.tools.preset.default import get_default_agent
from openhands.workspace import ApptainerWorkspace

logger = get_logger(__name__)

# 1) Ensure we have LLM API key.
# NOTE: use an explicit raise rather than `assert` — asserts are stripped
# when Python runs with -O, which would silently pass a None key to SecretStr.
api_key = os.getenv("LLM_API_KEY")
if api_key is None:
    raise RuntimeError("LLM_API_KEY environment variable is not set.")

llm = LLM(
    usage_id="agent",
    model=os.getenv("LLM_MODEL", "anthropic/claude-sonnet-4-5-20250929"),
    base_url=os.getenv("LLM_BASE_URL"),
    api_key=SecretStr(api_key),
)


def detect_platform() -> str:
    """Detect the correct platform string for the current machine.

    Returns:
        "linux/arm64" on ARM/aarch64 hosts, otherwise "linux/amd64".
    """
    machine = platform.machine().lower()
    if "arm" in machine or "aarch64" in machine:
        return "linux/arm64"
    return "linux/amd64"


def get_server_image() -> str:
    """Get the server image tag, using a PR-specific image in CI.

    If GITHUB_SHA is set (e.g. running in CI of a PR), use that to ensure
    consistency; otherwise, use the latest image from main.
    """
    platform_str = detect_platform()
    arch = "arm64" if "arm64" in platform_str else "amd64"
    github_sha = os.getenv("GITHUB_SHA")
    if github_sha:
        return f"ghcr.io/openhands/agent-server:{github_sha[:7]}-python-{arch}"
    return "ghcr.io/openhands/agent-server:latest-python"


# 2) Create an Apptainer-based remote workspace that will set up and manage
# the Apptainer container automatically. Use `ApptainerWorkspace` with a
# pre-built agent server image.
# Apptainer (formerly Singularity) doesn't require root access, making it
# ideal for HPC and shared computing environments.
server_image = get_server_image()
logger.info(f"Using server image: {server_image}")

with ApptainerWorkspace(
    # use pre-built image for faster startup
    server_image=server_image,
    host_port=8010,
    platform=detect_platform(),
) as workspace:
    # 3) Create agent
    agent = get_default_agent(
        llm=llm,
        cli_mode=True,
    )

    # 4) Set up callback collection
    received_events: list = []
    last_event_time = {"ts": time.time()}

    def event_callback(event) -> None:
        """Record each event and bump the last-seen timestamp."""
        event_type = type(event).__name__
        logger.info(f"π Callback received event: {event_type}\n{event}")
        received_events.append(event)
        last_event_time["ts"] = time.time()

    # 5) Test the workspace with a simple command
    result = workspace.execute_command(
        "echo 'Hello from sandboxed environment!' && pwd"
    )
    logger.info(
        f"Command '{result.command}' completed with exit code {result.exit_code}"
    )
    logger.info(f"Output: {result.stdout}")

    conversation = Conversation(
        agent=agent,
        workspace=workspace,
        callbacks=[event_callback],
    )
    # A sandboxed workspace always yields a RemoteConversation.
    assert isinstance(conversation, RemoteConversation)

    try:
        logger.info(f"\nπ Conversation ID: {conversation.state.id}")
        logger.info("π Sending first message...")
        conversation.send_message(
            "Read the current repo and write 3 facts about the project into FACTS.txt."
        )
        logger.info("π Running conversation...")
        conversation.run()
        logger.info("β First task completed!")
        logger.info(f"Agent status: {conversation.state.execution_status}")

        # Wait for events to settle (no events for 2 seconds)
        logger.info("β³ Waiting for events to stop...")
        while time.time() - last_event_time["ts"] < 2.0:
            time.sleep(0.1)
        logger.info("β Events have stopped")

        logger.info("π Running conversation again...")
        conversation.send_message("Great! Now delete that file.")
        conversation.run()
        logger.info("β Second task completed!")

        # Report cost (must be before conversation.close())
        cost = conversation.conversation_stats.get_combined_metrics().accumulated_cost
        print(f"EXAMPLE_COST: {cost}")
    finally:
        print("\nπ§Ή Cleaning up conversation...")
        conversation.close()
You can run the example code as-is.
The model name should follow the LiteLLM convention: provider/model_name (e.g., anthropic/claude-sonnet-4-5-20250929, openai/gpt-4o).
The LLM_API_KEY should be the API key for your chosen provider.
ChatGPT Plus/Pro subscribers: You can use LLM.subscription_login() to authenticate with your ChatGPT account and access Codex models without consuming API credits. See the LLM Subscriptions guide for details.
# Minimal ApptainerWorkspace setup: the container is created on entry and
# torn down on exit of the `with` block.
with ApptainerWorkspace(
    server_image="ghcr.io/openhands/agent-server:main-python",
    host_port=8010,  # Maps to container port 8010
) as workspace:
    # Access agent server at http://localhost:8010
    ...