This tutorial walks you through wiring up a LangChain ReAct agent with a tool that executes shell commands inside a K7 sandbox.

Prerequisites

  • K7 API running (k7 start-api) and reachable
  • API key generated: k7 generate-api-key <name>
  • Python 3.10+
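
Before moving on, you can confirm the basics from Python (a minimal sketch: it only checks the interpreter version and that the endpoint host accepts TCP connections, not that your API key is valid; substitute your real endpoint):

import socket, sys
from urllib.parse import urlparse

# The tutorial assumes Python 3.10 or newer.
assert sys.version_info >= (3, 10), "Python 3.10+ is required"

endpoint = "https://your-k7-endpoint"  # same value you will put in K7_ENDPOINT below
parsed = urlparse(endpoint)
port = parsed.port or (443 if parsed.scheme == "https" else 80)

# A plain TCP connection is enough to confirm the API host is reachable.
with socket.create_connection((parsed.hostname, port), timeout=5):
    print(f"{parsed.hostname}:{port} is reachable")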

Setup

Create a .env file with your credentials and defaults:
K7_ENDPOINT=https://your-k7-endpoint
K7_API_KEY=your-api-key
K7_SANDBOX_NAME=lc-agent
K7_SANDBOX_IMAGE=alpine:latest
K7_NAMESPACE=default
OPENAI_API_KEY=sk-your-openai-key
OPENAI_MODEL=gpt-4o-mini
Install dependencies:
pip install langchain langchain-openai python-dotenv katakate
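
With the dependencies in place, a quick sanity check that the .env file is picked up (a minimal sketch using the same variable names as above):

import os
from dotenv import load_dotenv

load_dotenv()

# Fail fast if any required credential is missing.
missing = [k for k in ("K7_ENDPOINT", "K7_API_KEY", "OPENAI_API_KEY") if not os.getenv(k)]
if missing:
    raise SystemExit(f"Missing required settings in .env: {', '.join(missing)}")
print("Environment looks good")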

Agent code

import os, time
from typing import Optional
from dotenv import load_dotenv
from langchain.agents import initialize_agent, AgentType
from langchain.memory import ConversationBufferMemory
from langchain.tools import Tool
from langchain_openai import ChatOpenAI
from katakate import Client, SandboxProxy

load_dotenv()

K7_ENDPOINT = os.getenv("K7_ENDPOINT")
K7_API_KEY = os.getenv("K7_API_KEY")
SANDBOX_NAME = os.getenv("K7_SANDBOX_NAME", "lc-agent")
SANDBOX_IMAGE = os.getenv("K7_SANDBOX_IMAGE", "alpine:latest")
SANDBOX_NAMESPACE = os.getenv("K7_NAMESPACE", "default")

k7 = Client(endpoint=K7_ENDPOINT, api_key=K7_API_KEY)
_sb: Optional[SandboxProxy] = None

def ensure_sandbox_ready(timeout_seconds: int = 60) -> SandboxProxy:
    """Create the sandbox if needed and wait until it reports Running."""
    try:
        sb = k7.create({
            "name": SANDBOX_NAME,
            "image": SANDBOX_IMAGE,
            "namespace": SANDBOX_NAMESPACE,
        })
    except Exception:
        # create() can fail if the sandbox already exists; attach to it instead.
        sb = SandboxProxy(SANDBOX_NAME, SANDBOX_NAMESPACE, k7)

    # Poll the API until the sandbox shows up as Running, or give up at the deadline.
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        for info in k7.list(namespace=SANDBOX_NAMESPACE):
            if info.get("name") == SANDBOX_NAME and info.get("status") == "Running":
                return sb
        time.sleep(2)
    raise RuntimeError("Sandbox did not become Running in time")

def run_code_in_sandbox(code: str) -> str:
    """Run a shell command in the shared sandbox, creating it lazily on first use."""
    global _sb
    if _sb is None:
        _sb = ensure_sandbox_ready()
    result = _sb.exec(code)
    # Treat a missing exit code as a failure and surface both streams so the agent can react.
    if result.get("exit_code", 1) != 0:
        return f"[stderr]\n{result.get('stderr','')}\n[stdout]\n{result.get('stdout','')}"
    return result.get("stdout", "")

tool = Tool(
    name="sandbox_exec",
    description="Execute a shell command inside an isolated K7 sandbox. Input should be a shell command string.",
    func=run_code_in_sandbox,
)

llm = ChatOpenAI(model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"), temperature=0)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

agent = initialize_agent(
    tools=[tool],
    llm=llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    memory=memory,
    verbose=True,
    handle_parsing_errors=True,
)

print("Ask me to run a command in a sandbox, e.g.: 'List files in /'\n")
while True:
    try:
        user = input("You: ")
    except (EOFError, KeyboardInterrupt):
        break
    if not user.strip():
        continue
    resp = agent.invoke({"input": user})
    print("Agent:", resp.get("output", str(resp)))
While the agent is running, you can shell into the same sandbox in parallel: k7 shell lc-agent.