FRAMEWORKS
Framework Integrations
AgentShield integrates natively with the most popular AI agent frameworks. All integrations are non-blocking — they never slow down or break your agents.
LangChain
pip install agentshield langchain-core

from agentshield.integrations.langchain import AgentShieldCallback
from langchain_openai import ChatOpenAI
callback = AgentShieldCallback(
    agent_name="my-langchain-agent",
    session_id="optional-session-id",
)
llm = ChatOpenAI(model="gpt-4o")
llm.invoke("Hello", config={"callbacks": [callback]})

CrewAI
pip install agentshield crewai

from agentshield.integrations.crewai import AgentShieldCrewCallback
from crewai import Agent, Crew, Task
# Instantiating registers the listener automatically
callback = AgentShieldCrewCallback(agent_name="my-crew")
researcher = Agent(role="Researcher", goal="...", backstory="...")
task = Task(description="Research ...", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task])
crew.kickoff()

AutoGen
pip install agentshield pyautogen

from agentshield.integrations.autogen import AgentShieldAutoGenHook
from autogen import AssistantAgent, UserProxyAgent
hook = AgentShieldAutoGenHook(agent_name="my-autogen-agent")
assistant = AssistantAgent("assistant", llm_config={"model": "gpt-4o"})
hook.register(assistant) # attach hooks
user = UserProxyAgent("user", human_input_mode="NEVER")
user.initiate_chat(assistant, message="Research AI safety.")

LlamaIndex
pip install agentshield llama-index-core

from agentshield.integrations.llamaindex import AgentShieldLlamaIndexCallback
from llama_index.core import Settings
from llama_index.core.callbacks import CallbackManager
callback = AgentShieldLlamaIndexCallback(agent_name="my-llama-agent")
Settings.callback_manager = CallbackManager([callback])
# All LLM calls via LlamaIndex are now tracked
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What is AgentShield?")

Direct API / Any framework
No framework? Use the decorator directly on any Python function that calls an LLM:
from agentshield import shield
@shield(agent="my-agent")
async def call_llm(prompt: str) -> str:
    # Your existing LLM call
    response = await openai.chat.completions.create(...)
    return response.choices[0].message.content