# Integrations

Paprika works with any Python agent framework. The integration pattern is universal: wrap the entry point, route LLM calls through context, register tools.

## General Pattern

All integrations follow this pattern:

  1. Create a PaprikaRuntime
  2. Register tools with runtime.register_tool(name, func)
  3. Wrap your agent execution with @runtime.agent(name="...")
  4. Route LLM calls through ctx.llm.call(...)
  5. Route tool calls through ctx.tools.call(...)

That's it. No magic, no framework lock-in.

## Vanilla Python

Full example: a multi-step research agent.

```python
from paprika import PaprikaRuntime, PolicyConfig

# Create runtime
runtime = PaprikaRuntime(
    policy=PolicyConfig(max_steps=20, max_tokens=50000)
)

# Register tools
def search(query: str) -> str:
    return f"Results for '{query}': AI is advancing..."

def summarize_tool(text: str) -> str:
    return f"Summary: {text[:100]}..."

runtime.register_tool("search", search)
runtime.register_tool("summarize", summarize_tool)

# Define agent
@runtime.agent(name="researcher")
def researcher(ctx, topic: str):
    # Step 1: LLM generates research question
    response = ctx.llm.call(
        provider="openai",
        model="gpt-4o",
        input={
            "messages": [
                {
                    "role": "user",
                    "content": f"Generate a research question about {topic}"
                }
            ]
        }
    )
    question = response["choices"][0]["message"]["content"]

    # Step 2: Search for results
    search_result = ctx.tools.call(
        name="search",
        args={"query": question}
    )

    # Step 3: Summarize results
    summary = ctx.tools.call(
        name="summarize",
        args={"text": search_result}
    )

    return {
        "question": question,
        "results": search_result,
        "summary": summary
    }

# Run the agent
if __name__ == "__main__":
    result = researcher("machine learning")
    print(result)
```

## LangGraph

Wrap a LangGraph graph execution and route LLM calls through context.

```python
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
from paprika import PaprikaRuntime

# Set up Paprika
runtime = PaprikaRuntime()

# Define tool
def search(query: str) -> str:
    return f"Search results for {query}"

runtime.register_tool("search", search)

# Define LangGraph nodes
def node_generate_question(state):
    response = state["ctx"].llm.call(
        provider="openai",
        model="gpt-4o",
        input={
            "messages": [
                {"role": "user", "content": "Generate a question about AI"}
            ]
        }
    )
    state["question"] = response["choices"][0]["message"]["content"]
    return state

def node_search(state):
    result = state["ctx"].tools.call(
        name="search",
        args={"query": state["question"]}
    )
    state["search_result"] = result
    return state

# Build graph
graph_builder = StateGraph(dict)
graph_builder.add_node("generate", node_generate_question)
graph_builder.add_node("search", node_search)
graph_builder.add_edge(START, "generate")
graph_builder.add_edge("generate", "search")
graph_builder.add_edge("search", END)
graph = graph_builder.compile()

# Wrap graph execution with Paprika agent
@runtime.agent(name="langgraph_agent")
def run_graph(ctx):
    initial_state = {"ctx": ctx}
    result = graph.invoke(initial_state)
    return result

# Run
if __name__ == "__main__":
    result = run_graph()
    print(result)
```

Key: Pass ctx into the graph state so nodes can access it. Route all LLM calls and tool calls through ctx.llm.call() and ctx.tools.call().

## CrewAI

Wrap crew execution and route decisions through Paprika context.

```python
from crewai import Agent, Task, Crew
from paprika import PaprikaRuntime, PolicyConfig

# Set up Paprika
runtime = PaprikaRuntime(
    policy=PolicyConfig(max_steps=30)
)

# Register tools
from crewai_tools import SerperDevTool
search_tool = SerperDevTool()
runtime.register_tool("search", lambda query: search_tool.run(query))

@runtime.agent(name="crew_researcher")
def research_crew(ctx):
    def crew_search(query: str):
        return ctx.tools.call(name="search", args={"query": query})

    result = crew_search("latest AI trends")
    return result

# Run
if __name__ == "__main__":
    result = research_crew()
    print(result)
```

Note: CrewAI does not expose agent internals for context injection. Integration requires wrapping CrewAI's LLM calls via monkey-patching, logging trajectory post-execution, or using a custom LLM provider. This is pattern-level integration, not deep integration.

## AutoGen

Wrap agent conversations and route through Paprika.

```python
from autogen import AssistantAgent, UserProxyAgent
from paprika import PaprikaRuntime

runtime = PaprikaRuntime()

assistant = AssistantAgent(name="assistant", llm_config={"model": "gpt-4o"})
user_proxy = UserProxyAgent(name="user", human_input_mode="NEVER")

@runtime.agent(name="autogen_chat")
def run_autogen(ctx, task: str):
    user_proxy.initiate_chat(
        assistant,
        message=task,
        max_consecutive_auto_reply=5
    )

    messages = user_proxy.chat_messages[assistant]
    return {
        "messages": messages,
        "status": "completed"
    }

# Run
if __name__ == "__main__":
    result = run_autogen(task="Research AI trends")
    print(result)
```

Note: Like CrewAI, AutoGen does not expose internal LLM calls. Integration is pattern-level: wrap execution, log trajectory post-execution.

## Integration Maturity

| Framework | Type | Maturity | Notes |
|-----------|------|----------|-------|
| Vanilla Python | Deep (context injection) | Production | Full step-by-step recording |
| LangGraph | Deep (state-based injection) | Production | Pass context in state |
| CrewAI | Pattern-level | Experimental | Wrapping/logging only |
| AutoGen | Pattern-level | Experimental | Wrapping/logging only |

Deep integration: Paprika context used directly, full recording. Pattern-level integration: Entry point wrapped, trajectory logged.

## Custom Frameworks

For any agent framework:

1. Route LLM calls:

   ```python
   response = ctx.llm.call(
       provider="openai",
       model="gpt-4o",
       input={...}
   )
   ```

2. Route tool calls:

   ```python
   result = ctx.tools.call(
       name="tool_name",
       args={...}
   )
   ```

3. Wrap entry point:

   ```python
   @runtime.agent(name="my_agent")
   def my_agent(ctx):
       # ... your framework logic ...
       pass
   ```

## Next Steps