All benchmark data is from the LLM Framework Showdown notebook #23 (graph workflows).
## Code Side-by-Side

All three snippets assume `query`, the benchmark's input string, is already defined.
### SynapseKit (20 LoC)

```python
from synapsekit import StateGraph, END, TypedState, StateField

schema = TypedState(fields={
    'query': StateField(default=''),
    'result': StateField(default=''),
})

def research(state): return {'result': f'Research on: {state["query"]}'}
def detailed_answer(state): return {'result': state['result'] + ' [detailed]'}
def quick_answer(state): return {'result': state['result'] + ' [quick]'}
def router(state): return 'detailed' if len(state['query']) > 20 else 'quick'

graph = StateGraph(schema)
graph.add_node('research', research)
graph.add_node('detailed_answer', detailed_answer)
graph.add_node('quick_answer', quick_answer)
graph.set_entry_point('research')
graph.add_conditional_edge('research', router,
    {'detailed': 'detailed_answer', 'quick': 'quick_answer'})
graph.add_edge('detailed_answer', END)
graph.add_edge('quick_answer', END)
app = graph.compile()
result = app.run_sync({'query': query, 'result': ''})
```
### LangChain / LangGraph (20 LoC)

```python
from langgraph.graph import StateGraph, END
from typing import TypedDict

class State(TypedDict):
    query: str
    result: str

def research(state): return {'result': f'Research on: {state["query"]}'}
def detailed_answer(state): return {'result': state['result'] + ' [detailed]'}
def quick_answer(state): return {'result': state['result'] + ' [quick]'}
def router(state): return 'detailed' if len(state['query']) > 20 else 'quick'

graph = StateGraph(State)
graph.add_node('research', research)
graph.add_node('detailed_answer', detailed_answer)
graph.add_node('quick_answer', quick_answer)
graph.set_entry_point('research')
graph.add_conditional_edges('research', router,
    {'detailed': 'detailed_answer', 'quick': 'quick_answer'})
graph.add_edge('detailed_answer', END)
graph.add_edge('quick_answer', END)
app = graph.compile()
result = app.invoke({'query': query, 'result': ''})
```
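Running the compiled graph on a short and a long query exercises both branches; `invoke` returns the final state dict. The outputs in the comments follow directly from the snippet above:

```python
# Short query (len <= 20) takes the 'quick' branch.
out = app.invoke({'query': 'hi', 'result': ''})
print(out)  # {'query': 'hi', 'result': 'Research on: hi [quick]'}

# Longer query (len > 20) takes the 'detailed' branch.
out = app.invoke({'query': 'Compare graph frameworks for agents', 'result': ''})
print(out)  # {'query': ..., 'result': 'Research on: Compare graph frameworks for agents [detailed]'}
```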
### LlamaIndex (13 LoC)

The original snippet imported `AgentWorkflow`, `FunctionAgent`, `FunctionTool`, and `MockLLM`, but never used them; with no graph primitive to call, the workflow is plain Python and the imports are dropped here.

```python
# No graph primitive in LlamaIndex - routing is hand-written Python.
state = {'query': query, 'result': ''}

def research_fn(query: str) -> str:
    return f'Research on: {query}'

def detailed_fn(research: str) -> str:
    return research + ' [detailed]'

def quick_fn(research: str) -> str:
    return research + ' [quick]'

research_result = research_fn(state['query'])
if len(state['query']) > 20:
    state['result'] = detailed_fn(research_result)
else:
    state['result'] = quick_fn(research_result)
```
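Because nothing frames this pipeline, every piece of graph infrastructure has to be hand-rolled. As an illustration (everything below is a hypothetical sketch, not LlamaIndex API), even minimal checkpointing means serializing state yourself:

```python
import json
from pathlib import Path

# Hypothetical hand-rolled checkpointing for the manual pipeline above;
# none of this comes from LlamaIndex - it is plain Python you must own.
CHECKPOINT = Path('workflow_state.json')

def save_checkpoint(state: dict) -> None:
    CHECKPOINT.write_text(json.dumps(state))

def load_checkpoint() -> dict | None:
    if CHECKPOINT.exists():
        return json.loads(CHECKPOINT.read_text())
    return None

state = load_checkpoint() or {'query': query, 'result': ''}
# ... run the routing steps, then persist:
save_checkpoint(state)
```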
## API Similarity

| Metric | Value |
| --- | --- |
| SynapseKit vs LangGraph API match | ~95% |
| Key API difference | `add_conditional_edge` (SynapseKit) vs `add_conditional_edges` (LangGraph) |
| State definition | `TypedState` vs `TypedDict` |
## LlamaIndex Gap

| Metric | Value |
| --- | --- |
| Graph features supported | 0 / 7 |
| Workaround | Manual Python |
| Missing infrastructure | Checkpointing, streaming, visualization |
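For contrast, here is a minimal sketch of those three features in LangGraph, where they ship built in. It assumes the compiled `graph` from the side-by-side snippet; `MemorySaver` is LangGraph's in-memory checkpointer:

```python
from langgraph.checkpoint.memory import MemorySaver

# Checkpointing: recompile the same graph with a built-in checkpointer.
app = graph.compile(checkpointer=MemorySaver())
config = {'configurable': {'thread_id': 'demo-1'}}  # checkpoints are keyed by thread

# Streaming: yields intermediate state updates instead of one final dict.
for update in app.stream({'query': 'hi', 'result': ''}, config):
    print(update)

# Visualization: render the graph topology as Mermaid markup.
print(app.get_graph().draw_mermaid())
```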
## Notebook #23 Verdict

SynapseKit and LangChain (via LangGraph) tie on graph workflows, with near-identical APIs and 7/7 feature coverage. LangGraph's `TypedDict` state is simpler for basic cases; SynapseKit's `StateField` reducers handle merging state from parallel branches more cleanly. LlamaIndex has no graph primitive: if your workflow needs conditional routing, loops, or parallel branches, you are building that infrastructure from scratch.
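To make the reducer point concrete, LangGraph's counterpart is an `Annotated` reducer on a state field, which tells the runtime how to merge writes from parallel branches. A minimal sketch using LangGraph's documented reducer pattern (the branch nodes are illustrative, not from the benchmark):

```python
import operator
from typing import Annotated, TypedDict
from langgraph.graph import StateGraph, START, END

class State(TypedDict):
    # operator.add is the reducer: parallel writes are concatenated,
    # not overwritten, so fan-out branches can merge safely.
    results: Annotated[list, operator.add]

def branch_a(state): return {'results': ['from A']}
def branch_b(state): return {'results': ['from B']}

g = StateGraph(State)
g.add_node('branch_a', branch_a)
g.add_node('branch_b', branch_b)
g.add_edge(START, 'branch_a')  # two edges from START fan out in parallel
g.add_edge(START, 'branch_b')
g.add_edge('branch_a', END)
g.add_edge('branch_b', END)
app = g.compile()
print(app.invoke({'results': []}))  # {'results': ['from A', 'from B']} (order may vary)
```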