Skip to content

Adapters API

Optional adapters for external integrations.

Overview

AgentECS provides protocol-based adapters for common external services:

- Vector Stores: Semantic search and RAG (ChromaDB)
- LLM Clients: Structured output with multiple providers (Instructor)
- Configuration: Type-safe settings with environment variables

Design Principles:

- Optional dependencies: Don't require installation unless used
- Protocol-based: Easy to implement custom adapters
- Multi-provider: Support multiple backends per protocol
- Type-safe: Generics ensure type safety throughout


Vector Store Adapter

Semantic search and retrieval-augmented generation.

VectorStore Protocol

VectorStore

Bases: Protocol[T]

Protocol for vector database operations with typed data models.

Generic type T represents the data model (Pydantic model or dataclass) that will be stored alongside vectors.

Usage

@dataclass
class Document:
    title: str
    content: str

store: VectorStore[Document] = ChromaAdapter.from_memory("docs", Document)
store.add("doc1", embedding=[...], text="...", data=Document(...))
results = store.search(query_embedding=[...], mode=SearchMode.HYBRID)

Source code in src/agentecs/adapters/protocol.py
@runtime_checkable
class VectorStore(Protocol[T]):
    """Protocol for vector database operations with typed data models.

    Generic type T represents the data model (Pydantic model or dataclass)
    that will be stored alongside vectors.

    Usage:
        @dataclass
        class Document:
            title: str
            content: str

        store: VectorStore[Document] = ChromaAdapter.from_memory("docs", Document)
        store.add("doc1", embedding=[...], text="...", data=Document(...))
        results = store.search(query_embedding=[...], mode=SearchMode.HYBRID)

    Note:
        All method bodies here are protocol stubs (``...``); concrete
        adapters such as ChromaAdapter supply the behavior. Async variants
        are declared only for add, add_batch, get, and search.
    """

    def add(
        self,
        id: str,
        embedding: list[float],
        text: str,
        data: T,
    ) -> str:
        """Add a single item to the store.

        Args:
            id: Unique identifier for the item.
            embedding: Vector embedding.
            text: Text content for keyword search.
            data: Typed data model to store.

        Returns:
            The ID of the added item.
        """
        ...

    def add_batch(self, items: list[VectorStoreItem[T]]) -> list[str]:
        """Add multiple items to the store.

        Args:
            items: List of items to add.

        Returns:
            List of IDs for added items.
        """
        ...

    def get(self, id: str) -> T | None:
        """Get an item by ID.

        Args:
            id: Item identifier.

        Returns:
            The data model if found, None otherwise.
        """
        ...

    def get_batch(self, ids: list[str]) -> list[T | None]:
        """Get multiple items by ID.

        Args:
            ids: List of item identifiers.

        Returns:
            List of data models (None for missing items).
        """
        ...

    def update(
        self,
        id: str,
        embedding: list[float] | None = None,
        text: str | None = None,
        data: T | None = None,
    ) -> bool:
        """Update an existing item.

        Args:
            id: Item identifier.
            embedding: New embedding (optional).
            text: New text (optional).
            data: New data model (optional).

        Returns:
            True if item existed and was updated.
        """
        ...

    def delete(self, id: str) -> bool:
        """Delete an item.

        Args:
            id: Item identifier.

        Returns:
            True if item existed and was deleted.
        """
        ...

    def delete_batch(self, ids: list[str]) -> int:
        """Delete multiple items.

        Args:
            ids: List of item identifiers.

        Returns:
            Number of items deleted.
        """
        ...

    def search(
        self,
        query_embedding: list[float] | None = None,
        query_text: str | None = None,
        mode: SearchMode = SearchMode.VECTOR,
        filters: Filter | FilterGroup | None = None,
        limit: int = 10,
    ) -> list[SearchResult[T]]:
        """Search the store.

        Args:
            query_embedding: Query vector for vector/hybrid search.
            query_text: Query text for keyword/hybrid search.
            mode: Search mode (vector, keyword, or hybrid).
            filters: Optional metadata filters.
            limit: Maximum number of results.

        Returns:
            List of search results with scores.

        Raises:
            ValueError: Implementations may raise this when the query input
                required by ``mode`` is missing (ChromaAdapter does).
        """
        ...

    def count(self) -> int:
        """Get total number of items in the store.

        Returns:
            Item count.
        """
        ...

    # Async variants

    async def add_async(
        self,
        id: str,
        embedding: list[float],
        text: str,
        data: T,
    ) -> str:
        """Add a single item to the store (async).

        Args:
            id: Unique identifier for the item.
            embedding: Vector embedding.
            text: Text content for keyword search.
            data: Typed data model to store.

        Returns:
            The ID of the added item.
        """
        ...

    async def add_batch_async(self, items: list[VectorStoreItem[T]]) -> list[str]:
        """Add multiple items to the store (async).

        Args:
            items: List of items to add.

        Returns:
            List of IDs for added items.
        """
        ...

    async def get_async(self, id: str) -> T | None:
        """Get an item by ID (async).

        Args:
            id: Item identifier.

        Returns:
            The data model if found, None otherwise.
        """
        ...

    async def search_async(
        self,
        query_embedding: list[float] | None = None,
        query_text: str | None = None,
        mode: SearchMode = SearchMode.VECTOR,
        filters: Filter | FilterGroup | None = None,
        limit: int = 10,
    ) -> list[SearchResult[T]]:
        """Search the store (async).

        Args:
            query_embedding: Query vector for vector/hybrid search.
            query_text: Query text for keyword/hybrid search.
            mode: Search mode (vector, keyword, or hybrid).
            filters: Optional metadata filters.
            limit: Maximum number of results.

        Returns:
            List of search results with scores.

        Raises:
            ValueError: Implementations may raise this when the query input
                required by ``mode`` is missing (ChromaAdapter does).
        """
        ...

add(id, embedding, text, data)

Add a single item to the store.

Parameters:

Name Type Description Default
id str

Unique identifier for the item.

required
embedding list[float]

Vector embedding.

required
text str

Text content for keyword search.

required
data T

Typed data model to store.

required

Returns:

Type Description
str

The ID of the added item.

Source code in src/agentecs/adapters/protocol.py
def add(
    self,
    id: str,
    embedding: list[float],
    text: str,
    data: T,
) -> str:
    """Add a single item to the store.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.

    Args:
        id: Unique identifier for the item.
        embedding: Vector embedding.
        text: Text content for keyword search.
        data: Typed data model to store.

    Returns:
        The ID of the added item.
    """
    ...

add_batch(items)

Add multiple items to the store.

Parameters:

Name Type Description Default
items list[VectorStoreItem[T]]

List of items to add.

required

Returns:

Type Description
list[str]

List of IDs for added items.

Source code in src/agentecs/adapters/protocol.py
def add_batch(self, items: list[VectorStoreItem[T]]) -> list[str]:
    """Add multiple items to the store.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.

    Args:
        items: List of items to add.

    Returns:
        List of IDs for added items.
    """
    ...

get(id)

Get an item by ID.

Parameters:

Name Type Description Default
id str

Item identifier.

required

Returns:

Type Description
T | None

The data model if found, None otherwise.

Source code in src/agentecs/adapters/protocol.py
def get(self, id: str) -> T | None:
    """Get an item by ID.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.

    Args:
        id: Item identifier.

    Returns:
        The data model if found, None otherwise.
    """
    ...

get_batch(ids)

Get multiple items by ID.

Parameters:

Name Type Description Default
ids list[str]

List of item identifiers.

required

Returns:

Type Description
list[T | None]

List of data models (None for missing items).

Source code in src/agentecs/adapters/protocol.py
def get_batch(self, ids: list[str]) -> list[T | None]:
    """Get multiple items by ID.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.

    Args:
        ids: List of item identifiers.

    Returns:
        List of data models (None for missing items).
    """
    ...

update(id, embedding=None, text=None, data=None)

Update an existing item.

Parameters:

Name Type Description Default
id str

Item identifier.

required
embedding list[float] | None

New embedding (optional).

None
text str | None

New text (optional).

None
data T | None

New data model (optional).

None

Returns:

Type Description
bool

True if item existed and was updated.

Source code in src/agentecs/adapters/protocol.py
def update(
    self,
    id: str,
    embedding: list[float] | None = None,
    text: str | None = None,
    data: T | None = None,
) -> bool:
    """Update an existing item.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.
    Fields left as None are expected to remain unchanged.

    Args:
        id: Item identifier.
        embedding: New embedding (optional).
        text: New text (optional).
        data: New data model (optional).

    Returns:
        True if item existed and was updated.
    """
    ...

delete(id)

Delete an item.

Parameters:

Name Type Description Default
id str

Item identifier.

required

Returns:

Type Description
bool

True if item existed and was deleted.

Source code in src/agentecs/adapters/protocol.py
def delete(self, id: str) -> bool:
    """Delete an item.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.

    Args:
        id: Item identifier.

    Returns:
        True if item existed and was deleted.
    """
    ...

delete_batch(ids)

Delete multiple items.

Parameters:

Name Type Description Default
ids list[str]

List of item identifiers.

required

Returns:

Type Description
int

Number of items deleted.

Source code in src/agentecs/adapters/protocol.py
def delete_batch(self, ids: list[str]) -> int:
    """Delete multiple items.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.
    Unknown IDs are not counted in the result.

    Args:
        ids: List of item identifiers.

    Returns:
        Number of items deleted.
    """
    ...

search(query_embedding=None, query_text=None, mode=SearchMode.VECTOR, filters=None, limit=10)

Search the store.

Parameters:

Name Type Description Default
query_embedding list[float] | None

Query vector for vector/hybrid search.

None
query_text str | None

Query text for keyword/hybrid search.

None
mode SearchMode

Search mode (vector, keyword, or hybrid).

VECTOR
filters Filter | FilterGroup | None

Optional metadata filters.

None
limit int

Maximum number of results.

10

Returns:

Type Description
list[SearchResult[T]]

List of search results with scores.

Source code in src/agentecs/adapters/protocol.py
def search(
    self,
    query_embedding: list[float] | None = None,
    query_text: str | None = None,
    mode: SearchMode = SearchMode.VECTOR,
    filters: Filter | FilterGroup | None = None,
    limit: int = 10,
) -> list[SearchResult[T]]:
    """Search the store.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.

    Args:
        query_embedding: Query vector for vector/hybrid search.
        query_text: Query text for keyword/hybrid search.
        mode: Search mode (vector, keyword, or hybrid).
        filters: Optional metadata filters.
        limit: Maximum number of results.

    Returns:
        List of search results with scores.

    Raises:
        ValueError: Implementations may raise this when the query input
            required by ``mode`` is missing (ChromaAdapter does).
    """
    ...

count()

Get total number of items in the store.

Returns:

Type Description
int

Item count.

Source code in src/agentecs/adapters/protocol.py
def count(self) -> int:
    """Get total number of items in the store.

    Protocol stub (body is ``...``); concrete adapters provide the behavior.

    Returns:
        Item count.
    """
    ...

ChromaDB Adapter

ChromaAdapter

ChromaDB implementation of VectorStore protocol.

Stores typed data models (Pydantic or dataclass) with vector embeddings and supports hybrid search.

Attributes:

Name Type Description
collection Collection

The underlying ChromaDB collection.

data_type type[T]

The type of data model being stored.

Source code in src/agentecs/adapters/chroma.py
(line-number gutter for source lines 214-625 of src/agentecs/adapters/chroma.py omitted)
class ChromaAdapter[T]:
    """ChromaDB implementation of VectorStore protocol.

    Stores typed data models (Pydantic or dataclass) with vector embeddings
    and supports hybrid search.

    Attributes:
        collection: The underlying ChromaDB collection.
        data_type: The type of data model being stored.
    """

    def __init__(self, collection: Collection, data_type: type[T]) -> None:
        """Initialize adapter with a ChromaDB collection.

        Use factory methods instead of direct construction.

        Args:
            collection: ChromaDB collection instance.
            data_type: Type of data model to store.
        """
        self._collection = collection
        self._data_type = data_type

    @classmethod
    def from_client(
        cls,
        client: chromadb.ClientAPI,  # type: ignore[name-defined]
        collection_name: str,
        data_type: type[T],
    ) -> ChromaAdapter[T]:
        """Create adapter from existing ChromaDB client.

        Args:
            client: ChromaDB client instance.
            collection_name: Name of collection to use/create.
            data_type: Type of data model to store.

        Returns:
            Configured ChromaAdapter instance.
        """
        collection = client.get_or_create_collection(name=collection_name)
        return cls(collection, data_type)

    @classmethod
    def from_path(
        cls,
        path: str,
        collection_name: str,
        data_type: type[T],
    ) -> ChromaAdapter[T]:
        """Create adapter with persistent storage.

        Args:
            path: Directory path for persistent storage.
            collection_name: Name of collection to use/create.
            data_type: Type of data model to store.

        Returns:
            Configured ChromaAdapter instance.
        """
        try:
            import chromadb
        except ImportError as e:
            raise ImportError(
                "chromadb is required for ChromaAdapter. Install with: pip install agentecs[chroma]"
            ) from e

        client = chromadb.PersistentClient(path=path)
        return cls.from_client(client, collection_name, data_type)

    @classmethod
    def from_memory(
        cls,
        collection_name: str,
        data_type: type[T],
    ) -> ChromaAdapter[T]:
        """Create adapter with ephemeral (in-memory) storage.

        Args:
            collection_name: Name of collection to use/create.
            data_type: Type of data model to store.

        Returns:
            Configured ChromaAdapter instance.
        """
        try:
            import chromadb
        except ImportError as e:
            raise ImportError(
                "chromadb is required for ChromaAdapter. Install with: pip install agentecs[chroma]"
            ) from e

        client = chromadb.EphemeralClient()
        return cls.from_client(client, collection_name, data_type)

    @property
    def collection(self) -> Collection:
        """Get the underlying ChromaDB collection."""
        return self._collection

    @property
    def data_type(self) -> type[T]:
        """Get the data type being stored."""
        return self._data_type

    def add(
        self,
        id: str,
        embedding: list[float],
        text: str,
        data: T,
    ) -> str:
        """Add a single item to the store.

        Args:
            id: Unique identifier for the item.
            embedding: Vector embedding.
            text: Text content for keyword search.
            data: Typed data model to store.

        Returns:
            The ID of the added item.
        """
        metadata = _serialize_to_metadata(data, self._data_type)
        self._collection.add(
            ids=[id],
            embeddings=[embedding],  # type: ignore[arg-type]
            documents=[text],
            metadatas=[metadata],
        )
        return id

    def add_batch(self, items: list[VectorStoreItem[T]]) -> list[str]:
        """Add multiple items to the store.

        Args:
            items: List of items to add.

        Returns:
            List of IDs for added items.
        """
        if not items:
            return []

        ids = [item.id for item in items]
        embeddings = [item.embedding for item in items]
        documents = [item.text for item in items]
        metadatas = [_serialize_to_metadata(item.data, self._data_type) for item in items]

        self._collection.add(
            ids=ids,
            embeddings=embeddings,  # type: ignore[arg-type]
            documents=documents,
            metadatas=metadatas,  # type: ignore[arg-type]
        )
        return ids

    def get(self, id: str) -> T | None:
        """Get an item by ID.

        Args:
            id: Item identifier.

        Returns:
            The data model if found, None otherwise.
        """
        result = self._collection.get(ids=[id], include=["metadatas"])
        if not result["ids"]:
            return None

        metadata = result["metadatas"][0]  # type: ignore[index]
        return _deserialize_from_metadata(metadata, self._data_type)  # type: ignore[arg-type]

    def get_batch(self, ids: list[str]) -> list[T | None]:
        """Get multiple items by ID.

        Args:
            ids: List of item identifiers.

        Returns:
            List of data models (None for missing items).
        """
        if not ids:
            return []

        result = self._collection.get(ids=ids, include=["metadatas"])

        # Build lookup from returned results
        found: dict[str, dict[str, Any]] = {}
        for i, id_ in enumerate(result["ids"]):
            found[id_] = result["metadatas"][i]  # type: ignore[index,assignment]

        # Return in original order
        return [
            _deserialize_from_metadata(found[id_], self._data_type) if id_ in found else None
            for id_ in ids
        ]

    def update(
        self,
        id: str,
        embedding: list[float] | None = None,
        text: str | None = None,
        data: T | None = None,
    ) -> bool:
        """Update an existing item.

        Args:
            id: Item identifier.
            embedding: New embedding (optional).
            text: New text (optional).
            data: New data model (optional).

        Returns:
            True if item existed and was updated.
        """
        # Check if exists
        existing = self._collection.get(ids=[id])
        if not existing["ids"]:
            return False

        update_kwargs: dict[str, Any] = {"ids": [id]}

        if embedding is not None:
            update_kwargs["embeddings"] = [embedding]
        if text is not None:
            update_kwargs["documents"] = [text]
        if data is not None:
            update_kwargs["metadatas"] = [_serialize_to_metadata(data, self._data_type)]

        self._collection.update(**update_kwargs)
        return True

    def delete(self, id: str) -> bool:
        """Delete an item.

        Args:
            id: Item identifier.

        Returns:
            True if item existed and was deleted.
        """
        # Check if exists first
        existing = self._collection.get(ids=[id])
        if not existing["ids"]:
            return False

        self._collection.delete(ids=[id])
        return True

    def delete_batch(self, ids: list[str]) -> int:
        """Delete multiple items.

        Args:
            ids: List of item identifiers.

        Returns:
            Number of items deleted.
        """
        if not ids:
            return 0

        # Check which exist
        existing = self._collection.get(ids=ids)
        existing_ids = set(existing["ids"])

        if not existing_ids:
            return 0

        self._collection.delete(ids=list(existing_ids))
        return len(existing_ids)

    def search(
        self,
        query_embedding: list[float] | None = None,
        query_text: str | None = None,
        mode: SearchMode = SearchMode.VECTOR,
        filters: Filter | FilterGroup | None = None,
        limit: int = 10,
    ) -> list[SearchResult[T]]:
        """Search the store.

        Args:
            query_embedding: Query vector for vector/hybrid search.
            query_text: Query text for keyword/hybrid search.
            mode: Search mode (vector, keyword, or hybrid).
            filters: Optional metadata filters.
            limit: Maximum number of results.

        Returns:
            List of search results with scores.
        """
        where = _build_chroma_where(filters)

        if mode == SearchMode.KEYWORD:
            if query_text is None:
                raise ValueError("query_text required for keyword search")
            # ChromaDB doesn't have pure keyword search, use where_document
            result = self._collection.query(
                query_texts=[query_text],
                n_results=limit,
                where=where,
                include=["metadatas", "distances", "documents"],
            )
        elif mode == SearchMode.VECTOR:
            if query_embedding is None:
                raise ValueError("query_embedding required for vector search")
            result = self._collection.query(
                query_embeddings=[query_embedding],  # type: ignore[arg-type]
                n_results=limit,
                where=where,
                include=["metadatas", "distances", "documents"],
            )
        else:  # HYBRID
            if query_embedding is None:
                raise ValueError("query_embedding required for hybrid search")
            # ChromaDB hybrid: use embedding + optional text filter
            result = self._collection.query(
                query_embeddings=[query_embedding],  # type: ignore[arg-type]
                query_texts=[query_text] if query_text else None,
                n_results=limit,
                where=where,
                include=["metadatas", "distances", "documents"],
            )

        # Convert results
        results: list[SearchResult[T]] = []
        if result["ids"] and result["ids"][0]:
            ids = result["ids"][0]
            metadatas = result["metadatas"][0] if result["metadatas"] else [{}] * len(ids)
            distances = result["distances"][0] if result["distances"] else [0.0] * len(ids)

            for i, id_ in enumerate(ids):
                data = _deserialize_from_metadata(metadatas[i], self._data_type)  # type: ignore[arg-type]
                distance = distances[i]
                # Convert distance to score (cosine: score = 1 - distance/2 for [-1,1] range)
                # ChromaDB uses squared L2 by default, but we assume cosine was set
                score = max(0.0, 1.0 - distance)

                results.append(
                    SearchResult(
                        id=id_,
                        data=data,
                        score=score,
                        distance=distance,
                    )
                )

        return results

    def count(self) -> int:
        """Get total number of items in the store.

        Returns:
            Item count.
        """
        return int(self._collection.count())

    # Async variants - ChromaDB is sync, so we wrap in executor

    async def add_async(
        self,
        id: str,
        embedding: list[float],
        text: str,
        data: T,
    ) -> str:
        """Add a single item to the store (async).

        Note: ChromaDB is synchronous, this runs in thread executor.
        """
        import asyncio

        return await asyncio.get_event_loop().run_in_executor(
            None, lambda: self.add(id, embedding, text, data)
        )

    async def add_batch_async(self, items: list[VectorStoreItem[T]]) -> list[str]:
        """Add multiple items to the store (async).

        Note: ChromaDB is synchronous, this runs in thread executor.
        """
        import asyncio

        return await asyncio.get_event_loop().run_in_executor(None, lambda: self.add_batch(items))

    async def get_async(self, id: str) -> T | None:
        """Get an item by ID (async).

        Note: ChromaDB is synchronous, this runs in thread executor.
        """
        import asyncio

        return await asyncio.get_event_loop().run_in_executor(None, lambda: self.get(id))

    async def search_async(
        self,
        query_embedding: list[float] | None = None,
        query_text: str | None = None,
        mode: SearchMode = SearchMode.VECTOR,
        filters: Filter | FilterGroup | None = None,
        limit: int = 10,
    ) -> list[SearchResult[T]]:
        """Search the store (async).

        Note: ChromaDB is synchronous, this runs in thread executor.
        """
        import asyncio

        return await asyncio.get_event_loop().run_in_executor(
            None, lambda: self.search(query_embedding, query_text, mode, filters, limit)
        )

from_memory(collection_name, data_type) classmethod

Create adapter with ephemeral (in-memory) storage.

Parameters:

Name Type Description Default
collection_name str

Name of collection to use/create.

required
data_type type[T]

Type of data model to store.

required

Returns:

Type Description
ChromaAdapter[T]

Configured ChromaAdapter instance.

Source code in src/agentecs/adapters/chroma.py
@classmethod
def from_memory(
    cls,
    collection_name: str,
    data_type: type[T],
) -> ChromaAdapter[T]:
    """Build an adapter backed by a transient in-memory ChromaDB client.

    Nothing is persisted; all data is lost when the process exits.

    Args:
        collection_name: Collection to fetch or create.
        data_type: Data model class stored alongside vectors.

    Returns:
        A ready-to-use ChromaAdapter.

    Raises:
        ImportError: If the optional chromadb dependency is missing.
    """
    try:
        import chromadb
    except ImportError as e:
        raise ImportError(
            "chromadb is required for ChromaAdapter. Install with: pip install agentecs[chroma]"
        ) from e

    return cls.from_client(chromadb.EphemeralClient(), collection_name, data_type)

add(id, embedding, text, data)

Add a single item to the store.

Parameters:

Name Type Description Default
id str

Unique identifier for the item.

required
embedding list[float]

Vector embedding.

required
text str

Text content for keyword search.

required
data T

Typed data model to store.

required

Returns:

Type Description
str

The ID of the added item.

Source code in src/agentecs/adapters/chroma.py
def add(
    self,
    id: str,
    embedding: list[float],
    text: str,
    data: T,
) -> str:
    """Store one item: vector, document text, and serialized data model.

    Args:
        id: Unique key for the item.
        embedding: Vector representation.
        text: Raw text used for keyword matching.
        data: Typed payload, flattened into ChromaDB metadata.

    Returns:
        The same id that was passed in.
    """
    meta = _serialize_to_metadata(data, self._data_type)
    self._collection.add(
        ids=[id],
        embeddings=[embedding],  # type: ignore[arg-type]
        documents=[text],
        metadatas=[meta],
    )
    return id

add_batch(items)

Add multiple items to the store.

Parameters:

Name Type Description Default
items list[VectorStoreItem[T]]

List of items to add.

required

Returns:

Type Description
list[str]

List of IDs for added items.

Source code in src/agentecs/adapters/chroma.py
def add_batch(self, items: list[VectorStoreItem[T]]) -> list[str]:
    """Store several items with a single ChromaDB call.

    Args:
        items: Items to persist; an empty list is a no-op.

    Returns:
        The ids of the stored items, in input order.
    """
    if not items:
        return []

    # Unzip the items into the parallel column lists ChromaDB expects.
    batch_ids: list[str] = []
    batch_embeddings: list[list[float]] = []
    batch_documents: list[str] = []
    batch_metadatas = []
    for item in items:
        batch_ids.append(item.id)
        batch_embeddings.append(item.embedding)
        batch_documents.append(item.text)
        batch_metadatas.append(_serialize_to_metadata(item.data, self._data_type))

    self._collection.add(
        ids=batch_ids,
        embeddings=batch_embeddings,  # type: ignore[arg-type]
        documents=batch_documents,
        metadatas=batch_metadatas,  # type: ignore[arg-type]
    )
    return batch_ids

get(id)

Get an item by ID.

Parameters:

Name Type Description Default
id str

Item identifier.

required

Returns:

Type Description
T | None

The data model if found, None otherwise.

Source code in src/agentecs/adapters/chroma.py
def get(self, id: str) -> T | None:
    """Fetch a stored data model by id.

    Args:
        id: Key of the item to look up.

    Returns:
        The deserialized data model, or None when the id is unknown.
    """
    hit = self._collection.get(ids=[id], include=["metadatas"])
    if hit["ids"]:
        raw = hit["metadatas"][0]  # type: ignore[index]
        return _deserialize_from_metadata(raw, self._data_type)  # type: ignore[arg-type]
    return None

search(query_embedding=None, query_text=None, mode=SearchMode.VECTOR, filters=None, limit=10)

Search the store.

Parameters:

Name Type Description Default
query_embedding list[float] | None

Query vector for vector/hybrid search.

None
query_text str | None

Query text for keyword/hybrid search.

None
mode SearchMode

Search mode (vector, keyword, or hybrid).

VECTOR
filters Filter | FilterGroup | None

Optional metadata filters.

None
limit int

Maximum number of results.

10

Returns:

Type Description
list[SearchResult[T]]

List of search results with scores.

Source code in src/agentecs/adapters/chroma.py
def search(
    self,
    query_embedding: list[float] | None = None,
    query_text: str | None = None,
    mode: SearchMode = SearchMode.VECTOR,
    filters: Filter | FilterGroup | None = None,
    limit: int = 10,
) -> list[SearchResult[T]]:
    """Search the store.

    Args:
        query_embedding: Query vector for vector/hybrid search.
        query_text: Query text for keyword/hybrid search.
        mode: Search mode (vector, keyword, or hybrid).
        filters: Optional metadata filters.
        limit: Maximum number of results.

    Returns:
        List of search results with scores.

    Raises:
        ValueError: If the query input required by *mode* is missing.
    """
    where = _build_chroma_where(filters)

    if mode == SearchMode.KEYWORD:
        if query_text is None:
            raise ValueError("query_text required for keyword search")
        # ChromaDB has no pure keyword index; query_texts embeds the text
        # with the collection's embedding function and searches by vector.
        result = self._collection.query(
            query_texts=[query_text],
            n_results=limit,
            where=where,
            include=["metadatas", "distances", "documents"],
        )
    elif mode == SearchMode.VECTOR:
        if query_embedding is None:
            raise ValueError("query_embedding required for vector search")
        result = self._collection.query(
            query_embeddings=[query_embedding],  # type: ignore[arg-type]
            n_results=limit,
            where=where,
            include=["metadatas", "distances", "documents"],
        )
    else:  # HYBRID
        if query_embedding is None:
            raise ValueError("query_embedding required for hybrid search")
        # ChromaDB's query() accepts exactly one of query_embeddings /
        # query_texts — passing both raises. Hybrid therefore runs the
        # vector query and narrows by keyword via where_document.
        result = self._collection.query(
            query_embeddings=[query_embedding],  # type: ignore[arg-type]
            n_results=limit,
            where=where,
            where_document={"$contains": query_text} if query_text else None,
            include=["metadatas", "distances", "documents"],
        )

    # Convert ChromaDB's columnar response into typed SearchResult objects.
    results: list[SearchResult[T]] = []
    if result["ids"] and result["ids"][0]:
        ids = result["ids"][0]
        metadatas = result["metadatas"][0] if result["metadatas"] else [{}] * len(ids)
        distances = result["distances"][0] if result["distances"] else [0.0] * len(ids)

        for i, id_ in enumerate(ids):
            data = _deserialize_from_metadata(metadatas[i], self._data_type)  # type: ignore[arg-type]
            distance = distances[i]
            # Cosine distance lies in [0, 2]; clamp so score stays in [0, 1].
            # NOTE(review): assumes the collection was created with cosine
            # space — with the default squared-L2 this score is meaningless.
            score = max(0.0, 1.0 - distance)

            results.append(
                SearchResult(
                    id=id_,
                    data=data,
                    score=score,
                    distance=distance,
                )
            )

    return results

Models

SearchMode

Bases: Enum

Search mode for vector store queries.

Source code in src/agentecs/adapters/models.py
class SearchMode(Enum):
    """Search mode for vector store queries."""

    VECTOR = "vector"  # nearest-neighbour search over embeddings
    KEYWORD = "keyword"  # text-based search
    HYBRID = "hybrid"  # embedding search combined with text matching

SearchResult dataclass

Result from a vector store search.

Attributes:

Name Type Description
id str

Document identifier.

data T

The typed data model.

score float

Similarity/relevance score (higher is better, normalized 0-1 for cosine).

distance float | None

Raw distance value (lower is better).

Source code in src/agentecs/adapters/models.py
@dataclass(slots=True)
class SearchResult[T]:
    """Result from a vector store search.

    Attributes:
        id: Document identifier.
        data: The typed data model.
        score: Similarity/relevance score (higher is better, normalized 0-1 for cosine).
        distance: Raw distance value (lower is better).
    """

    # Identifier of the matched document.
    id: str
    # Deserialized payload of the store's data model type.
    data: T
    # Normalized relevance; higher is better.
    score: float
    # Backend-reported raw distance; lower is better. None when unavailable.
    distance: float | None = None

VectorStoreItem dataclass

Item to add to vector store.

Attributes:

Name Type Description
id str

Unique identifier for the item.

embedding list[float]

Vector embedding.

text str

Text content for keyword search.

data T

The typed data model to store.

Source code in src/agentecs/adapters/models.py
@dataclass(slots=True)
class VectorStoreItem[T]:
    """Item to add to vector store.

    Attributes:
        id: Unique identifier for the item.
        embedding: Vector embedding.
        text: Text content for keyword search.
        data: The typed data model to store.
    """

    # Unique key used for later lookup/retrieval.
    id: str
    # Dense vector used for similarity search.
    embedding: list[float]
    # Raw text, kept alongside the vector for keyword/hybrid search.
    text: str
    # Typed payload stored (serialized) with the vector.
    data: T

Filter dataclass

Single filter condition.

Attributes:

Name Type Description
field str

Field name to filter on (supports nested: "metadata.category").

operator FilterOperator

Comparison operator.

value Any

Value to compare against.

Source code in src/agentecs/adapters/models.py
@dataclass(slots=True)
class Filter:
    """Single filter condition.

    Attributes:
        field: Field name to filter on (supports nested: "metadata.category").
        operator: Comparison operator.
        value: Value to compare against.
    """

    # Dotted paths address nested fields, e.g. "metadata.category".
    field: str
    # One of the FilterOperator comparison operators.
    operator: FilterOperator
    # Right-hand side of the comparison against the stored field value.
    value: Any

FilterGroup dataclass

Group of filters combined with AND/OR.

Attributes:

Name Type Description
filters list[Filter | FilterGroup]

List of Filter or nested FilterGroup.

operator str

How to combine filters ("and" or "or").

Source code in src/agentecs/adapters/models.py
@dataclass(slots=True)
class FilterGroup:
    """Group of filters combined with AND/OR.

    Attributes:
        filters: List of Filter or nested FilterGroup.
        operator: How to combine filters ("and" or "or").
    """

    # Conditions to combine; nesting FilterGroup builds boolean trees.
    filters: list[Filter | FilterGroup] = field(default_factory=list)
    # NOTE(review): not validated here — values other than "and"/"or" are
    # presumably rejected when translated to a backend query; confirm.
    operator: str = "and"  # "and" or "or"

LLM Client Adapter

Structured LLM output with multiple providers.

LLMClient Protocol

LLMClient

Bases: Protocol

Protocol for LLM operations with structured output.

Uses Pydantic models for type-safe responses.

Usage

class Analysis(BaseModel): sentiment: str confidence: float

client: LLMClient = InstructorAdapter.from_openai_client(openai_client) result: Analysis = client.call(messages, response_model=Analysis)

Source code in src/agentecs/adapters/protocol.py
@runtime_checkable
class LLMClient(Protocol):
    """Protocol for LLM operations with structured output.

    Uses Pydantic models for type-safe responses.

    Usage:
        class Analysis(BaseModel):
            sentiment: str
            confidence: float

        client: LLMClient = InstructorAdapter.from_openai_client(openai_client)
        result: Analysis = client.call(messages, response_model=Analysis)
    """

    # NOTE: @runtime_checkable isinstance() checks only verify that these
    # method names exist on the object; signatures are not checked.

    def call(
        self,
        messages: list[Message],
        response_model: type[T],
        temperature: float | None = None,
        max_tokens: int | None = None,
        **kwargs: Any,
    ) -> T:
        """Call LLM with structured output.

        Args:
            messages: Conversation messages.
            response_model: Pydantic model for response validation.
            temperature: Override default temperature.
            max_tokens: Override default max tokens.
            **kwargs: Additional provider-specific parameters.

        Returns:
            Validated response as the specified model type.
        """
        ...

    async def call_async(
        self,
        messages: list[Message],
        response_model: type[T],
        temperature: float | None = None,
        max_tokens: int | None = None,
        **kwargs: Any,
    ) -> T:
        """Call LLM with structured output (async).

        Args:
            messages: Conversation messages.
            response_model: Pydantic model for response validation.
            temperature: Override default temperature.
            max_tokens: Override default max tokens.
            **kwargs: Additional provider-specific parameters.

        Returns:
            Validated response as the specified model type.
        """
        ...

    def stream(
        self,
        messages: list[Message],
        response_model: type[T],
        temperature: float | None = None,
        max_tokens: int | None = None,
        **kwargs: Any,
    ) -> Iterator[T]:
        """Stream LLM response with partial structured output.

        Yields partial objects as they are received, with fields
        populated incrementally.

        Args:
            messages: Conversation messages.
            response_model: Pydantic model for response validation.
            temperature: Override default temperature.
            max_tokens: Override default max tokens.
            **kwargs: Additional provider-specific parameters.

        Yields:
            Partial response objects with incrementally populated fields.
        """
        ...

    # Declared as a plain ``def`` returning AsyncIterator (not ``async def``):
    # a conforming implementation (e.g. an async generator) is iterated with
    # ``async for`` directly, without awaiting the call itself.
    def stream_async(
        self,
        messages: list[Message],
        response_model: type[T],
        temperature: float | None = None,
        max_tokens: int | None = None,
        **kwargs: Any,
    ) -> AsyncIterator[T]:
        """Stream LLM response with partial structured output (async).

        Yields partial objects as they are received, with fields
        populated incrementally.

        Args:
            messages: Conversation messages.
            response_model: Pydantic model for response validation.
            temperature: Override default temperature.
            max_tokens: Override default max tokens.
            **kwargs: Additional provider-specific parameters.

        Yields:
            Partial response objects with incrementally populated fields.
        """
        ...

call(messages, response_model, temperature=None, max_tokens=None, **kwargs)

Call LLM with structured output.

Parameters:

Name Type Description Default
messages list[Message]

Conversation messages.

required
response_model type[T]

Pydantic model for response validation.

required
temperature float | None

Override default temperature.

None
max_tokens int | None

Override default max tokens.

None
**kwargs Any

Additional provider-specific parameters.

{}

Returns:

Type Description
T

Validated response as the specified model type.

Source code in src/agentecs/adapters/protocol.py
def call(
    self,
    messages: list[Message],
    response_model: type[T],
    temperature: float | None = None,
    max_tokens: int | None = None,
    **kwargs: Any,
) -> T:
    """Call LLM with structured output.

    Args:
        messages: Conversation messages.
        response_model: Pydantic model for response validation.
        temperature: Override default temperature.
        max_tokens: Override default max tokens.
        **kwargs: Additional provider-specific parameters.

    Returns:
        Validated response as the specified model type.
    """
    ...  # Protocol stub; implementations supply the body.

call_async(messages, response_model, temperature=None, max_tokens=None, **kwargs) async

Call LLM with structured output (async).

Parameters:

Name Type Description Default
messages list[Message]

Conversation messages.

required
response_model type[T]

Pydantic model for response validation.

required
temperature float | None

Override default temperature.

None
max_tokens int | None

Override default max tokens.

None
**kwargs Any

Additional provider-specific parameters.

{}

Returns:

Type Description
T

Validated response as the specified model type.

Source code in src/agentecs/adapters/protocol.py
async def call_async(
    self,
    messages: list[Message],
    response_model: type[T],
    temperature: float | None = None,
    max_tokens: int | None = None,
    **kwargs: Any,
) -> T:
    """Call LLM with structured output (async).

    Args:
        messages: Conversation messages.
        response_model: Pydantic model for response validation.
        temperature: Override default temperature.
        max_tokens: Override default max tokens.
        **kwargs: Additional provider-specific parameters.

    Returns:
        Validated response as the specified model type.
    """
    ...  # Protocol stub; implementations supply the body.

stream(messages, response_model, temperature=None, max_tokens=None, **kwargs)

Stream LLM response with partial structured output.

Yields partial objects as they are received, with fields populated incrementally.

Parameters:

Name Type Description Default
messages list[Message]

Conversation messages.

required
response_model type[T]

Pydantic model for response validation.

required
temperature float | None

Override default temperature.

None
max_tokens int | None

Override default max tokens.

None
**kwargs Any

Additional provider-specific parameters.

{}

Yields:

Type Description
T

Partial response objects with incrementally populated fields.

Source code in src/agentecs/adapters/protocol.py
def stream(
    self,
    messages: list[Message],
    response_model: type[T],
    temperature: float | None = None,
    max_tokens: int | None = None,
    **kwargs: Any,
) -> Iterator[T]:
    """Stream LLM response with partial structured output.

    Yields partial objects as they are received, with fields
    populated incrementally.

    Args:
        messages: Conversation messages.
        response_model: Pydantic model for response validation.
        temperature: Override default temperature.
        max_tokens: Override default max tokens.
        **kwargs: Additional provider-specific parameters.

    Yields:
        Partial response objects with incrementally populated fields.
    """
    ...  # Protocol stub; implementations supply the body.

stream_async(messages, response_model, temperature=None, max_tokens=None, **kwargs)

Stream LLM response with partial structured output (async).

Yields partial objects as they are received, with fields populated incrementally.

Parameters:

Name Type Description Default
messages list[Message]

Conversation messages.

required
response_model type[T]

Pydantic model for response validation.

required
temperature float | None

Override default temperature.

None
max_tokens int | None

Override default max tokens.

None
**kwargs Any

Additional provider-specific parameters.

{}

Yields:

Type Description
AsyncIterator[T]

Partial response objects with incrementally populated fields.

Source code in src/agentecs/adapters/protocol.py
# Plain ``def`` returning AsyncIterator: callers iterate the returned
# async iterator with ``async for`` rather than awaiting the call.
def stream_async(
    self,
    messages: list[Message],
    response_model: type[T],
    temperature: float | None = None,
    max_tokens: int | None = None,
    **kwargs: Any,
) -> AsyncIterator[T]:
    """Stream LLM response with partial structured output (async).

    Yields partial objects as they are received, with fields
    populated incrementally.

    Args:
        messages: Conversation messages.
        response_model: Pydantic model for response validation.
        temperature: Override default temperature.
        max_tokens: Override default max tokens.
        **kwargs: Additional provider-specific parameters.

    Yields:
        Partial response objects with incrementally populated fields.
    """
    ...  # Protocol stub; implementations supply the body.

Instructor Adapter

InstructorAdapter

Instructor-based implementation of LLMClient protocol.

Uses instructor library for structured LLM output with Pydantic models.

Attributes:

Name Type Description
client

The instructor-patched client.

settings LLMSettings

LLM configuration settings.

Source code in src/agentecs/adapters/instructor.py
(Source line numbers 65–490 — documentation renderer's line-number gutter, spilled one number per line during extraction; the corresponding source listing follows below.)
class InstructorAdapter:
    """Instructor-based implementation of LLMClient protocol.

    Uses instructor library for structured LLM output with Pydantic models.

    Attributes:
        client: The instructor-patched client.
        settings: LLM configuration settings.
    """

    def __init__(
        self,
        client: instructor.Instructor,
        settings: LLMSettings | None = None,
        async_client: instructor.AsyncInstructor | None = None,
    ) -> None:
        """Initialize adapter with instructor client.

        Use factory methods instead of direct construction.

        Args:
            client: Instructor-patched client for sync operations.
            settings: Optional LLM settings (uses defaults if None).
            async_client: Optional instructor-patched async client.
        """
        self._client = client
        self._async_client = async_client
        self._settings = settings or LLMSettings()

    @classmethod
    def from_instructor_client(
        cls,
        client: instructor.Instructor,
        settings: LLMSettings | None = None,
        async_client: instructor.AsyncInstructor | None = None,
    ) -> InstructorAdapter:
        """Create adapter from existing instructor client.

        Args:
            client: Instructor-patched client.
            settings: Optional LLM settings.
            async_client: Optional async instructor client.

        Returns:
            Configured InstructorAdapter instance.
        """
        return cls(client, settings, async_client)

    @classmethod
    def from_openai_client(
        cls,
        client: OpenAI,
        settings: LLMSettings | None = None,
        async_client: AsyncOpenAI | None = None,
        mode: instructor.Mode | None = None,
    ) -> InstructorAdapter:
        """Create adapter from OpenAI client.

        Wraps the OpenAI client with instructor for structured output.

        Args:
            client: OpenAI client instance.
            settings: Optional LLM settings.
            async_client: Optional async OpenAI client.
            mode: Instructor mode (default: TOOLS).

        Returns:
            Configured InstructorAdapter instance.
        """
        try:
            import instructor
        except ImportError as e:
            raise ImportError(
                "instructor is required for InstructorAdapter. "
                "Install with: pip install agentecs[llm]"
            ) from e

        mode = mode or instructor.Mode.TOOLS
        patched_client = instructor.from_openai(client, mode=mode)

        patched_async_client = None
        if async_client is not None:
            patched_async_client = instructor.from_openai(async_client, mode=mode)

        return cls(patched_client, settings, patched_async_client)

    @classmethod
    def from_anthropic(
        cls,
        client: Any,
        settings: LLMSettings | None = None,
        async_client: Any | None = None,
        mode: Any | None = None,
    ) -> InstructorAdapter:
        """Create adapter from Anthropic client.

        Wraps the Anthropic client with instructor for structured output.

        Args:
            client: anthropic.Anthropic client instance.
            settings: Optional LLM settings.
            async_client: Optional anthropic.AsyncAnthropic client.
            mode: instructor.Mode (default: ANTHROPIC_TOOLS).

        Returns:
            Configured InstructorAdapter instance.

        Example:
            ```python
            import anthropic
            from agentecs.adapters import InstructorAdapter

            client = anthropic.Anthropic()
            adapter = InstructorAdapter.from_anthropic(client)
            ```
        """
        try:
            import instructor
        except ImportError as e:
            raise ImportError(
                "instructor is required for InstructorAdapter. "
                "Install with: pip install agentecs[llm]"
            ) from e

        mode = mode or instructor.Mode.ANTHROPIC_TOOLS
        patched_client = instructor.from_anthropic(client, mode=mode)

        patched_async_client = None
        if async_client is not None:
            patched_async_client = instructor.from_anthropic(async_client, mode=mode)

        return cls(patched_client, settings, patched_async_client)  # type: ignore[arg-type]

    @classmethod
    def from_litellm(
        cls,
        settings: LLMSettings | None = None,
        mode: Any | None = None,
    ) -> InstructorAdapter:
        """Create adapter using LiteLLM for multi-provider support.

        LiteLLM provides a unified interface to 100+ LLM providers including
        OpenAI, Anthropic, Cohere, Azure, AWS Bedrock, and more.

        Args:
            settings: Optional LLM settings. The model field should use
                LiteLLM's provider/model format (e.g., "anthropic/claude-3-opus").
            mode: Instructor mode (default: TOOLS).

        Returns:
            Configured InstructorAdapter instance.

        Example:
            ```python
            from agentecs.adapters import InstructorAdapter
            from agentecs.config import LLMSettings

            adapter = InstructorAdapter.from_litellm(
                settings=LLMSettings(model="anthropic/claude-3-5-sonnet-20241022")
            )
            ```
        """
        try:
            import instructor
            import litellm  # type: ignore[import-not-found]
        except ImportError as e:
            raise ImportError(
                "instructor and litellm are required. "
                "Install with: pip install agentecs[llm] litellm"
            ) from e

        mode = mode or instructor.Mode.TOOLS
        patched_client = instructor.from_litellm(litellm.completion, mode=mode)
        patched_async_client = instructor.from_litellm(litellm.acompletion, mode=mode)

        return cls(patched_client, settings, patched_async_client)

    @classmethod
    def from_gemini(
        cls,
        client: Any,
        settings: LLMSettings | None = None,
        mode: Any | None = None,
    ) -> InstructorAdapter:
        """Create adapter from Google Gemini client.

        Wraps the Google GenerativeModel with instructor for structured output.

        Args:
            client: Google GenerativeModel instance.
            settings: Optional LLM settings.
            mode: Instructor mode (default: GEMINI_JSON).

        Returns:
            Configured InstructorAdapter instance.

        Example:
            ```python
            import google.generativeai as genai
            from agentecs.adapters import InstructorAdapter

            genai.configure(api_key="your-api-key")
            model = genai.GenerativeModel("gemini-1.5-flash")
            adapter = InstructorAdapter.from_gemini(model)
            ```
        """
        try:
            import instructor
        except ImportError as e:
            raise ImportError(
                "instructor is required for InstructorAdapter. "
                "Install with: pip install agentecs[llm] google-generativeai"
            ) from e

        mode = mode or instructor.Mode.GEMINI_JSON
        patched_client = instructor.from_gemini(client, mode=mode)

        # Gemini doesn't have a separate async client pattern
        return cls(patched_client, settings, None)

    @property
    def settings(self) -> LLMSettings:
        """Get the LLM settings."""
        return self._settings

    def _require_async_client(self) -> instructor.AsyncInstructor:
        """Return the configured async client.

        Raises:
            RuntimeError: If no async client was provided at construction.
        """
        if self._async_client is None:
            raise RuntimeError(
                "No async client configured. Provide async_client when creating the adapter."
            )
        return self._async_client

    @staticmethod
    def _partial_model(response_model: type[T]) -> Any:
        """Wrap *response_model* in instructor's Partial for streaming.

        Raises:
            ImportError: If instructor is not installed.
        """
        try:
            from instructor import Partial
        except ImportError as e:
            raise ImportError(
                "instructor is required for streaming. Install with: pip install agentecs[llm]"
            ) from e
        return Partial[response_model]  # type: ignore[valid-type]

    def _build_call_kwargs(
        self,
        messages: list[Message],
        response_model: Any,
        temperature: float | None,
        max_tokens: int | None,
        extra_kwargs: dict[str, Any],
        *,
        stream: bool = False,
    ) -> dict[str, Any]:
        """Assemble the kwargs shared by all call/stream variants.

        Precedence, lowest to highest: settings defaults, the explicit
        temperature/max_tokens arguments, then *extra_kwargs* (merged last,
        so callers can override anything including "stream").

        Note: pops "model" and "max_retries" out of *extra_kwargs* so
        per-call overrides replace the settings defaults.
        """
        call_kwargs: dict[str, Any] = {
            "model": extra_kwargs.pop("model", self._settings.model),
            "messages": _messages_to_openai(messages),
            "response_model": response_model,
            "temperature": temperature if temperature is not None else self._settings.temperature,
            "max_retries": extra_kwargs.pop("max_retries", self._settings.max_retries),
        }
        if stream:
            call_kwargs["stream"] = True

        if max_tokens is not None:
            call_kwargs["max_tokens"] = max_tokens
        elif self._settings.max_tokens is not None:
            call_kwargs["max_tokens"] = self._settings.max_tokens

        call_kwargs.update(extra_kwargs)
        return call_kwargs

    def call(
        self,
        messages: list[Message],
        response_model: type[T],
        temperature: float | None = None,
        max_tokens: int | None = None,
        **kwargs: Any,
    ) -> T:
        """Call LLM with structured output.

        Args:
            messages: Conversation messages.
            response_model: Pydantic model for response validation.
            temperature: Override default temperature.
            max_tokens: Override default max tokens.
            **kwargs: Additional parameters passed to the API.

        Returns:
            Validated response as the specified model type.
        """
        call_kwargs = self._build_call_kwargs(
            messages, response_model, temperature, max_tokens, kwargs
        )
        return cast(T, self._client.chat.completions.create(**call_kwargs))

    async def call_async(
        self,
        messages: list[Message],
        response_model: type[T],
        temperature: float | None = None,
        max_tokens: int | None = None,
        **kwargs: Any,
    ) -> T:
        """Call LLM with structured output (async).

        Args:
            messages: Conversation messages.
            response_model: Pydantic model for response validation.
            temperature: Override default temperature.
            max_tokens: Override default max tokens.
            **kwargs: Additional parameters passed to the API.

        Returns:
            Validated response as the specified model type.

        Raises:
            RuntimeError: If no async client was provided.
        """
        client = self._require_async_client()
        call_kwargs = self._build_call_kwargs(
            messages, response_model, temperature, max_tokens, kwargs
        )
        return cast(T, await client.chat.completions.create(**call_kwargs))

    def stream(
        self,
        messages: list[Message],
        response_model: type[T],
        temperature: float | None = None,
        max_tokens: int | None = None,
        **kwargs: Any,
    ) -> Iterator[T]:
        """Stream LLM response with partial structured output.

        Uses instructor's Partial for incremental field population.

        Args:
            messages: Conversation messages.
            response_model: Pydantic model for response validation.
            temperature: Override default temperature.
            max_tokens: Override default max tokens.
            **kwargs: Additional parameters passed to the API.

        Yields:
            Partial response objects with incrementally populated fields.
        """
        call_kwargs = self._build_call_kwargs(
            messages,
            self._partial_model(response_model),
            temperature,
            max_tokens,
            kwargs,
            stream=True,
        )
        # Instructor returns an iterator of partial objects when streaming
        yield from self._client.chat.completions.create(**call_kwargs)

    async def stream_async(
        self,
        messages: list[Message],
        response_model: type[T],
        temperature: float | None = None,
        max_tokens: int | None = None,
        **kwargs: Any,
    ) -> AsyncIterator[T]:
        """Stream LLM response with partial structured output (async).

        Uses instructor's Partial for incremental field population.

        Args:
            messages: Conversation messages.
            response_model: Pydantic model for response validation.
            temperature: Override default temperature.
            max_tokens: Override default max tokens.
            **kwargs: Additional parameters passed to the API.

        Yields:
            Partial response objects with incrementally populated fields.

        Raises:
            RuntimeError: If no async client was provided.
        """
        client = self._require_async_client()
        call_kwargs = self._build_call_kwargs(
            messages,
            self._partial_model(response_model),
            temperature,
            max_tokens,
            kwargs,
            stream=True,
        )
        async for partial_obj in await client.chat.completions.create(**call_kwargs):
            yield partial_obj

from_openai_client(client, settings=None, async_client=None, mode=None) classmethod

Create adapter from OpenAI client.

Wraps the OpenAI client with instructor for structured output.

Parameters:

Name Type Description Default
client OpenAI

OpenAI client instance.

required
settings LLMSettings | None

Optional LLM settings.

None
async_client AsyncOpenAI | None

Optional async OpenAI client.

None
mode Mode | None

Instructor mode (default: TOOLS).

None

Returns:

Type Description
InstructorAdapter

Configured InstructorAdapter instance.

Source code in src/agentecs/adapters/instructor.py
@classmethod
def from_openai_client(
    cls,
    client: OpenAI,
    settings: LLMSettings | None = None,
    async_client: AsyncOpenAI | None = None,
    mode: instructor.Mode | None = None,
) -> InstructorAdapter:
    """Build an adapter around an existing OpenAI client.

    The client (and the optional async client) are patched via instructor
    so chat completions return validated structured output.

    Args:
        client: OpenAI client instance.
        settings: Optional LLM settings.
        async_client: Optional async OpenAI client.
        mode: Instructor mode (default: TOOLS).

    Returns:
        Configured InstructorAdapter instance.
    """
    try:
        import instructor
    except ImportError as e:
        raise ImportError(
            "instructor is required for InstructorAdapter. "
            "Install with: pip install agentecs[llm]"
        ) from e

    effective_mode = mode if mode is not None else instructor.Mode.TOOLS
    sync_patched = instructor.from_openai(client, mode=effective_mode)
    async_patched = (
        instructor.from_openai(async_client, mode=effective_mode)
        if async_client is not None
        else None
    )
    return cls(sync_patched, settings, async_patched)

from_anthropic(client, settings=None, async_client=None, mode=None) classmethod

Create adapter from Anthropic client.

Wraps the Anthropic client with instructor for structured output.

Parameters:

Name Type Description Default
client Any

anthropic.Anthropic client instance.

required
settings LLMSettings | None

Optional LLM settings.

None
async_client Any | None

Optional anthropic.AsyncAnthropic client.

None
mode Any | None

instructor.Mode (default: ANTHROPIC_TOOLS).

None

Returns:

Type Description
InstructorAdapter

Configured InstructorAdapter instance.

Example
import anthropic
from agentecs.adapters import InstructorAdapter

client = anthropic.Anthropic()
adapter = InstructorAdapter.from_anthropic(client)
Source code in src/agentecs/adapters/instructor.py
@classmethod
def from_anthropic(
    cls,
    client: Any,
    settings: LLMSettings | None = None,
    async_client: Any | None = None,
    mode: Any | None = None,
) -> InstructorAdapter:
    """Build an adapter around an Anthropic client.

    The client (and the optional async client) are patched via instructor
    so message calls return validated structured output.

    Args:
        client: anthropic.Anthropic client instance.
        settings: Optional LLM settings.
        async_client: Optional anthropic.AsyncAnthropic client.
        mode: instructor.Mode (default: ANTHROPIC_TOOLS).

    Returns:
        Configured InstructorAdapter instance.

    Example:
        ```python
        import anthropic
        from agentecs.adapters import InstructorAdapter

        client = anthropic.Anthropic()
        adapter = InstructorAdapter.from_anthropic(client)
        ```
    """
    try:
        import instructor
    except ImportError as e:
        raise ImportError(
            "instructor is required for InstructorAdapter. "
            "Install with: pip install agentecs[llm]"
        ) from e

    effective_mode = mode if mode is not None else instructor.Mode.ANTHROPIC_TOOLS
    sync_patched = instructor.from_anthropic(client, mode=effective_mode)
    async_patched = (
        instructor.from_anthropic(async_client, mode=effective_mode)
        if async_client is not None
        else None
    )
    return cls(sync_patched, settings, async_patched)  # type: ignore[arg-type]

from_litellm(settings=None, mode=None) classmethod

Create adapter using LiteLLM for multi-provider support.

LiteLLM provides a unified interface to 100+ LLM providers including OpenAI, Anthropic, Cohere, Azure, AWS Bedrock, and more.

Parameters:

Name Type Description Default
settings LLMSettings | None

Optional LLM settings. The model field should use LiteLLM's provider/model format (e.g., "anthropic/claude-3-opus").

None
mode Any | None

Instructor mode (default: TOOLS).

None

Returns:

Type Description
InstructorAdapter

Configured InstructorAdapter instance.

Example
from agentecs.adapters import InstructorAdapter
from agentecs.config import LLMSettings

# Use Claude via LiteLLM
adapter = InstructorAdapter.from_litellm(
    settings=LLMSettings(model="anthropic/claude-3-5-sonnet-20241022")
)

# Use GPT-4 via LiteLLM
adapter = InstructorAdapter.from_litellm(
    settings=LLMSettings(model="openai/gpt-4o")
)
Source code in src/agentecs/adapters/instructor.py
@classmethod
def from_litellm(
    cls,
    settings: LLMSettings | None = None,
    mode: Any | None = None,
) -> InstructorAdapter:
    """Build an adapter backed by LiteLLM for multi-provider support.

    LiteLLM exposes a single interface to 100+ LLM providers (OpenAI,
    Anthropic, Cohere, Azure, AWS Bedrock, and more), so one adapter can
    target any of them via the model string.

    Args:
        settings: Optional LLM settings. The model field should use
            LiteLLM's provider/model format (e.g., "anthropic/claude-3-opus").
        mode: Instructor mode (default: TOOLS).

    Returns:
        Configured InstructorAdapter instance.

    Example:
        ```python
        from agentecs.adapters import InstructorAdapter
        from agentecs.config import LLMSettings

        # Use Claude via LiteLLM
        adapter = InstructorAdapter.from_litellm(
            settings=LLMSettings(model="anthropic/claude-3-5-sonnet-20241022")
        )

        # Use GPT-4 via LiteLLM
        adapter = InstructorAdapter.from_litellm(
            settings=LLMSettings(model="openai/gpt-4o")
        )
        ```
    """
    try:
        import instructor
        import litellm  # type: ignore[import-not-found]
    except ImportError as e:
        raise ImportError(
            "instructor and litellm are required. "
            "Install with: pip install agentecs[llm] litellm"
        ) from e

    effective_mode = mode if mode is not None else instructor.Mode.TOOLS
    # Both sync and async entry points are available from litellm directly,
    # so the adapter always gets a working async client here.
    return cls(
        instructor.from_litellm(litellm.completion, mode=effective_mode),
        settings,
        instructor.from_litellm(litellm.acompletion, mode=effective_mode),
    )

from_gemini(client, settings=None, mode=None) classmethod

Create adapter from Google Gemini client.

Wraps the Google GenerativeModel with instructor for structured output.

Parameters:

Name Type Description Default
client Any

Google GenerativeModel instance.

required
settings LLMSettings | None

Optional LLM settings.

None
mode Any | None

Instructor mode (default: GEMINI_JSON).

None

Returns:

Type Description
InstructorAdapter

Configured InstructorAdapter instance.

Example
import google.generativeai as genai
from agentecs.adapters import InstructorAdapter

genai.configure(api_key="your-api-key")
model = genai.GenerativeModel("gemini-1.5-flash")
adapter = InstructorAdapter.from_gemini(model)
Source code in src/agentecs/adapters/instructor.py
@classmethod
def from_gemini(
    cls,
    client: Any,
    settings: LLMSettings | None = None,
    mode: Any | None = None,
) -> InstructorAdapter:
    """Build an adapter around a Google Gemini model.

    The GenerativeModel is patched via instructor so generation calls
    return validated structured output.

    Args:
        client: Google GenerativeModel instance.
        settings: Optional LLM settings.
        mode: Instructor mode (default: GEMINI_JSON).

    Returns:
        Configured InstructorAdapter instance.

    Example:
        ```python
        import google.generativeai as genai
        from agentecs.adapters import InstructorAdapter

        genai.configure(api_key="your-api-key")
        model = genai.GenerativeModel("gemini-1.5-flash")
        adapter = InstructorAdapter.from_gemini(model)
        ```
    """
    try:
        import instructor
    except ImportError as e:
        raise ImportError(
            "instructor is required for InstructorAdapter. "
            "Install with: pip install agentecs[llm] google-generativeai"
        ) from e

    effective_mode = mode if mode is not None else instructor.Mode.GEMINI_JSON
    patched = instructor.from_gemini(client, mode=effective_mode)
    # No async counterpart: Gemini does not use a separate async client.
    return cls(patched, settings, None)

call(messages, response_model, temperature=None, max_tokens=None, **kwargs)

Call LLM with structured output.

Parameters:

Name Type Description Default
messages list[Message]

Conversation messages.

required
response_model type[T]

Pydantic model for response validation.

required
temperature float | None

Override default temperature.

None
max_tokens int | None

Override default max tokens.

None
**kwargs Any

Additional parameters passed to the API.

{}

Returns:

Type Description
T

Validated response as the specified model type.

Source code in src/agentecs/adapters/instructor.py
def call(
    self,
    messages: list[Message],
    response_model: type[T],
    temperature: float | None = None,
    max_tokens: int | None = None,
    **kwargs: Any,
) -> T:
    """Synchronously call the LLM and validate the structured response.

    Args:
        messages: Conversation messages.
        response_model: Pydantic model for response validation.
        temperature: Override default temperature.
        max_tokens: Override default max tokens.
        **kwargs: Additional parameters passed to the API.

    Returns:
        Validated response as the specified model type.
    """
    payload = _messages_to_openai(messages)

    # Explicit arguments win over settings; popping "model"/"max_retries"
    # keeps the final kwargs merge from re-applying those keys.
    request: dict[str, Any] = {
        "model": kwargs.pop("model", self._settings.model),
        "messages": payload,
        "response_model": response_model,
        "temperature": self._settings.temperature if temperature is None else temperature,
        "max_retries": kwargs.pop("max_retries", self._settings.max_retries),
    }

    effective_max_tokens = self._settings.max_tokens if max_tokens is None else max_tokens
    if effective_max_tokens is not None:
        request["max_tokens"] = effective_max_tokens

    # Remaining caller-supplied kwargs pass straight through to the API.
    request.update(kwargs)

    return cast(T, self._client.chat.completions.create(**request))

call_async(messages, response_model, temperature=None, max_tokens=None, **kwargs) async

Call LLM with structured output (async).

Parameters:

Name Type Description Default
messages list[Message]

Conversation messages.

required
response_model type[T]

Pydantic model for response validation.

required
temperature float | None

Override default temperature.

None
max_tokens int | None

Override default max tokens.

None
**kwargs Any

Additional parameters passed to the API.

{}

Returns:

Type Description
T

Validated response as the specified model type.

Raises:

Type Description
RuntimeError

If no async client was provided.

Source code in src/agentecs/adapters/instructor.py
async def call_async(
    self,
    messages: list[Message],
    response_model: type[T],
    temperature: float | None = None,
    max_tokens: int | None = None,
    **kwargs: Any,
) -> T:
    """Asynchronously call the LLM and validate the structured response.

    Args:
        messages: Conversation messages.
        response_model: Pydantic model for response validation.
        temperature: Override default temperature.
        max_tokens: Override default max tokens.
        **kwargs: Additional parameters passed to the API.

    Returns:
        Validated response as the specified model type.

    Raises:
        RuntimeError: If no async client was provided.
    """
    if self._async_client is None:
        raise RuntimeError(
            "No async client configured. Provide async_client when creating the adapter."
        )

    payload = _messages_to_openai(messages)

    # Explicit arguments win over settings; popping "model"/"max_retries"
    # keeps the final kwargs merge from re-applying those keys.
    request: dict[str, Any] = {
        "model": kwargs.pop("model", self._settings.model),
        "messages": payload,
        "response_model": response_model,
        "temperature": self._settings.temperature if temperature is None else temperature,
        "max_retries": kwargs.pop("max_retries", self._settings.max_retries),
    }

    effective_max_tokens = self._settings.max_tokens if max_tokens is None else max_tokens
    if effective_max_tokens is not None:
        request["max_tokens"] = effective_max_tokens

    request.update(kwargs)

    return cast(T, await self._async_client.chat.completions.create(**request))

stream(messages, response_model, temperature=None, max_tokens=None, **kwargs)

Stream LLM response with partial structured output.

Uses instructor's Partial for incremental field population.

Parameters:

Name Type Description Default
messages list[Message]

Conversation messages.

required
response_model type[T]

Pydantic model for response validation.

required
temperature float | None

Override default temperature.

None
max_tokens int | None

Override default max tokens.

None
**kwargs Any

Additional parameters passed to the API.

{}

Yields:

Type Description
T

Partial response objects with incrementally populated fields.

Source code in src/agentecs/adapters/instructor.py
def stream(
    self,
    messages: list[Message],
    response_model: type[T],
    temperature: float | None = None,
    max_tokens: int | None = None,
    **kwargs: Any,
) -> Iterator[T]:
    """Stream partially-populated structured responses.

    Wraps the response model in instructor's ``Partial`` so fields are
    filled in incrementally as tokens arrive.

    Args:
        messages: Conversation messages.
        response_model: Pydantic model for response validation.
        temperature: Override default temperature.
        max_tokens: Override default max tokens.
        **kwargs: Additional parameters passed to the API.

    Yields:
        Partial response objects with incrementally populated fields.
    """
    try:
        from instructor import Partial
    except ImportError as e:
        raise ImportError(
            "instructor is required for streaming. Install with: pip install agentecs[llm]"
        ) from e

    payload = _messages_to_openai(messages)

    # Explicit arguments win over settings; popping "model"/"max_retries"
    # keeps the final kwargs merge from re-applying those keys.
    request: dict[str, Any] = {
        "model": kwargs.pop("model", self._settings.model),
        "messages": payload,
        "response_model": Partial[response_model],  # type: ignore[valid-type]
        "temperature": self._settings.temperature if temperature is None else temperature,
        "max_retries": kwargs.pop("max_retries", self._settings.max_retries),
        "stream": True,
    }

    effective_max_tokens = self._settings.max_tokens if max_tokens is None else max_tokens
    if effective_max_tokens is not None:
        request["max_tokens"] = effective_max_tokens

    request.update(kwargs)

    # When streaming, instructor yields partial objects one at a time.
    partials = self._client.chat.completions.create(**request)
    yield from partials

stream_async(messages, response_model, temperature=None, max_tokens=None, **kwargs) async

Stream LLM response with partial structured output (async).

Uses instructor's Partial for incremental field population.

Parameters:

Name Type Description Default
messages list[Message]

Conversation messages.

required
response_model type[T]

Pydantic model for response validation.

required
temperature float | None

Override default temperature.

None
max_tokens int | None

Override default max tokens.

None
**kwargs Any

Additional parameters passed to the API.

{}

Yields:

Type Description
AsyncIterator[T]

Partial response objects with incrementally populated fields.

Raises:

Type Description
RuntimeError

If no async client was provided.

Source code in src/agentecs/adapters/instructor.py
async def stream_async(
    self,
    messages: list[Message],
    response_model: type[T],
    temperature: float | None = None,
    max_tokens: int | None = None,
    **kwargs: Any,
) -> AsyncIterator[T]:
    """Asynchronously stream partially-populated structured responses.

    Wraps the response model in instructor's ``Partial`` so fields are
    filled in incrementally as tokens arrive.

    Args:
        messages: Conversation messages.
        response_model: Pydantic model for response validation.
        temperature: Override default temperature.
        max_tokens: Override default max tokens.
        **kwargs: Additional parameters passed to the API.

    Yields:
        Partial response objects with incrementally populated fields.

    Raises:
        RuntimeError: If no async client was provided.
    """
    if self._async_client is None:
        raise RuntimeError(
            "No async client configured. Provide async_client when creating the adapter."
        )

    try:
        from instructor import Partial
    except ImportError as e:
        raise ImportError(
            "instructor is required for streaming. Install with: pip install agentecs[llm]"
        ) from e

    payload = _messages_to_openai(messages)

    # Explicit arguments win over settings; popping "model"/"max_retries"
    # keeps the final kwargs merge from re-applying those keys.
    request: dict[str, Any] = {
        "model": kwargs.pop("model", self._settings.model),
        "messages": payload,
        "response_model": Partial[response_model],  # type: ignore[valid-type]
        "temperature": self._settings.temperature if temperature is None else temperature,
        "max_retries": kwargs.pop("max_retries", self._settings.max_retries),
        "stream": True,
    }

    effective_max_tokens = self._settings.max_tokens if max_tokens is None else max_tokens
    if effective_max_tokens is not None:
        request["max_tokens"] = effective_max_tokens

    request.update(kwargs)

    response = await self._async_client.chat.completions.create(**request)
    async for partial in response:
        yield partial

Models

Message dataclass

A message in an LLM conversation.

Attributes:

Name Type Description
role MessageRole

Who sent the message.

content str

Message text content.

Source code in src/agentecs/adapters/models.py
@dataclass(slots=True)
class Message:
    """A single turn in an LLM conversation.

    Attributes:
        role: Who sent the message.
        content: Message text content.
    """

    role: MessageRole
    content: str

    @classmethod
    def system(cls, content: str) -> Message:
        """Build a system-role message."""
        return cls(MessageRole.SYSTEM, content)

    @classmethod
    def user(cls, content: str) -> Message:
        """Build a user-role message."""
        return cls(MessageRole.USER, content)

    @classmethod
    def assistant(cls, content: str) -> Message:
        """Build an assistant-role message."""
        return cls(MessageRole.ASSISTANT, content)

system(content) classmethod

Create a system message.

Source code in src/agentecs/adapters/models.py
@classmethod
def system(cls, content: str) -> Message:
    """Build a system-role message."""
    return cls(MessageRole.SYSTEM, content)

user(content) classmethod

Create a user message.

Source code in src/agentecs/adapters/models.py
@classmethod
def user(cls, content: str) -> Message:
    """Build a user-role message."""
    return cls(MessageRole.USER, content)

assistant(content) classmethod

Create an assistant message.

Source code in src/agentecs/adapters/models.py
@classmethod
def assistant(cls, content: str) -> Message:
    """Build an assistant-role message."""
    return cls(MessageRole.ASSISTANT, content)

MessageRole

Bases: Enum

Role of a message in LLM conversation.

Source code in src/agentecs/adapters/models.py
class MessageRole(Enum):
    """Role of a message in LLM conversation."""

    # NOTE(review): SYSTEM serializes to "developer", not "system" —
    # presumably to match OpenAI's newer role naming where "developer"
    # supersedes "system"; confirm other providers accept this value.
    SYSTEM = "developer"
    USER = "user"
    ASSISTANT = "assistant"

Configuration

Type-safe configuration with Pydantic Settings.

VectorStoreSettings

Bases: BaseSettings

Configuration for vector store adapters.

Attributes:

Name Type Description
collection_name str

Name of the collection/index.

persist_directory str | None

Path for persistent storage (None for ephemeral).

distance_metric str

Distance function for similarity (cosine, l2, ip).

Environment Variables

VECTORSTORE_COLLECTION_NAME, VECTORSTORE_PERSIST_DIRECTORY, VECTORSTORE_DISTANCE_METRIC

Source code in src/agentecs/config/settings.py
class VectorStoreSettings(BaseSettings):
    """Configuration for vector store adapters.

    Attributes:
        collection_name: Name of the collection/index.
        persist_directory: Path for persistent storage (None for ephemeral).
        distance_metric: Distance function for similarity (cosine, l2, ip).

    Environment Variables:
        VECTORSTORE_COLLECTION_NAME
        VECTORSTORE_PERSIST_DIRECTORY
        VECTORSTORE_DISTANCE_METRIC
    """

    # Values are loaded from VECTORSTORE_*-prefixed environment variables
    # and an optional .env file; unrecognized keys are ignored.
    model_config = SettingsConfigDict(
        env_prefix="VECTORSTORE_",
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    collection_name: str = "default"
    # None keeps the store ephemeral (in memory); a path enables persistence.
    persist_directory: str | None = None
    distance_metric: str = "cosine"

LLMSettings

Bases: BaseSettings

Configuration for LLM adapters.

Attributes:

Name Type Description
model str

Model name/identifier.

temperature float

Sampling temperature (0.0-2.0).

max_tokens int | None

Maximum tokens in response.

api_key str | None

API key (prefer environment variable).

base_url str | None

Custom API base URL (for proxies/local models).

timeout float

Request timeout in seconds.

max_retries int

Number of retries on failure.

Environment Variables

LLM_MODEL, LLM_TEMPERATURE, LLM_MAX_TOKENS, LLM_API_KEY (or OPENAI_API_KEY as fallback), LLM_BASE_URL, LLM_TIMEOUT, LLM_MAX_RETRIES

Source code in src/agentecs/config/settings.py
class LLMSettings(BaseSettings):
    """Configuration for LLM adapters.

    Attributes:
        model: Model name/identifier.
        temperature: Sampling temperature (0.0-2.0).
        max_tokens: Maximum tokens in response.
        api_key: API key (prefer environment variable).
        base_url: Custom API base URL (for proxies/local models).
        timeout: Request timeout in seconds.
        max_retries: Number of retries on failure.

    Environment Variables:
        LLM_MODEL
        LLM_TEMPERATURE
        LLM_MAX_TOKENS
        LLM_API_KEY (or OPENAI_API_KEY as fallback)
        LLM_BASE_URL
        LLM_TIMEOUT
        LLM_MAX_RETRIES
    """

    # Values are loaded from LLM_*-prefixed environment variables and an
    # optional .env file; unrecognized keys are ignored.
    model_config = SettingsConfigDict(
        env_prefix="LLM_",
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    model: str = "gpt-4o-mini"
    temperature: float = 0.7
    # None defers to the provider's default response length.
    max_tokens: int | None = None
    # NOTE(review): docstring mentions an OPENAI_API_KEY fallback, but no
    # alias is configured on this field — only LLM_API_KEY is read here.
    # Confirm whether the fallback happens elsewhere.
    api_key: str | None = None
    base_url: str | None = None
    timeout: float = 60.0
    max_retries: int = 3

Installation

Install adapter dependencies:

# Vector store adapter
pip install agentecs[vector]

# LLM adapter
pip install agentecs[llm]

# Configuration
pip install agentecs[config]

# All adapters
pip install agentecs[all]

Usage Examples

Vector Store

from pydantic import BaseModel
from agentecs.adapters import ChromaAdapter, SearchMode

class Document(BaseModel):
    title: str
    content: str

# Create adapter
store = ChromaAdapter.from_memory("docs", Document)

# Add documents
store.add("doc1", embedding=[...], text="content", data=Document(...))

# Search
results = store.search(
    query_embedding=[...],
    mode=SearchMode.HYBRID,
    limit=10
)

LLM Client

from pydantic import BaseModel
from agentecs.adapters import InstructorAdapter, Message
from agentecs.config import LLMSettings

class Analysis(BaseModel):
    sentiment: str
    confidence: float

# Create adapter
adapter = InstructorAdapter.from_litellm(
    settings=LLMSettings(model="anthropic/claude-3-5-sonnet-20241022")
)

# Call with structured output
messages = [Message.user("Analyze: Great product!")]
result = adapter.call(messages, response_model=Analysis)