Skip to content

ModelWrapper

ModelWrapper provides a unified interface for interacting with different Large Language Model (LLM) frameworks and libraries. It abstracts away the complexities of structured generation, batching, and async execution, allowing tasks to work seamlessly with various backends like DSPy, Outlines, or LangChain.


ModelWrapper core interfaces and base classes used by backends.

Executable

Bases: Protocol[ModelWrapperResult]

Callable protocol representing a compiled prompt executable.

Source code in sieves/model_wrappers/core.py
22
23
24
25
26
27
28
29
30
31
class Executable(Protocol[ModelWrapperResult]):
    """Callable protocol representing a compiled prompt executable.

    Instances are produced by `ModelWrapper.build_executable()` and wrap a
    backend-native generation callable behind this uniform call signature.
    """

    def __call__(self, values: Sequence[dict[str, Any]]) -> Sequence[tuple[ModelWrapperResult | None, Any, TokenUsage]]:
        """Execute prompt executable for given values.

        :param values: Values to inject into prompts, one dict per prompt.
        :return: Sequence of tuples containing (result, raw_output, usage) for prompts,
            aligned with the order of `values`.
        """
        ...

__call__(values)

Execute prompt executable for given values.

Parameters:

Name Type Description Default
values Sequence[dict[str, Any]]

Values to inject into prompts.

required

Returns:

Type Description
Sequence[tuple[ModelWrapperResult | None, Any, TokenUsage]]

Sequence of tuples containing (result, raw_output, usage) for prompts.

Source code in sieves/model_wrappers/core.py
25
26
27
28
29
30
31
def __call__(self, values: Sequence[dict[str, Any]]) -> Sequence[tuple[ModelWrapperResult | None, Any, TokenUsage]]:
    """Execute prompt executable for given values.

    :param values: Values to inject into prompts, one dict per prompt.
    :return: Sequence of tuples containing (result, raw_output, usage) for prompts,
        aligned with the order of `values`.
    """
    ...

ModelWrapper

Base class for model wrappers handling model invocation and structured generation.

Source code in sieves/model_wrappers/core.py
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
class ModelWrapper[ModelWrapperPromptSignature, ModelWrapperResult, ModelWrapperModel, ModelWrapperInferenceMode]:
    """Base class for model wrappers handling model invocation and structured generation."""

    def __init__(self, model: ModelWrapperModel, model_settings: ModelSettings):
        """Initialize model wrapper with model and model settings.

        :param model: Instantiated model instance.
        :param model_settings: Model settings.
        """
        self._model = model
        self._model_settings = model_settings
        self._inference_kwargs = model_settings.inference_kwargs or {}
        self._init_kwargs = model_settings.init_kwargs or {}
        self._strict = model_settings.strict

    @property
    def model_settings(self) -> ModelSettings:
        """Return model settings.

        :return: Model settings.
        """
        return self._model_settings

    @property
    def model(self) -> ModelWrapperModel:
        """Return model instance.

        :return: Model instance.
        """
        return self._model

    @property
    @abc.abstractmethod
    def supports_few_shotting(self) -> bool:
        """Return whether model wrapper supports few-shotting.

        :return: Whether model wrapper supports few-shotting.
        """

    @property
    @abc.abstractmethod
    def inference_modes(self) -> type[ModelWrapperInferenceMode]:
        """Return supported inference modes.

        :return: Supported inference modes.
        """

    @abc.abstractmethod
    def build_executable(
        self,
        inference_mode: ModelWrapperInferenceMode,
        prompt_template: str | None,
        prompt_signature: type[ModelWrapperPromptSignature] | ModelWrapperPromptSignature,
        fewshot_examples: Sequence[pydantic.BaseModel] = (),
    ) -> Executable[ModelWrapperResult | None]:
        """Return a prompt executable for the given signature and mode.

        This wraps the model type‑native generation callable (e.g., DSPy Predict, Outlines Generator) with sieves’
        uniform interface.

        :param inference_mode: Inference mode to use (e.g. classification, JSON, ... - this is model type-specific).
        :param prompt_template: Prompt template.
        :param prompt_signature: Expected prompt signature type.
        :param fewshot_examples: Few-shot examples.
        :return: Prompt executable.
        """

    @staticmethod
    def convert_fewshot_examples(fewshot_examples: Sequence[pydantic.BaseModel]) -> list[dict[str, Any]]:
        """Convert few‑shot examples to dicts.

        :param fewshot_examples: Fewshot examples to convert.
        :return: Fewshot examples as dicts.
        """
        return [fs_example.model_dump(serialize_as_any=True) for fs_example in fewshot_examples]

    @staticmethod
    async def _execute_async_calls(calls: list[Coroutine[Any, Any, Any]] | list[Awaitable[Any]]) -> Any:
        """Execute a batch of async functions.

        :param calls: Async calls to execute.
        :return: Parsed response objects.
        """
        return await asyncio.gather(*calls)

    def _get_tokenizer(self) -> Any | None:
        """Return the tokenizer instance for this model if available.

        :return: Tokenizer instance or None.
        """
        return None

    def _count_tokens(self, text: str | None, tokenizer: Any | None = None) -> int | None:
        """Count tokens in a string using the provided or default tokenizer.

        :param text: Text to count tokens for.
        :param tokenizer: Optional tokenizer to use. If not provided, uses _get_tokenizer().
        :return: Token count or None if no tokenizer is available.
        """
        if text is None:
            return None

        tokenizer = tokenizer or self._get_tokenizer()
        if tokenizer:
            try:
                # Handle both standard transformers and tiktoken-style tokenizers.
                encoded = tokenizer.encode(text)
            except Exception:
                return None

            # `transformers` tokenizers will have the token IDs nested.
            try:
                return len(encoded[0][0])
            except Exception:
                return len(encoded)
        return None

inference_modes abstractmethod property

Return supported inference modes.

Returns:

Type Description
type[ModelWrapperInferenceMode]

Supported inference modes.

model property

Return model instance.

Returns:

Type Description
ModelWrapperModel

Model instance.

model_settings property

Return model settings.

Returns:

Type Description
ModelSettings

Model settings.

supports_few_shotting abstractmethod property

Return whether model wrapper supports few-shotting.

Returns:

Type Description
bool

Whether model wrapper supports few-shotting.

__init__(model, model_settings)

Initialize model wrapper with model and model settings.

Parameters:

Name Type Description Default
model ModelWrapperModel

Instantiated model instance.

required
model_settings ModelSettings

Model settings.

required
Source code in sieves/model_wrappers/core.py
37
38
39
40
41
42
43
44
45
46
47
def __init__(self, model: ModelWrapperModel, model_settings: ModelSettings):
    """Store the model and its settings, unpacking commonly used options.

    :param model: Instantiated model instance.
    :param model_settings: Model settings.
    """
    self._model = model
    self._model_settings = model_settings
    self._strict = model_settings.strict
    # Optional kwarg mappings default to empty dicts so callers never see None.
    self._inference_kwargs = model_settings.inference_kwargs or {}
    self._init_kwargs = model_settings.init_kwargs or {}

build_executable(inference_mode, prompt_template, prompt_signature, fewshot_examples=()) abstractmethod

Return a prompt executable for the given signature and mode.

This wraps the model type‑native generation callable (e.g., DSPy Predict, Outlines Generator) with sieves’ uniform interface.

Parameters:

Name Type Description Default
inference_mode ModelWrapperInferenceMode

Inference mode to use (e.g. classification, JSON, ... - this is model type-specific).

required
prompt_template str | None

Prompt template.

required
prompt_signature type[ModelWrapperPromptSignature] | ModelWrapperPromptSignature

Expected prompt signature type.

required
fewshot_examples Sequence[BaseModel]

Few-shot examples.

()

Returns:

Type Description
Executable[ModelWrapperResult | None]

Prompt executable.

Source code in sieves/model_wrappers/core.py
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
@abc.abstractmethod
def build_executable(
    self,
    inference_mode: ModelWrapperInferenceMode,
    prompt_template: str | None,
    prompt_signature: type[ModelWrapperPromptSignature] | ModelWrapperPromptSignature,
    fewshot_examples: Sequence[pydantic.BaseModel] = (),
) -> Executable[ModelWrapperResult | None]:
    """Return a prompt executable for the given signature and mode.

    This wraps the model type‑native generation callable (e.g., DSPy Predict, Outlines Generator) with sieves’
    uniform interface. Concrete model wrappers implement this to compile `prompt_signature` into a
    backend-specific executable.

    :param inference_mode: Inference mode to use (e.g. classification, JSON, ... - this is model type-specific).
    :param prompt_template: Prompt template.
    :param prompt_signature: Expected prompt signature type.
    :param fewshot_examples: Few-shot examples.
    :return: Prompt executable.
    """

convert_fewshot_examples(fewshot_examples) staticmethod

Convert few‑shot examples to dicts.

Parameters:

Name Type Description Default
fewshot_examples Sequence[BaseModel]

Fewshot examples to convert.

required

Returns:

Type Description
list[dict[str, Any]]

Fewshot examples as dicts.

Source code in sieves/model_wrappers/core.py
101
102
103
104
105
106
107
108
@staticmethod
def convert_fewshot_examples(fewshot_examples: Sequence[pydantic.BaseModel]) -> list[dict[str, Any]]:
    """Serialize few-shot examples into plain dicts.

    :param fewshot_examples: Fewshot examples to convert.
    :return: Fewshot examples as dicts.
    """
    dumped: list[dict[str, Any]] = []
    for example in fewshot_examples:
        dumped.append(example.model_dump(serialize_as_any=True))
    return dumped

PydanticModelWrapper

Bases: ABC, ModelWrapper[ModelWrapperPromptSignature, ModelWrapperResult, ModelWrapperModel, ModelWrapperInferenceMode]

Abstract super class for model wrappers using Pydantic signatures and results.

Note that this class also assumes the model wrapper accepts a prompt. This holds true for most model wrappers — the exceptions are those with an idiosyncratic way of processing prompts, such as DSPy, and decoder-only models, which don't work with object-based signatures anyway.

If and once we add support for a Pydantic-based model wrapper that doesn't accept prompt templates, we'll adjust by modifying _infer() to accept an additional parameter specifying how to handle prompt/instruction injection (and we might have to make supports_few_shotting() model type-specific again).

Source code in sieves/model_wrappers/core.py
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
class PydanticModelWrapper(
    abc.ABC, ModelWrapper[ModelWrapperPromptSignature, ModelWrapperResult, ModelWrapperModel, ModelWrapperInferenceMode]
):
    """Abstract super class for model wrappers using Pydantic signatures and results.

    This class assumes the wrapped model accepts a prompt template. That holds for most
    model wrappers; the exceptions are those with an idiosyncratic way of processing
    prompts (like DSPy) and decoder-only models, which don't work with object-based
    signatures anyway.

    If we ever add a Pydantic-based model wrapper that doesn't accept prompt templates,
    we'll adjust by letting `_infer()` take an additional parameter specifying how to
    handle prompt/instruction injection (and `supports_few_shotting()` may have to
    become model type-specific again).
    """

    @classmethod
    def _create_template(cls, template: str | None) -> jinja2.Template:
        """Compile a prompt template string into a Jinja2 template.

        :param template: Template string.
        :return: Jinja2 template.
        """
        assert template, f"prompt_template has to be provided to {cls.__name__}."
        return jinja2.Template(template)

    @override
    @property
    def supports_few_shotting(self) -> bool:
        """Pydantic-based model wrappers always support few-shot examples."""
        return True

    def _infer(
        self,
        generator: Callable[[list[str]], Iterable[tuple[ModelWrapperResult, Any, TokenUsage]]],
        template: jinja2.Template,
        values: Sequence[dict[str, Any]],
    ) -> Sequence[tuple[ModelWrapperResult | None, Any, TokenUsage]]:
        """Render prompts for all values and run the generator over them, with exception handling.

        :param generator: Callable generating responses.
        :param template: Prompt template.
        :param values: Doc values to inject.
        :return: Sequence of tuples containing results parsed from responses, raw outputs, and token usage.
        """
        try:
            rendered_prompts = [template.render(**value_set) for value_set in values]
            return list(generator(rendered_prompts))
        except Exception as err:
            if not self._strict:
                # Non-strict mode degrades gracefully: one empty placeholder per input.
                return [(None, None, TokenUsage()) for _ in values]
            raise RuntimeError(
                "Encountered problem when executing prompt. Ensure your few-shot examples and document "
                "chunks contain sensible information."
            ) from err

inference_modes abstractmethod property

Return supported inference modes.

Returns:

Type Description
type[ModelWrapperInferenceMode]

Supported inference modes.

model property

Return model instance.

Returns:

Type Description
ModelWrapperModel

Model instance.

model_settings property

Return model settings.

Returns:

Type Description
ModelSettings

Model settings.

__init__(model, model_settings)

Initialize model wrapper with model and model settings.

Parameters:

Name Type Description Default
model ModelWrapperModel

Instantiated model instance.

required
model_settings ModelSettings

Model settings.

required
Source code in sieves/model_wrappers/core.py
37
38
39
40
41
42
43
44
45
46
47
def __init__(self, model: ModelWrapperModel, model_settings: ModelSettings):
    """Store the model and its settings, unpacking commonly used options.

    :param model: Instantiated model instance.
    :param model_settings: Model settings.
    """
    self._model = model
    self._model_settings = model_settings
    self._strict = model_settings.strict
    # Optional kwarg mappings default to empty dicts so callers never see None.
    self._inference_kwargs = model_settings.inference_kwargs or {}
    self._init_kwargs = model_settings.init_kwargs or {}

build_executable(inference_mode, prompt_template, prompt_signature, fewshot_examples=()) abstractmethod

Return a prompt executable for the given signature and mode.

This wraps the model type‑native generation callable (e.g., DSPy Predict, Outlines Generator) with sieves’ uniform interface.

Parameters:

Name Type Description Default
inference_mode ModelWrapperInferenceMode

Inference mode to use (e.g. classification, JSON, ... - this is model type-specific).

required
prompt_template str | None

Prompt template.

required
prompt_signature type[ModelWrapperPromptSignature] | ModelWrapperPromptSignature

Expected prompt signature type.

required
fewshot_examples Sequence[BaseModel]

Few-shot examples.

()

Returns:

Type Description
Executable[ModelWrapperResult | None]

Prompt executable.

Source code in sieves/model_wrappers/core.py
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
@abc.abstractmethod
def build_executable(
    self,
    inference_mode: ModelWrapperInferenceMode,
    prompt_template: str | None,
    prompt_signature: type[ModelWrapperPromptSignature] | ModelWrapperPromptSignature,
    fewshot_examples: Sequence[pydantic.BaseModel] = (),
) -> Executable[ModelWrapperResult | None]:
    """Return a prompt executable for the given signature and mode.

    This wraps the model type‑native generation callable (e.g., DSPy Predict, Outlines Generator) with sieves’
    uniform interface. Concrete model wrappers implement this to compile `prompt_signature` into a
    backend-specific executable.

    :param inference_mode: Inference mode to use (e.g. classification, JSON, ... - this is model type-specific).
    :param prompt_template: Prompt template.
    :param prompt_signature: Expected prompt signature type.
    :param fewshot_examples: Few-shot examples.
    :return: Prompt executable.
    """

convert_fewshot_examples(fewshot_examples) staticmethod

Convert few‑shot examples to dicts.

Parameters:

Name Type Description Default
fewshot_examples Sequence[BaseModel]

Fewshot examples to convert.

required

Returns:

Type Description
list[dict[str, Any]]

Fewshot examples as dicts.

Source code in sieves/model_wrappers/core.py
101
102
103
104
105
106
107
108
@staticmethod
def convert_fewshot_examples(fewshot_examples: Sequence[pydantic.BaseModel]) -> list[dict[str, Any]]:
    """Serialize few-shot examples into plain dicts.

    :param fewshot_examples: Fewshot examples to convert.
    :return: Fewshot examples as dicts.
    """
    dumped: list[dict[str, Any]] = []
    for example in fewshot_examples:
        dumped.append(example.model_dump(serialize_as_any=True))
    return dumped