Engine

Bases: InternalEngine[PromptSignature, Result, Model, InferenceMode]

Source code in sieves/engines/wrapper.py
class Engine(InternalEngine[PromptSignature, Result, Model, InferenceMode]):
    def __init__(
        self,
        model: Model | None = None,
        init_kwargs: dict[str, Any] | None = None,
        inference_kwargs: dict[str, Any] | None = None,
        config_kwargs: dict[str, Any] | None = None,
        strict_mode: bool = False,
        batch_size: int = -1,
    ):
        """
        :param model: Model to run. If None, a default model (HuggingFaceTB/SmolLM-360M-Instruct with Outlines) is used.
        :param init_kwargs: Optional kwargs to supply to engine executable at init time.
        :param inference_kwargs: Optional kwargs to supply to engine executable at inference time.
        :param config_kwargs: Used only if supplied model is a DSPy model object, ignored otherwise. Optional kwargs
            supplied to dspy.configure().
        :param strict_mode: If True, exception is raised if prompt response can't be parsed correctly.
        :param batch_size: Batch size in processing prompts. -1 will batch all documents in one go. Not all engines
            support batching.
        """
        super().__init__(model or Engine._init_default_model(), init_kwargs, inference_kwargs, strict_mode, batch_size)
        self._config_kwargs = config_kwargs
        self._engine: InternalEngine[PromptSignature, Result, Model, InferenceMode] = self._init_engine()

    @classmethod
    def _init_default_model(cls) -> Model:
        """Initializes default model (HuggingFaceTB/SmolLM-360M-Instruct with Outlines).
        :return: Initialized default model.
        """
        import outlines

        return outlines.models.transformers("HuggingFaceTB/SmolLM-360M-Instruct")

    def _init_engine(self) -> InternalEngine[EnginePromptSignature, EngineResult, EngineModel, EngineInferenceMode]:
        """Initializes internal engine object.
        :return Engine: Engine.
        :raises: ValueError if model type isn't supported.
        """
        model_type = type(self._model)
        module_engine_map = {
            dspy_: dspy_.DSPy,
            glix_: glix_.GliX,
            huggingface_: huggingface_.HuggingFace,
            instructor_: instructor_.Instructor,
            langchain_: langchain_.LangChain,
            ollama_: ollama_.Ollama,
            outlines_: outlines_.Outlines,
            vllm_: vllm_.VLLM,
        }

        for module, engine_type in module_engine_map.items():
            try:
                module_model_types = module.Model.__args__
            except AttributeError:
                module_model_types = (module.Model,)

            if any(issubclass(model_type, module_model_type) for module_model_type in module_model_types):
                internal_engine = engine_type(
                    model=self._model,
                    init_kwargs=self._init_kwargs,
                    inference_kwargs=self._inference_kwargs,
                    strict_mode=self._strict_mode,
                    batch_size=self._batch_size,
                    **{"config_kwargs": self._config_kwargs} if module == dspy_ else {},
                )
                assert isinstance(internal_engine, InternalEngine)

                return internal_engine

        raise ValueError(
            f"Model type {self.model.__class__} is not supported. Please check the documentation and ensure you're "
            f"providing a supported model type."
        )

    @property
    def supports_few_shotting(self) -> bool:
        return self._engine.supports_few_shotting

    @property
    def inference_modes(self) -> type[InferenceMode]:
        return self._engine.inference_modes

    def build_executable(
        self,
        inference_mode: InferenceMode,
        prompt_template: str | None,
        prompt_signature: type[PromptSignature] | PromptSignature,
        fewshot_examples: Iterable[pydantic.BaseModel] = (),
    ) -> Executable[Result | None]:
        return self._engine.build_executable(
            inference_mode=inference_mode,
            prompt_template=prompt_template,
            prompt_signature=prompt_signature,
            fewshot_examples=fewshot_examples,
        )

    def get_engine_type(self) -> EngineType:
        """Returns engine type for specified engine.
        :return EngineType: Engine type for self._engine.
        :raises: ValueError if engine class not found in EngineType.
        """
        return EngineType.get_engine_type(self._engine)
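
The class above dispatches to an internal engine based on the model type and exposes build_executable() for turning a prompt template and signature into a runnable unit. The following is a rough, hedged usage sketch: the import path is taken from the source reference above, the "json" inference-mode name and the Jinja-style template placeholder are assumptions, and the call signature of the returned executable is defined by the Executable protocol in sieves/engines/core.py rather than shown here.

import pydantic

from sieves.engines.wrapper import Engine  # import path assumed from the source reference above


class Summary(pydantic.BaseModel):
    """Prompt signature: the structured output we want back."""

    summary: str


engine = Engine()  # no model given, so the Outlines default model is used

# Pick an inference mode exposed by the internal engine. Which members exist
# depends on the engine selected for the model; "json" (structured output) is
# assumed here.
mode = engine.inference_modes["json"]

executable = engine.build_executable(
    inference_mode=mode,
    prompt_template="Summarize the following text: {{ text }}",  # Jinja-style template assumed
    prompt_signature=Summary,
    fewshot_examples=(),  # optional few-shot examples (see supports_few_shotting)
)
# `executable` is then invoked by sieves tasks; see the Executable protocol for its call signature.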

model property

Return model instance.

Returns:

    EngineModel: Model instance.

__init__(model=None, init_kwargs=None, inference_kwargs=None, config_kwargs=None, strict_mode=False, batch_size=-1)

Parameters:

    model (Model | None, default: None): Model to run. If None, a default model (HuggingFaceTB/SmolLM-360M-Instruct with Outlines) is used.
    init_kwargs (dict[str, Any] | None, default: None): Optional kwargs to supply to engine executable at init time.
    inference_kwargs (dict[str, Any] | None, default: None): Optional kwargs to supply to engine executable at inference time.
    config_kwargs (dict[str, Any] | None, default: None): Used only if supplied model is a DSPy model object, ignored otherwise. Optional kwargs supplied to dspy.configure().
    strict_mode (bool, default: False): If True, exception is raised if prompt response can't be parsed correctly.
    batch_size (int, default: -1): Batch size in processing prompts. -1 will batch all documents in one go. Not all engines support batching.
Source code in sieves/engines/wrapper.py
def __init__(
    self,
    model: Model | None = None,
    init_kwargs: dict[str, Any] | None = None,
    inference_kwargs: dict[str, Any] | None = None,
    config_kwargs: dict[str, Any] | None = None,
    strict_mode: bool = False,
    batch_size: int = -1,
):
    """
    :param model: Model to run. If None, a default model (HuggingFaceTB/SmolLM-360M-Instruct with Outlines) is used.
    :param init_kwargs: Optional kwargs to supply to engine executable at init time.
    :param inference_kwargs: Optional kwargs to supply to engine executable at inference time.
    :param config_kwargs: Used only if supplied model is a DSPy model object, ignored otherwise. Optional kwargs
        supplied to dspy.configure().
    :param strict_mode: If True, exception is raised if prompt response can't be parsed correctly.
    :param batch_size: Batch size in processing prompts. -1 will batch all documents in one go. Not all engines
        support batching.
    """
    super().__init__(model or Engine._init_default_model(), init_kwargs, inference_kwargs, strict_mode, batch_size)
    self._config_kwargs = config_kwargs
    self._engine: InternalEngine[PromptSignature, Result, Model, InferenceMode] = self._init_engine()
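
Construction in practice, as a minimal hedged sketch: the import path is assumed from the source reference above, and the inference_kwargs key is a placeholder whose valid values depend on the internal engine chosen for the model.

from sieves.engines.wrapper import Engine  # import path assumed from the source reference above

# Default model: with no model argument, Engine falls back to
# HuggingFaceTB/SmolLM-360M-Instruct wrapped via Outlines.
engine = Engine(
    inference_kwargs={"max_tokens": 256},  # forwarded to the internal engine at inference time (key assumed)
    strict_mode=False,                     # don't raise if a prompt response can't be parsed
    batch_size=8,                          # -1 would batch all documents in one go
)

# Explicit model: any model type supported by one of the wrapped engines works.
# This mirrors _init_default_model() above; DSPy, GliX, Hugging Face, Instructor,
# LangChain, Ollama, Outlines and vLLM models are dispatched the same way.
import outlines

model = outlines.models.transformers("HuggingFaceTB/SmolLM-360M-Instruct")
engine = Engine(model=model)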

deserialize(config, **kwargs) classmethod

Generate Engine instance from config.

Parameters:

    config (Config, required): Config to generate instance from.
    kwargs (dict[str, Any], default: {}): Values to inject into loaded config.

Returns:

    InternalEngine[EnginePromptSignature, EngineResult, EngineModel, EngineInferenceMode]: Deserialized Engine instance.

Source code in sieves/engines/core.py
@classmethod
def deserialize(
    cls, config: Config, **kwargs: dict[str, Any]
) -> InternalEngine[EnginePromptSignature, EngineResult, EngineModel, EngineInferenceMode]:
    """Generate Engine instance from config.
    :param config: Config to generate instance from.
    :param kwargs: Values to inject into loaded config.
    :return: Deserialized Engine instance.
    """
    return cls(**config.to_init_dict(cls, **kwargs))

get_engine_type()

Returns engine type for specified engine.

Returns:

    EngineType: Engine type for self._engine.

Source code in sieves/engines/wrapper.py
def get_engine_type(self) -> EngineType:
    """Returns engine type for specified engine.
    :return EngineType: Engine type for self._engine.
    :raises: ValueError if engine class not found in EngineType.
    """
    return EngineType.get_engine_type(self._engine)
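
For example, checking which internal engine backs the wrapper (a hedged sketch; the concrete EngineType member names are not shown in this reference and are assumed):

from sieves.engines.wrapper import Engine

engine = Engine()                        # default Outlines-backed model
engine_type = engine.get_engine_type()   # EngineType member for self._engine

# EngineType behaves like an enum member, so the usual introspection applies.
print(engine_type.name)                  # e.g. "outlines" for the default model (member name assumed)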

serialize()

Serializes engine.

Returns:

    Config: Config instance.

Source code in sieves/engines/core.py
def serialize(self) -> Config:
    """Serializes engine.
    :return: Config instance.
    """
    return Config.create(self.__class__, self._attributes)
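
A hedged round-trip sketch combining serialize() and deserialize(): serialize() captures the engine's init attributes as a Config, and deserialize() rebuilds an instance from it. Whether non-serializable values such as the model object must be re-injected via kwargs, and under which key, is not specified in this reference, so the model=... injection below is an assumption.

from sieves.engines.wrapper import Engine

engine = Engine()             # default model
config = engine.serialize()   # Config built from the engine's init attributes

# Rebuild the engine from the config. Per the deserialize() parameters above,
# kwargs are injected into the loaded config; using the "model" key to re-supply
# the model object is an assumption.
restored = Engine.deserialize(config, model=engine.model)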