
Outlines

Bases: PydanticEngine[PromptSignature, Result, Model, InferenceMode]

Source code in sieves/engines/outlines_.py
class Outlines(PydanticEngine[PromptSignature, Result, Model, InferenceMode]):
    @property
    def inference_modes(self) -> type[InferenceMode]:
        return InferenceMode

    def build_executable(
        self,
        inference_mode: InferenceMode,
        prompt_template: str | None,  # noqa: UP007
        prompt_signature: type[PromptSignature] | PromptSignature,
        fewshot_examples: Iterable[pydantic.BaseModel] = (),
    ) -> Executable[Result | None]:
        """Build executable that renders prompts and runs them through the selected outlines generator.
        :param inference_mode: Inference mode selecting the outlines generator factory.
        :param prompt_template: Optional prompt template to render values into.
        :param prompt_signature: Expected output structure: regex string, list of choices/enum values, or pydantic
            model class, depending on the inference mode.
        :param fewshot_examples: Few-shot examples to inject into prompts.
        :return: Executable yielding one result per prompt (None if the prompt failed).
        """
        cls_name = self.__class__.__name__
        template = self._create_template(prompt_template)
        generator_factory: Callable[..., Any] = inference_mode.value[0]

        match inference_mode:
            case InferenceMode.text:
                seq_generator = generator_factory(self._model, **self._init_kwargs)
            case InferenceMode.regex:
                assert isinstance(prompt_signature, str), ValueError(
                    "PromptSignature has to be supplied as string in outlines regex mode."
                )
                seq_generator = generator_factory(self._model, regex_str=prompt_signature, **self._init_kwargs)
            case InferenceMode.choice:
                assert isinstance(prompt_signature, list), ValueError(
                    f"PromptSignature has to be supplied as list of strings or enum values in {cls_name} choice "
                    f"mode."
                )
                seq_generator = generator_factory(self._model, choices=prompt_signature, **self._init_kwargs)
            case InferenceMode.json:
                assert isinstance(prompt_signature, type) and issubclass(prompt_signature, pydantic.BaseModel)
                seq_generator = generator_factory(self._model, schema_object=prompt_signature, **self._init_kwargs)
            case _:
                raise ValueError(f"Inference mode {inference_mode} not supported by {cls_name} engine.")

        def execute(values: Iterable[dict[str, Any]]) -> Iterable[Result | None]:
            """Execute prompts with engine for given values.
            :param values: Values to inject into prompts.
            :return Iterable[Result | None]: Results for prompts. Results are None if corresponding prompt failed.
            """

            def generate(prompts: list[str]) -> Iterable[Result]:
                yield from seq_generator(prompts, **self._inference_kwargs)

            yield from self._infer(
                generate,
                template,
                values,
                fewshot_examples,
            )

        return execute
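
Usage sketch: the snippet below shows how build_executable might be called in JSON mode. It is a minimal, hedged example: the model placeholder, the jinja-style template, and the Sentiment schema are illustrative assumptions, not part of the documented API above.

import pydantic

from sieves.engines.outlines_ import InferenceMode, Outlines

class Sentiment(pydantic.BaseModel):
    # Hypothetical output schema; JSON mode requires a pydantic model class.
    label: str
    confidence: float

model = ...  # instantiate an outlines model here; it is passed through to the generator factory

engine = Outlines(model=model, init_kwargs=None, inference_kwargs=None, strict_mode=False, batch_size=-1)

executable = engine.build_executable(
    inference_mode=InferenceMode.json,  # selects the JSON generator factory
    prompt_template="Classify the sentiment of: {{ text }}",  # assumed jinja-style template
    prompt_signature=Sentiment,
)

# One dict of template values per prompt; failed prompts yield None.
for result in executable([{"text": "I love this."}]):
    print(result)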

model property

Return model instance.

Returns:

    EngineModel: Model instance.
__init__(model, init_kwargs, inference_kwargs, strict_mode, batch_size)

Parameters:

    model (EngineModel): Instantiated model instance. Required.
    init_kwargs (dict[str, Any] | None): Optional kwargs to supply to the engine executable at init time. Required.
    inference_kwargs (dict[str, Any] | None): Optional kwargs to supply to the engine executable at inference time. Required.
    strict_mode (bool): If True, an exception is raised if a prompt response can't be parsed correctly. Required.
    batch_size (int): Batch size for processing prompts. -1 batches all documents in one go. Not all engines support batching. Required.
Source code in sieves/engines/core.py
def __init__(
    self,
    model: EngineModel,
    init_kwargs: dict[str, Any] | None,
    inference_kwargs: dict[str, Any] | None,
    strict_mode: bool,
    batch_size: int,
):
    """
    :param model: Instantiated model instance.
    :param init_kwargs: Optional kwargs to supply to the engine executable at init time.
    :param inference_kwargs: Optional kwargs to supply to the engine executable at inference time.
    :param strict_mode: If True, an exception is raised if a prompt response can't be parsed correctly.
    :param batch_size: Batch size for processing prompts. -1 batches all documents in one go. Not all engines
        support batching.
    """
    self._model = model
    self._inference_kwargs = inference_kwargs or {}
    self._init_kwargs = init_kwargs or {}
    self._strict_mode = strict_mode
    self._batch_size = batch_size
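
The Outlines source above shows where these kwargs land: init_kwargs are unpacked into the generator factory at build time, while inference_kwargs are unpacked into every generator call. A hedged configuration sketch, reusing the model placeholder from the earlier example (max_tokens is an assumed outlines generator kwarg):

engine = Outlines(
    model=model,                           # previously instantiated outlines model
    init_kwargs=None,                      # forwarded to the generator factory
    inference_kwargs={"max_tokens": 256},  # forwarded to each generator call (assumed kwarg)
    strict_mode=False,                     # yield None instead of raising on unparseable responses
    batch_size=-1,                         # batch all prompts in one go
)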

deserialize(config, **kwargs) classmethod

Generate Engine instance from config.

Parameters:

    config (Config): Config to generate instance from. Required.
    kwargs (dict[str, Any]): Values to inject into loaded config. Default: {}.

Returns:

    InternalEngine[EnginePromptSignature, EngineResult, EngineModel, EngineInferenceMode]: Deserialized Engine instance.

Source code in sieves/engines/core.py
@classmethod
def deserialize(
    cls, config: Config, **kwargs: dict[str, Any]
) -> InternalEngine[EnginePromptSignature, EngineResult, EngineModel, EngineInferenceMode]:
    """Generate Engine instance from config.
    :param config: Config to generate instance from.
    :param kwargs: Values to inject into loaded config.
    :return: Deserialized Engine instance.
    """
    return cls(**config.to_init_dict(cls, **kwargs))

serialize()

Serializes engine.

Returns:

    Config: Config instance.

Source code in sieves/engines/core.py
def serialize(self) -> Config:
    """Serializes engine.
    :return: Config instance.
    """
    return Config.create(self.__class__, self._attributes)
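
Together, serialize and deserialize allow a config round trip. A minimal sketch, assuming that non-serializable attributes such as the model need to be re-injected via kwargs:

config = engine.serialize()

# Rebuild the engine from its config; `model` is injected into the loaded
# config via kwargs (assumption: the model itself is not serialized).
restored = Outlines.deserialize(config, model=model)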