Bases: Engine[PromptSignature, Result, Model, InferenceMode]
Engine for DSPy.
Source code in sieves/engines/dspy_.py
```python
class DSPy(Engine[PromptSignature, Result, Model, InferenceMode]):
    """Engine for DSPy."""

    def __init__(self, model: Model, generation_settings: GenerationSettings):
        """Initialize engine.

        :param model: Model to run. Note: DSPy only runs with APIs. If you want to run a model locally (DSPy v2.5
            onwards), serve it with Ollama - see
            https://dspy.ai/learn/programming/language_models/?h=models#__tabbed_1_5. In a nutshell:
            > curl -fsSL https://ollama.ai/install.sh | sh
            > ollama run MODEL_ID
            > `model = dspy.LM(MODEL_ID, api_base='http://localhost:11434', api_key='')`
        :param generation_settings: Settings including DSPy configuration in `config_kwargs`.
        """
        super().__init__(model, generation_settings)
        cfg = generation_settings.config_kwargs or {}
        dspy.configure(lm=model, **cfg)

    @override
    @property
    def inference_modes(self) -> type[InferenceMode]:
        return InferenceMode

    @override
    @property
    def supports_few_shotting(self) -> bool:
        return True

    @override
    def build_executable(
        self,
        inference_mode: InferenceMode,
        prompt_template: str | None,  # noqa: UP007
        prompt_signature: type[PromptSignature] | PromptSignature,
        fewshot_examples: Sequence[pydantic.BaseModel] = tuple(),
    ) -> Executable[Result | None]:
        # Note: prompt_template is ignored here, as DSPy doesn't use it directly (only prompt_signature_description).
        # Handled differently than the other supported modules: dspy.Module serves as both the signature and
        # the inference generator.
        if inference_mode == InferenceMode.module:
            assert isinstance(prompt_signature, dspy.Module), ValueError(
                "In inference mode 'module' the provided prompt signature has to be of type dspy.Module."
            )
            generator = inference_mode.value(**self._init_kwargs)
        else:
            assert isinstance(prompt_signature, type)
            assert issubclass(prompt_signature, dspy.Signature)
            generator = inference_mode.value(signature=prompt_signature, **self._init_kwargs)

        def execute(values: Sequence[dict[str, Any]]) -> Iterable[Result | None]:
            """Execute structured generation with DSPy.

            :param values: Values to inject into prompts.
            :return: Results for prompts.
            """
            # Compile predictor with few-shot examples.
            fewshot_examples_dicts = DSPy.convert_fewshot_examples(fewshot_examples)
            generator_fewshot: dspy.Module | None = None
            if len(fewshot_examples_dicts):
                examples = [dspy.Example(**fs_example) for fs_example in fewshot_examples_dicts]
                generator_fewshot = dspy.LabeledFewShot(k=len(examples)).compile(
                    student=generator, trainset=examples
                )

            try:
                gen = generator_fewshot or generator
                calls = [gen.acall(**doc_values, **self._inference_kwargs) for doc_values in values]
                yield from asyncio.run(self._execute_async_calls(calls))
            except Exception as err:
                if self._strict_mode:
                    raise type(err)(
                        "Encountered problem when executing prompt. Ensure your few-shot examples and document "
                        "chunks contain sensible information."
                    ) from err
                else:
                    yield from [None] * len(values)

        return execute
```
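The few-shot path in `execute` wraps each example dict in a `dspy.Example` and compiles the labeled examples into the predictor via `dspy.LabeledFewShot`. Below is a minimal sketch of that pattern in isolation, using a hypothetical `Sentiment` signature that is not part of sieves:

```python
import dspy

# Hypothetical signature for illustration only.
class Sentiment(dspy.Signature):
    """Classify the sentiment of a text."""

    text: str = dspy.InputField()
    sentiment: str = dspy.OutputField()

predictor = dspy.Predict(Sentiment)

# Mirrors `execute`: wrap each few-shot dict in dspy.Example, then compile the
# examples into the predictor as labeled demonstrations.
fewshot_dicts = [{"text": "Great product!", "sentiment": "positive"}]
examples = [dspy.Example(**d) for d in fewshot_dicts]
predictor_fewshot = dspy.LabeledFewShot(k=len(examples)).compile(student=predictor, trainset=examples)
```

`execute` then awaits `acall(...)` on the compiled module once per document's values, batching the resulting coroutines through `asyncio.run`.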
generation_settings property

Return generation settings.

Returns:

| Type | Description |
| --- | --- |
| `GenerationSettings` | Generation settings. |
model property

Return model instance.

Returns:

| Type | Description |
| --- | --- |
| `EngineModel` | Model instance. |
__init__(model, generation_settings)

Initialize engine.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` | `Model` | Model to run. Note: DSPy only runs with APIs. To run a model locally (DSPy v2.5 onwards), serve it with Ollama - see https://dspy.ai/learn/programming/language_models/?h=models#__tabbed_1_5. In a nutshell: `curl -fsSL https://ollama.ai/install.sh \| sh`, then `ollama run MODEL_ID`, then `model = dspy.LM(MODEL_ID, api_base='http://localhost:11434', api_key='')`. | required |
| `generation_settings` | `GenerationSettings` | Settings including DSPy configuration in `config_kwargs`. | required |
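For instance, a construction sketch against a locally served Ollama model - the model id is illustrative, and `GenerationSettings()` is assumed here to be constructible with defaults:

```python
import dspy

# Illustrative model id; any model pulled via `ollama run MODEL_ID` works.
model = dspy.LM("ollama_chat/llama3.2", api_base="http://localhost:11434", api_key="")
engine = DSPy(model=model, generation_settings=GenerationSettings())
```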
Source code in sieves/engines/dspy_.py
```python
def __init__(self, model: Model, generation_settings: GenerationSettings):
    """Initialize engine.

    :param model: Model to run. Note: DSPy only runs with APIs. If you want to run a model locally (DSPy v2.5
        onwards), serve it with Ollama - see
        https://dspy.ai/learn/programming/language_models/?h=models#__tabbed_1_5. In a nutshell:
        > curl -fsSL https://ollama.ai/install.sh | sh
        > ollama run MODEL_ID
        > `model = dspy.LM(MODEL_ID, api_base='http://localhost:11434', api_key='')`
    :param generation_settings: Settings including DSPy configuration in `config_kwargs`.
    """
    super().__init__(model, generation_settings)
    cfg = generation_settings.config_kwargs or {}
    dspy.configure(lm=model, **cfg)
```
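Since `__init__` forwards `config_kwargs` to `dspy.configure`, global DSPy settings can be set through `GenerationSettings`. A sketch, assuming `GenerationSettings` accepts `config_kwargs` as a keyword argument and that your DSPy version supports the `track_usage` setting:

```python
# Keys in config_kwargs are passed straight through to dspy.configure alongside the model.
settings = GenerationSettings(config_kwargs={"track_usage": True})
engine = DSPy(model=model, generation_settings=settings)
# Internally this runs: dspy.configure(lm=model, track_usage=True)
```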
convert_fewshot_examples(fewshot_examples) staticmethod

Convert few-shot examples to dicts.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `fewshot_examples` | `Sequence[BaseModel]` | Few-shot examples to convert. | required |

Returns:

| Type | Description |
| --- | --- |
| `list[dict[str, Any]]` | Few-shot examples as dicts. |
Source code in sieves/engines/core.py
```python
@staticmethod
def convert_fewshot_examples(fewshot_examples: Sequence[pydantic.BaseModel]) -> list[dict[str, Any]]:
    """Convert few-shot examples to dicts.

    :param fewshot_examples: Few-shot examples to convert.
    :return: Few-shot examples as dicts.
    """
    return [fs_example.model_dump(serialize_as_any=True) for fs_example in fewshot_examples]
```
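A quick usage sketch with a hypothetical pydantic model:

```python
import pydantic

# Hypothetical example model; any pydantic.BaseModel subclass works.
class FewshotExample(pydantic.BaseModel):
    text: str
    sentiment: str

dicts = DSPy.convert_fewshot_examples([FewshotExample(text="Great product!", sentiment="positive")])
# [{'text': 'Great product!', 'sentiment': 'positive'}]
```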