Prompt

InputModel module-attribute

InputModel = TypeVar('InputModel', bound=BaseModel)

OutputModel module-attribute

OutputModel = TypeVar('OutputModel', bound=BaseModel)
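
Both type variables are bound to pydantic.BaseModel, so any pair of models used to parametrize PydanticPrompt (documented below) must be Pydantic models. A minimal sketch with hypothetical model names:

from pydantic import BaseModel

# Hypothetical models; any BaseModel subclasses satisfy the TypeVar bound.
class QuestionInput(BaseModel):
    question: str

class AnswerOutput(BaseModel):
    answer: str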

BasePrompt

BasePrompt(name: Optional[str] = None, language: str = 'english', original_hash: Optional[str] = None)

Bases: ABC

Source code in src/ragas/prompt/base.py
def __init__(
    self,
    name: t.Optional[str] = None,
    language: str = "english",
    original_hash: t.Optional[str] = None,
):
    if name is None:
        self.name = camel_to_snake(self.__class__.__name__)
    else:
        self.name = name

    _check_if_language_is_supported(language)
    self.language = language
    self.original_hash = original_hash

generate abstractmethod async

generate(llm: BaseRagasLLM, data: Any, temperature: Optional[float] = None, stop: Optional[List[str]] = None, callbacks: Callbacks = []) -> Any

Generate a single completion from the prompt.

Source code in src/ragas/prompt/base.py
@abstractmethod
async def generate(
    self,
    llm: BaseRagasLLM,
    data: t.Any,
    temperature: t.Optional[float] = None,
    stop: t.Optional[t.List[str]] = None,
    callbacks: Callbacks = [],
) -> t.Any:
    """
    Generate a single completion from the prompt.
    """
    pass

generate_multiple abstractmethod

generate_multiple(llm: BaseRagasLLM, data: Any, n: int = 1, temperature: Optional[float] = None, stop: Optional[List[str]] = None, callbacks: Callbacks = []) -> Any

Generate multiple completions from the prompt.

Source code in src/ragas/prompt/base.py
@abstractmethod
def generate_multiple(
    self,
    llm: BaseRagasLLM,
    data: t.Any,
    n: int = 1,
    temperature: t.Optional[float] = None,
    stop: t.Optional[t.List[str]] = None,
    callbacks: Callbacks = [],
) -> t.Any:
    """
    Generate multiple completions from the prompt.
    """
    pass

StringPrompt

StringPrompt(name: Optional[str] = None, language: str = 'english', original_hash: Optional[str] = None)

Bases: BasePrompt

A simple prompt that can be formatted with additional data using f-string syntax.

This prompt is a simpler alternative to PydanticPrompt for those who prefer a more flexible approach without the need for a Pydantic model.

Parameters:

Name Type Description Default
instruction str

The instruction string that can be formatted with additional data.

required

Examples:

>>> from ragas.prompt import StringPrompt
>>> prompt = StringPrompt()
>>> await prompt.generate(llm=llm, data="Explain commerce in one sentence.")  # llm: a BaseRagasLLM
Source code in src/ragas/prompt/base.py
def __init__(
    self,
    name: t.Optional[str] = None,
    language: str = "english",
    original_hash: t.Optional[str] = None,
):
    if name is None:
        self.name = camel_to_snake(self.__class__.__name__)
    else:
        self.name = name

    _check_if_language_is_supported(language)
    self.language = language
    self.original_hash = original_hash

generate async

generate(llm: BaseRagasLLM, data: str, temperature: Optional[float] = None, stop: Optional[List[str]] = None, callbacks: Callbacks = []) -> str

Generate text based on the instruction and provided data.

Parameters:

Name Type Description Default
llm BaseRagasLLM

The language model to use for text generation.

required
data str

The string used as the prompt text; it is sent to the LLM verbatim.

required
temperature Optional[float]

The temperature for text generation, by default None.

None
stop Optional[List[str]]

The stop sequences for text generation, by default None.

None
callbacks Callbacks

The callbacks to use during text generation, by default [].

[]

Returns:

Type Description
str

The generated text.

Source code in src/ragas/prompt/base.py
async def generate(
    self,
    llm: BaseRagasLLM,
    data: str,
    temperature: t.Optional[float] = None,
    stop: t.Optional[t.List[str]] = None,
    callbacks: Callbacks = [],
) -> str:
    """
    Generate text based on the instruction and provided data.

    Parameters
    ----------
    llm : BaseRagasLLM
        The language model to use for text generation.
    data : Optional[Dict[str, Any]], optional
        The data to format the instruction with, by default None.
    n : int, optional
        The number of completions to generate, by default 1.
    temperature : Optional[float], optional
        The temperature for text generation, by default None.
    stop : Optional[List[str]], optional
        The stop sequences for text generation, by default None.
    callbacks : Callbacks, optional
        The callbacks to use during text generation, by default [].

    Returns
    -------
    str
        The generated text.
    """
    llm_result = await llm.agenerate_text(
        StringPromptValue(text=data),
        n=1,
        temperature=temperature,
        stop=stop,
        callbacks=callbacks,
    )
    return llm_result.generations[0][0].text
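
A usage sketch: llm is assumed to be a BaseRagasLLM you have already constructed (e.g. by wrapping a LangChain chat model with ragas' LangchainLLMWrapper); the string passed as data is wrapped in a StringPromptValue and sent as-is.

import asyncio

from ragas.prompt import StringPrompt

async def main(llm):  # llm: a pre-built BaseRagasLLM instance (assumed)
    prompt = StringPrompt()
    # The data string is used verbatim as the prompt text.
    answer = await prompt.generate(llm=llm, data="Name three RAG evaluation metrics.")
    print(answer)

# asyncio.run(main(llm=my_llm))  # my_llm: your own LLM wrapper (hypothetical)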

PydanticPrompt

PydanticPrompt(name: Optional[str] = None, language: str = 'english', original_hash: Optional[str] = None)

Bases: BasePrompt, Generic[InputModel, OutputModel]

Source code in src/ragas/prompt/base.py
def __init__(
    self,
    name: t.Optional[str] = None,
    language: str = "english",
    original_hash: t.Optional[str] = None,
):
    if name is None:
        self.name = camel_to_snake(self.__class__.__name__)
    else:
        self.name = name

    _check_if_language_is_supported(language)
    self.language = language
    self.original_hash = original_hash
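
Concrete prompts are defined by subclassing PydanticPrompt with an input and an output model. The sketch below follows the class-attribute pattern used throughout ragas (instruction, input_model, output_model, examples); the model and field names here are illustrative:

from pydantic import BaseModel
from ragas.prompt import PydanticPrompt

class SummaryInput(BaseModel):   # hypothetical input model
    text: str

class SummaryOutput(BaseModel):  # hypothetical output model
    summary: str

class SummarizePrompt(PydanticPrompt[SummaryInput, SummaryOutput]):
    instruction = "Summarize the given text in one sentence."
    input_model = SummaryInput
    output_model = SummaryOutput
    examples = [
        (
            SummaryInput(text="Ragas evaluates RAG pipelines with LLM-based metrics."),
            SummaryOutput(summary="Ragas is a toolkit for evaluating RAG pipelines."),
        )
    ]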

generate async

generate(llm: BaseRagasLLM, data: InputModel, temperature: Optional[float] = None, stop: Optional[List[str]] = None, callbacks: Optional[Callbacks] = None, retries_left: int = 3) -> OutputModel

Generate a single output using the provided language model and input data.

This method is a special case of generate_multiple where only one output is generated.

Parameters:

Name Type Description Default
llm BaseRagasLLM

The language model to use for generation.

required
data InputModel

The input data for generation.

required
temperature float

The temperature parameter for controlling randomness in generation.

None
stop List[str]

A list of stop sequences to end generation.

None
callbacks Callbacks

Callback functions to be called during the generation process.

None
retries_left int

Number of retry attempts for an invalid LLM response.

3

Returns:

Type Description
OutputModel

The generated output.

Notes

This method internally calls generate_multiple with n=1 and returns the first (and only) result.

Source code in src/ragas/prompt/pydantic_prompt.py
async def generate(
    self,
    llm: BaseRagasLLM,
    data: InputModel,
    temperature: t.Optional[float] = None,
    stop: t.Optional[t.List[str]] = None,
    callbacks: t.Optional[Callbacks] = None,
    retries_left: int = 3,
) -> OutputModel:
    """
    Generate a single output using the provided language model and input data.

    This method is a special case of `generate_multiple` where only one output is generated.

    Parameters
    ----------
    llm : BaseRagasLLM
        The language model to use for generation.
    data : InputModel
        The input data for generation.
    temperature : float, optional
        The temperature parameter for controlling randomness in generation.
    stop : List[str], optional
        A list of stop sequences to end generation.
    callbacks : Callbacks, optional
        Callback functions to be called during the generation process.
    retries_left : int, optional
        Number of retry attempts for an invalid LLM response.

    Returns
    -------
    OutputModel
        The generated output.

    Notes
    -----
    This method internally calls `generate_multiple` with `n=1` and returns the first (and only) result.
    """
    callbacks = callbacks or []

    # this is just a special case of generate_multiple
    output_single = await self.generate_multiple(
        llm=llm,
        data=data,
        n=1,
        temperature=temperature,
        stop=stop,
        callbacks=callbacks,
        retries_left=retries_left,
    )
    return output_single[0]
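
Calling generate with an instance of the input model returns a validated instance of the output model; continuing the hypothetical SummarizePrompt from the sketch above:

async def run_once(llm):  # llm: a BaseRagasLLM instance (assumed)
    prompt = SummarizePrompt()
    result = await prompt.generate(
        llm=llm,
        data=SummaryInput(text="Ragas evaluates RAG pipelines with LLM-based metrics."),
    )
    print(result.summary)  # result is a parsed SummaryOutput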

generate_multiple async

generate_multiple(llm: BaseRagasLLM, data: InputModel, n: int = 1, temperature: Optional[float] = None, stop: Optional[List[str]] = None, callbacks: Optional[Callbacks] = None, retries_left: int = 3) -> List[OutputModel]

Generate multiple outputs using the provided language model and input data.

Parameters:

Name Type Description Default
llm BaseRagasLLM

The language model to use for generation.

required
data InputModel

The input data for generation.

required
n int

The number of outputs to generate. Default is 1.

1
temperature float

The temperature parameter for controlling randomness in generation.

None
stop List[str]

A list of stop sequences to end generation.

None
callbacks Callbacks

Callback functions to be called during the generation process.

None
retries_left int

Number of retry attempts for an invalid LLM response.

3

Returns:

Type Description
List[OutputModel]

A list of generated outputs.

Raises:

Type Description
RagasOutputParserException

If there's an error parsing the output.

Source code in src/ragas/prompt/pydantic_prompt.py
async def generate_multiple(
    self,
    llm: BaseRagasLLM,
    data: InputModel,
    n: int = 1,
    temperature: t.Optional[float] = None,
    stop: t.Optional[t.List[str]] = None,
    callbacks: t.Optional[Callbacks] = None,
    retries_left: int = 3,
) -> t.List[OutputModel]:
    """
    Generate multiple outputs using the provided language model and input data.

    Parameters
    ----------
    llm : BaseRagasLLM
        The language model to use for generation.
    data : InputModel
        The input data for generation.
    n : int, optional
        The number of outputs to generate. Default is 1.
    temperature : float, optional
        The temperature parameter for controlling randomness in generation.
    stop : List[str], optional
        A list of stop sequences to end generation.
    callbacks : Callbacks, optional
        Callback functions to be called during the generation process.
    retries_left : int, optional
        Number of retry attempts for an invalid LLM response.

    Returns
    -------
    List[OutputModel]
        A list of generated outputs.

    Raises
    ------
    RagasOutputParserException
        If there's an error parsing the output.
    """
    callbacks = callbacks or []
    processed_data = self.process_input(data)
    prompt_rm, prompt_cb = new_group(
        name=self.name,
        inputs={"data": processed_data},
        callbacks=callbacks,
        metadata={"type": ChainType.RAGAS_PROMPT},
    )
    prompt_value = PromptValue(text=self.to_string(processed_data))
    resp = await llm.generate(
        prompt_value,
        n=n,
        temperature=temperature,
        stop=stop,
        callbacks=prompt_cb,
    )

    output_models = []
    parser = RagasOutputParser(pydantic_object=self.output_model)
    for i in range(n):
        output_string = resp.generations[0][i].text
        try:
            answer = await parser.parse_output_string(
                output_string=output_string,
                prompt_value=prompt_value,
                llm=llm,
                callbacks=prompt_cb,
                retries_left=retries_left,
            )
            processed_output = self.process_output(answer, data)  # type: ignore
            output_models.append(processed_output)
        except RagasOutputParserException as e:
            prompt_rm.on_chain_error(error=e)
            logger.error("Prompt %s failed to parse output: %s", self.name, e)
            raise e

    prompt_rm.on_chain_end({"output": output_models})
    return output_models
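
With n > 1, the call requests several completions in one round trip and parses each independently; a continuation of the same hypothetical sketch, with temperature raised so the samples differ:

async def run_many(llm):
    prompt = SummarizePrompt()
    results = await prompt.generate_multiple(
        llm=llm,
        data=SummaryInput(text="Ragas evaluates RAG pipelines."),
        n=3,
        temperature=0.8,
    )
    for r in results:  # three independently sampled SummaryOutput instances
        print(r.summary)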

adapt async

adapt(target_language: str, llm: BaseRagasLLM, adapt_instruction: bool = False) -> 'PydanticPrompt[InputModel, OutputModel]'

Adapt the prompt to a new language.

Source code in src/ragas/prompt/pydantic_prompt.py
async def adapt(
    self, target_language: str, llm: BaseRagasLLM, adapt_instruction: bool = False
) -> "PydanticPrompt[InputModel, OutputModel]":
    """
    Adapt the prompt to a new language.
    """

    # throws ValueError if language is not supported
    _check_if_language_is_supported(target_language)

    # set the original hash, this is used to
    # identify the original prompt object when loading from file
    if self.original_hash is None:
        self.original_hash = hash(self)

    strings = get_all_strings(self.examples)
    translated_strings = await translate_statements_prompt.generate(
        llm=llm,
        data=ToTranslate(target_language=target_language, statements=strings),
    )

    translated_examples = update_strings(
        obj=self.examples,
        old_strings=strings,
        new_strings=translated_strings.statements,
    )

    new_prompt = copy.deepcopy(self)
    new_prompt.examples = translated_examples
    new_prompt.language = target_language

    if adapt_instruction:
        translated_instruction = await translate_statements_prompt.generate(
            llm=llm,
            data=ToTranslate(
                target_language=target_language, statements=[self.instruction]
            ),
        )
        new_prompt.instruction = translated_instruction.statements[0]

    return new_prompt
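
adapt translates the few-shot examples (and, when adapt_instruction=True, the instruction itself) and returns a deep copy; the original prompt is not modified. A sketch, assuming the target language is among those ragas supports:

async def localize_prompt(llm):
    prompt = SummarizePrompt()  # hypothetical prompt from the sketch above
    spanish = await prompt.adapt("spanish", llm=llm, adapt_instruction=True)
    assert spanish.language == "spanish"
    assert prompt.language == "english"  # the original is unchanged
    return spanish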

save

save(file_path: str)

Save the prompt to a file.

Source code in src/ragas/prompt/pydantic_prompt.py
def save(self, file_path: str):
    """
    Save the prompt to a file.
    """
    data = {
        "ragas_version": __version__,
        "original_hash": (
            hash(self) if self.original_hash is None else self.original_hash
        ),
        "language": self.language,
        "instruction": self.instruction,
        "examples": [
            {"input": example[0].model_dump(), "output": example[1].model_dump()}
            for example in self.examples
        ],
    }
    if os.path.exists(file_path):
        raise FileExistsError(f"The file '{file_path}' already exists.")
    with open(file_path, "w") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
        print(f"Prompt saved to {file_path}")

BoolIO

Bases: BaseModel

StringIO

Bases: BaseModel

PromptMixin

Mixin class for classes that have prompts, e.g. BaseSynthesizer and MetricWithLLM.

get_prompts

get_prompts() -> Dict[str, PydanticPrompt]

Returns a dictionary of prompts for the class.

Source code in src/ragas/prompt/mixin.py
def get_prompts(self) -> t.Dict[str, PydanticPrompt]:
    """
    Returns a dictionary of prompts for the class.
    """
    prompts = {}
    for name, value in inspect.getmembers(self):
        if isinstance(value, PydanticPrompt):
            prompts.update({name: value})
    return prompts

set_prompts

set_prompts(**prompts)

Sets the prompts for the class.

Raises:

Type Description
ValueError

If the prompt is not an instance of PydanticPrompt.

Source code in src/ragas/prompt/mixin.py
def set_prompts(self, **prompts):
    """
    Sets the prompts for the class.

    Raises
    ------
    ValueError
        If the prompt is not an instance of `PydanticPrompt`.
    """
    available_prompts = self.get_prompts()
    for key, value in prompts.items():
        if key not in available_prompts:
            raise ValueError(
                f"Prompt with name '{key}' does not exist. Use get_prompts() to see available prompts."
            )
        if not isinstance(value, PydanticPrompt):
            raise ValueError(
                f"Prompt with name '{key}' must be an instance of 'ragas.prompt.PydanticPrompt'"
            )
        setattr(self, key, value)
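
Together with get_prompts, this lets you discover a component's prompt names and swap in replacements; the key must already exist on the object and the value must be a PydanticPrompt. A hedged sketch (the metric class and prompt name are illustrative):

metric = SomeMetricWithLLM()      # hypothetical PromptMixin subclass
print(list(metric.get_prompts())) # discover the prompt attribute names
metric.set_prompts(single_turn_prompt=SummarizePrompt())  # name is illustrative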

adapt_prompts async

adapt_prompts(language: str, llm: BaseRagasLLM, adapt_instruction: bool = False) -> Dict[str, PydanticPrompt]

Adapts the prompts in the class to the given language and using the given LLM.

Notes

Make sure you use the best available LLM for adapting the prompts, and then save and load the adapted prompts using the save_prompts and load_prompts methods.

Source code in src/ragas/prompt/mixin.py
async def adapt_prompts(
    self, language: str, llm: BaseRagasLLM, adapt_instruction: bool = False
) -> t.Dict[str, PydanticPrompt]:
    """
    Adapts the prompts in the class to the given language and using the given LLM.

    Notes
    -----
    Make sure you use the best available LLM for adapting the prompts and then save and load the prompts using
    [save_prompts][ragas.prompt.mixin.PromptMixin.save_prompts] and [load_prompts][ragas.prompt.mixin.PromptMixin.load_prompts]
    methods.
    """
    prompts = self.get_prompts()
    adapted_prompts = {}
    for name, prompt in prompts.items():
        adapted_prompt = await prompt.adapt(language, llm, adapt_instruction)
        adapted_prompts[name] = adapted_prompt

    return adapted_prompts
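
A typical workflow, following the note above: adapt with the best LLM available, apply the adapted prompts, then persist them. A sketch (metric is any PromptMixin subclass, names assumed):

async def adapt_and_save(metric, llm):
    adapted = await metric.adapt_prompts("spanish", llm=llm, adapt_instruction=True)
    metric.set_prompts(**adapted)
    metric.save_prompts("./prompts")  # the directory must already exist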

save_prompts

save_prompts(path: str)

Saves the prompts to a directory as files named {name}_{language}.json

Source code in src/ragas/prompt/mixin.py
def save_prompts(self, path: str):
    """
    Saves the prompts to a directory as files named {name}_{language}.json
    """
    # check if path is valid
    if not os.path.exists(path):
        raise ValueError(f"Path {path} does not exist")

    prompts = self.get_prompts()
    for prompt_name, prompt in prompts.items():
        prompt_file_name = os.path.join(
            path, f"{prompt_name}_{prompt.language}.json"
        )
        prompt.save(prompt_file_name)

load_prompts

load_prompts(path: str, language: Optional[str] = None)

Loads the prompts from a directory. Files should be named {name}_{language}.json

Source code in src/ragas/prompt/mixin.py
def load_prompts(self, path: str, language: t.Optional[str] = None):
    """
    Loads the prompts from a directory. Files should be named {name}_{language}.json
    """
    # check if path is valid
    if not os.path.exists(path):
        raise ValueError(f"Path {path} does not exist")

    # check if language is supported, defaults to english
    if language is None:
        language = "english"
        logger.info(
            "Language not specified, loading prompts for default language: %s",
            language,
        )
    _check_if_language_is_supported(language)

    loaded_prompts = {}
    for prompt_name, prompt in self.get_prompts().items():
        prompt_file_name = os.path.join(path, f"{prompt_name}_{language}.json")
        loaded_prompt = prompt.__class__.load(prompt_file_name)
        loaded_prompts[prompt_name] = loaded_prompt
    return loaded_prompts
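
On a later run the saved files can be loaded back and applied, matching the save example above; note that load_prompts returns the prompts rather than setting them:

loaded = metric.load_prompts("./prompts", language="spanish")
metric.set_prompts(**loaded)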