Schemas

BaseSample

Bases: BaseModel

Base class for evaluation samples.

to_dict

to_dict() -> Dict

Get the dictionary representation of the sample without attributes that are None.

Source code in src/ragas/dataset_schema.py
def to_dict(self) -> t.Dict:
    """
    Get the dictionary representation of the sample without attributes that are None.
    """
    return self.model_dump(exclude_none=True)

get_features

get_features() -> List[str]

Get the features of the sample that are not None.

Source code in src/ragas/dataset_schema.py
def get_features(self) -> t.List[str]:
    """
    Get the features of the sample that are not None.
    """
    return list(self.to_dict().keys())

to_string

to_string() -> str

Get the string representation of the sample.

Source code in src/ragas/dataset_schema.py
def to_string(self) -> str:
    """
    Get the string representation of the sample.
    """
    sample_dict = self.to_dict()
    return "".join(f"\n{key}:\n\t{val}\n" for key, val in sample_dict.items())

SingleTurnSample

Bases: BaseSample

Represents evaluation samples for single-turn interactions.

Attributes:

Name Type Description
user_input Optional[str]

The input query from the user.

retrieved_contexts Optional[List[str]]

List of contexts retrieved for the query.

reference_contexts Optional[List[str]]

List of reference contexts for the query.

response Optional[str]

The generated response for the query.

multi_responses Optional[List[str]]

List of multiple responses generated for the query.

reference Optional[str]

The reference answer for the query.

rubric Optional[Dict[str, str]]

Evaluation rubric for the sample.
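
Example: a minimal usage sketch (assuming ragas is installed and importing from the module shown in the "Source code" paths above; the field values are illustrative):

from ragas.dataset_schema import SingleTurnSample

sample = SingleTurnSample(
    user_input="Where is the Eiffel Tower located?",
    retrieved_contexts=["The Eiffel Tower is in Paris, France."],
    response="The Eiffel Tower is located in Paris.",
    reference="Paris, France",
)

# Only the attributes that are not None show up.
print(sample.get_features())  # e.g. ['user_input', 'retrieved_contexts', 'response', 'reference']
print(sample.to_dict())       # dict built via model_dump(exclude_none=True)
print(sample.to_string())     # readable key/value listing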

MultiTurnSample

Bases: BaseSample

Represents evaluation samples for multi-turn interactions.

Attributes:

Name Type Description
user_input List[Union[HumanMessage, AIMessage, ToolMessage]]

A list of messages representing the conversation turns.

reference (Optional[str], optional)

The reference answer or expected outcome for the conversation.

reference_tool_calls (Optional[List[ToolCall]], optional)

A list of expected tool calls for the conversation.

rubrics (Optional[Dict[str, str]], optional)

Evaluation rubrics for the conversation.

reference_topics (Optional[List[str]], optional)

A list of reference topics for the conversation.

validate_user_input classmethod

validate_user_input(messages: List[Union[HumanMessage, AIMessage, ToolMessage]]) -> List[Union[HumanMessage, AIMessage, ToolMessage]]

Validates the user input messages.

Source code in src/ragas/dataset_schema.py
@field_validator("user_input")
@classmethod
def validate_user_input(
    cls,
    messages: t.List[t.Union[HumanMessage, AIMessage, ToolMessage]],
) -> t.List[t.Union[HumanMessage, AIMessage, ToolMessage]]:
    """Validates the user input messages."""
    if not all(
        isinstance(m, (HumanMessage, AIMessage, ToolMessage)) for m in messages
    ):
        raise ValueError(
            "All inputs must be instances of HumanMessage, AIMessage, or ToolMessage."
        )

    prev_message = None
    for m in messages:
        if isinstance(m, ToolMessage):
            if not isinstance(prev_message, AIMessage):
                raise ValueError(
                    "ToolMessage instances must be preceded by an AIMessage instance."
                )
            if prev_message.tool_calls is None:
                raise ValueError(
                    f"ToolMessage instances must be preceded by an AIMessage instance with tool_calls. Got {prev_message}"
                )
        prev_message = m

    return messages

to_messages

to_messages()

Converts the user input messages to a list of dictionaries.

Source code in src/ragas/dataset_schema.py
def to_messages(self):
    """Converts the user input messages to a list of dictionaries."""
    return [m.model_dump() for m in self.user_input]

pretty_repr

pretty_repr()

Returns a pretty string representation of the conversation.

Source code in src/ragas/dataset_schema.py
def pretty_repr(self):
    """Returns a pretty string representation of the conversation."""
    lines = []
    for m in self.user_input:
        lines.append(m.pretty_repr())

    return "\n".join(lines)

RagasDataset dataclass

RagasDataset(samples: List[Sample])

Bases: ABC, Generic[Sample]

to_list abstractmethod

to_list() -> List[Dict]

Converts the samples to a list of dictionaries.

Source code in src/ragas/dataset_schema.py
@abstractmethod
def to_list(self) -> t.List[t.Dict]:
    """Converts the samples to a list of dictionaries."""
    pass

from_list abstractmethod classmethod

from_list(data: List[Dict]) -> T

Creates a RagasDataset from a list of dictionaries.

Source code in src/ragas/dataset_schema.py
@classmethod
@abstractmethod
def from_list(cls: t.Type[T], data: t.List[t.Dict]) -> T:
    """Creates an RagasDataset from a list of dictionaries."""
    pass

validate_samples

validate_samples(samples: List[Sample]) -> List[Sample]

Validates that all samples are of the same type.

Source code in src/ragas/dataset_schema.py
def validate_samples(self, samples: t.List[Sample]) -> t.List[Sample]:
    """Validates that all samples are of the same type."""
    if len(samples) == 0:
        return samples

    first_sample_type = type(samples[0])
    for i, sample in enumerate(samples):
        if not isinstance(sample, first_sample_type):
            raise ValueError(
                f"Sample at index {i} is of type {type(sample)}, expected {first_sample_type}"
            )

    return samples

get_sample_type

get_sample_type() -> Type[Sample]

Returns the type of the samples in the dataset.

Source code in src/ragas/dataset_schema.py
def get_sample_type(self) -> t.Type[Sample]:
    """Returns the type of the samples in the dataset."""
    return type(self.samples[0])

to_hf_dataset

to_hf_dataset() -> Dataset

Converts the dataset to a Hugging Face Dataset.

Source code in src/ragas/dataset_schema.py
def to_hf_dataset(self) -> HFDataset:
    """Converts the dataset to a Hugging Face Dataset."""
    try:
        from datasets import Dataset as HFDataset
    except ImportError:
        raise ImportError(
            "datasets is not installed. Please install it to use this function."
        )

    return HFDataset.from_list(self.to_list())

from_hf_dataset classmethod

from_hf_dataset(dataset: Dataset) -> T

Creates an EvaluationDataset from a Hugging Face Dataset.

Source code in src/ragas/dataset_schema.py
@classmethod
def from_hf_dataset(cls: t.Type[T], dataset: HFDataset) -> T:
    """Creates an EvaluationDataset from a Hugging Face Dataset."""
    return cls.from_list(dataset.to_list())

to_pandas

to_pandas() -> DataFrame

Converts the dataset to a pandas DataFrame.

Source code in src/ragas/dataset_schema.py
def to_pandas(self) -> PandasDataframe:
    """Converts the dataset to a pandas DataFrame."""
    try:
        import pandas as pd
    except ImportError:
        raise ImportError(
            "pandas is not installed. Please install it to use this function."
        )

    data = self.to_list()
    return pd.DataFrame(data)

from_pandas classmethod

from_pandas(dataframe: DataFrame)

Creates an EvaluationDataset from a pandas DataFrame.

Source code in src/ragas/dataset_schema.py
@classmethod
def from_pandas(cls, dataframe: PandasDataframe):
    """Creates an EvaluationDataset from a pandas DataFrame."""
    return cls.from_list(dataframe.to_dict(orient="records"))
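
Example: a sketch of the tabular converters, assuming the optional pandas and datasets packages are installed and using the concrete EvaluationDataset subclass documented further down:

from ragas.dataset_schema import EvaluationDataset, SingleTurnSample

dataset = EvaluationDataset(samples=[
    SingleTurnSample(user_input="Q1", response="A1", reference="R1"),
    SingleTurnSample(user_input="Q2", response="A2", reference="R2"),
])

df = dataset.to_pandas()         # pandas.DataFrame, one row per sample
hf_ds = dataset.to_hf_dataset()  # datasets.Dataset with the same records

# Round-trip back into Ragas objects.
from_df = EvaluationDataset.from_pandas(df)
from_hf = EvaluationDataset.from_hf_dataset(hf_ds)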

features

features()

Returns the features of the samples.

Source code in src/ragas/dataset_schema.py
def features(self):
    """Returns the features of the samples."""
    return self.samples[0].get_features()

from_dict classmethod

from_dict(mapping: Dict) -> T

Creates an EvaluationDataset from a dictionary.

Source code in src/ragas/dataset_schema.py
@classmethod
def from_dict(cls: t.Type[T], mapping: t.Dict) -> T:
    """Creates an EvaluationDataset from a dictionary."""
    samples = []
    if all(
        "user_input" in item and isinstance(mapping[0]["user_input"], list)
        for item in mapping
    ):
        samples.extend(MultiTurnSample(**sample) for sample in mapping)
    else:
        samples.extend(SingleTurnSample(**sample) for sample in mapping)
    return cls(samples=samples)

to_csv

to_csv(path: Union[str, Path])

Converts the dataset to a CSV file.

Source code in src/ragas/dataset_schema.py
def to_csv(self, path: t.Union[str, Path]):
    """Converts the dataset to a CSV file."""
    import csv

    data = self.to_list()
    if not data:
        return

    fieldnames = data[0].keys()

    with open(path, "w", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for row in data:
            writer.writerow(row)

to_jsonl

to_jsonl(path: Union[str, Path])

Converts the dataset to a JSONL file.

Source code in src/ragas/dataset_schema.py
def to_jsonl(self, path: t.Union[str, Path]):
    """Converts the dataset to a JSONL file."""
    with open(path, "w") as jsonlfile:
        for sample in self.to_list():
            jsonlfile.write(json.dumps(sample, ensure_ascii=False) + "\n")

from_jsonl classmethod

from_jsonl(path: Union[str, Path]) -> T

Creates an EvaluationDataset from a JSONL file.

Source code in src/ragas/dataset_schema.py
@classmethod
def from_jsonl(cls: t.Type[T], path: t.Union[str, Path]) -> T:
    """Creates an EvaluationDataset from a JSONL file."""
    with open(path, "r") as jsonlfile:
        data = [json.loads(line) for line in jsonlfile]
    return cls.from_list(data)
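
Example: a sketch of file export and re-import (file names are placeholders; note that only JSONL has a matching loader on this class):

from ragas.dataset_schema import EvaluationDataset, SingleTurnSample

dataset = EvaluationDataset(samples=[
    SingleTurnSample(user_input="Q1", response="A1"),
    SingleTurnSample(user_input="Q2", response="A2"),
])

dataset.to_csv("eval_samples.csv")      # header row comes from the first sample's keys
dataset.to_jsonl("eval_samples.jsonl")  # one JSON object per line

reloaded = EvaluationDataset.from_jsonl("eval_samples.jsonl")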

EvaluationDataset dataclass

EvaluationDataset(samples: List[Sample])

Bases: RagasDataset[SingleTurnSampleOrMultiTurnSample]

Represents a dataset of evaluation samples.

Attributes:

Name Type Description
samples List[BaseSample]

A list of evaluation samples.

Methods:

Name Description
validate_samples

Validates that all samples are of the same type.

get_sample_type

Returns the type of the samples in the dataset.

to_hf_dataset

Converts the dataset to a Hugging Face Dataset.

to_pandas

Converts the dataset to a pandas DataFrame.

features

Returns the features of the samples.

from_list

Creates an EvaluationDataset from a list of dictionaries.

from_dict

Creates an EvaluationDataset from a dictionary.

to_csv

Converts the dataset to a CSV file.

to_jsonl

Converts the dataset to a JSONL file.

from_jsonl

Creates an EvaluationDataset from a JSONL file.
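
Example: building an EvaluationDataset from plain dictionaries with from_list; the sample type (single-turn vs. multi-turn) is inferred from the shape of user_input:

from ragas.dataset_schema import EvaluationDataset

rows = [
    {
        "user_input": "Where is the Eiffel Tower?",
        "retrieved_contexts": ["The Eiffel Tower is in Paris."],
        "response": "It is in Paris.",
        "reference": "Paris",
    },
    {
        "user_input": "Who wrote Hamlet?",
        "retrieved_contexts": ["Hamlet is a play by William Shakespeare."],
        "response": "William Shakespeare.",
        "reference": "William Shakespeare",
    },
]

dataset = EvaluationDataset.from_list(rows)
print(dataset.get_sample_type())  # SingleTurnSample, inferred from the dict shape
print(dataset.features())         # keys present on the first sample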

EvaluationResult dataclass

EvaluationResult(scores: List[Dict[str, Any]], dataset: EvaluationDataset, binary_columns: List[str] = list(), cost_cb: Optional[CostCallbackHandler] = None, traces: List[Dict[str, Any]] = list(), ragas_traces: Dict[str, ChainRun] = dict(), run_id: Optional[UUID] = None)

A class to store and process the results of the evaluation.

Attributes:

Name Type Description
scores List[Dict[str, Any]]

The per-sample metric scores of the evaluation.

dataset (EvaluationDataset, optional)

The original dataset used for the evaluation. Default is None.

binary_columns list of str, optional

List of columns that are binary metrics. Default is an empty list.

cost_cb (CostCallbackHandler, optional)

The callback handler for cost computation. Default is None.

to_pandas

to_pandas(batch_size: int | None = None, batched: bool = False)

Convert the result to a pandas DataFrame.

Parameters:

Name Type Description Default
batch_size int

The batch size for conversion. Default is None.

None
batched bool

Whether to convert in batches. Default is False.

False

Returns:

Type Description
DataFrame

The result as a pandas DataFrame.

Raises:

Type Description
ValueError

If the dataset is not provided.

Source code in src/ragas/dataset_schema.py
def to_pandas(self, batch_size: int | None = None, batched: bool = False):
    """
    Convert the result to a pandas DataFrame.

    Parameters
    ----------
    batch_size : int, optional
        The batch size for conversion. Default is None.
    batched : bool, optional
        Whether to convert in batches. Default is False.

    Returns
    -------
    pandas.DataFrame
        The result as a pandas DataFrame.

    Raises
    ------
    ValueError
        If the dataset is not provided.
    """
    try:
        import pandas as pd
    except ImportError:
        raise ImportError(
            "pandas is not installed. Please install it to use this function."
        )

    if self.dataset is None:
        raise ValueError("dataset is not provided for the results class")
    assert len(self.scores) == len(self.dataset)
    # convert both to pandas dataframes and concatenate
    scores_df = pd.DataFrame(self.scores)
    dataset_df = self.dataset.to_pandas()
    return pd.concat([dataset_df, scores_df], axis=1)
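
Example: a sketch of how the scores and dataset are joined by to_pandas. An EvaluationResult is normally returned by ragas.evaluate(); it is constructed by hand here purely for illustration, and the metric name is a placeholder:

from ragas.dataset_schema import EvaluationDataset, EvaluationResult, SingleTurnSample

dataset = EvaluationDataset(samples=[
    SingleTurnSample(user_input="Q1", response="A1"),
    SingleTurnSample(user_input="Q2", response="A2"),
])

result = EvaluationResult(
    scores=[{"faithfulness": 0.9}, {"faithfulness": 0.7}],  # one dict per sample
    dataset=dataset,
)

df = result.to_pandas()  # dataset columns followed by one column per metric
print(df.columns.tolist())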

total_tokens

total_tokens() -> Union[List[TokenUsage], TokenUsage]

Compute the total tokens used in the evaluation.

Returns:

Type Description
list of TokenUsage or TokenUsage

The total tokens used.

Raises:

Type Description
ValueError

If the cost callback handler is not provided.

Source code in src/ragas/dataset_schema.py
def total_tokens(self) -> t.Union[t.List[TokenUsage], TokenUsage]:
    """
    Compute the total tokens used in the evaluation.

    Returns
    -------
    list of TokenUsage or TokenUsage
        The total tokens used.

    Raises
    ------
    ValueError
        If the cost callback handler is not provided.
    """
    if self.cost_cb is None:
        raise ValueError(
            "The evaluate() run was not configured for computing cost. Please provide a token_usage_parser function to evaluate() to compute cost."
        )
    return self.cost_cb.total_tokens()

total_cost

total_cost(cost_per_input_token: Optional[float] = None, cost_per_output_token: Optional[float] = None, per_model_costs: Dict[str, Tuple[float, float]] = {}) -> float

Compute the total cost of the evaluation.

Parameters:

Name Type Description Default
cost_per_input_token float

The cost per input token. Default is None.

None
cost_per_output_token float

The cost per output token. Default is None.

None
per_model_costs dict of str to tuple of float

The per model costs. Default is an empty dictionary.

{}

Returns:

Type Description
float

The total cost of the evaluation.

Raises:

Type Description
ValueError

If the cost callback handler is not provided.

Source code in src/ragas/dataset_schema.py
def total_cost(
    self,
    cost_per_input_token: t.Optional[float] = None,
    cost_per_output_token: t.Optional[float] = None,
    per_model_costs: t.Dict[str, t.Tuple[float, float]] = {},
) -> float:
    """
    Compute the total cost of the evaluation.

    Parameters
    ----------
    cost_per_input_token : float, optional
        The cost per input token. Default is None.
    cost_per_output_token : float, optional
        The cost per output token. Default is None.
    per_model_costs : dict of str to tuple of float, optional
        The per model costs. Default is an empty dictionary.

    Returns
    -------
    float
        The total cost of the evaluation.

    Raises
    ------
    ValueError
        If the cost callback handler is not provided.
    """
    if self.cost_cb is None:
        raise ValueError(
            "The evaluate() run was not configured for computing cost. Please provide a token_usage_parser function to evaluate() to compute cost."
        )
    return self.cost_cb.total_cost(
        cost_per_input_token, cost_per_output_token, per_model_costs
    )
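
Example: a hedged sketch of cost reporting. ragas.evaluate and get_token_usage_for_openai are not documented on this page and are assumptions about the wider API; cost tracking only works when evaluate() was given a token_usage_parser, otherwise both methods raise ValueError:

from ragas import evaluate                          # assumed top-level entry point
from ragas.cost import get_token_usage_for_openai   # assumed parser for OpenAI-style usage

result = evaluate(
    dataset=dataset,                                 # EvaluationDataset prepared elsewhere
    metrics=metrics,                                 # metric instances prepared elsewhere
    token_usage_parser=get_token_usage_for_openai,   # populates cost_cb on the result
)

print(result.total_tokens())
print(result.total_cost(cost_per_input_token=5e-6, cost_per_output_token=15e-6))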

MetricAnnotation

Bases: BaseModel

from_json classmethod

from_json(path: str, metric_name: Optional[str]) -> 'MetricAnnotation'

Load annotations from a JSON file

Source code in src/ragas/dataset_schema.py
@classmethod
def from_json(cls, path: str, metric_name: t.Optional[str]) -> "MetricAnnotation":
    """Load annotations from a JSON file"""
    with open(path) as f:
        dataset = json.load(f)
    return cls._process_dataset(dataset, metric_name)

from_app classmethod

from_app(run_id: str, metric_name: Optional[str] = None) -> 'MetricAnnotation'

Fetch annotations from the Ragas app API using a run_id.

Parameters:

Name Type Description Default
run_id str

Direct run ID to fetch annotations

required
metric_name str

Name of the specific metric to filter

None

Returns:

Type Description
MetricAnnotation

Annotation data from the API

Raises:

Type Description
ValueError

If run_id is not provided

Source code in src/ragas/dataset_schema.py
@classmethod
def from_app(
    cls,
    run_id: str,
    metric_name: t.Optional[str] = None,
) -> "MetricAnnotation":
    """
    Fetch annotations from the Ragas app API using a run_id.

    Parameters
    ----------
    run_id : str
        Direct run ID to fetch annotations
    metric_name : str, optional
        Name of the specific metric to filter

    Returns
    -------
    MetricAnnotation
        Annotation data from the API

    Raises
    ------
    ValueError
        If run_id is not provided
    """
    if run_id is None:
        raise ValueError("run_id must be provided")

    endpoint = f"/api/v1/alignment/evaluation/annotation/{run_id}"

    app_token = get_app_token()
    base_url = get_api_url()
    app_url = get_app_url()

    response = requests.get(
        f"{base_url}{endpoint}",
        headers={
            "Content-Type": "application/json",
            "x-app-token": app_token,
            "x-source": RAGAS_API_SOURCE,
            "x-app-version": __version__,
        },
    )

    check_api_response(response)
    dataset = response.json()["data"]

    if not dataset:
        evaluation_url = build_evaluation_app_url(app_url, run_id)
        raise ValueError(
            f"No annotations found. Please annotate the Evaluation first then run this method. "
            f"\nNote: you can annotate the evaluations using the Ragas app by going to {evaluation_url}"
        )

    return cls._process_dataset(dataset, metric_name)
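
Example: a short sketch of both loaders; the file path, run id, and metric name below are placeholders:

from ragas.dataset_schema import MetricAnnotation

# From a local JSON export of annotations.
local = MetricAnnotation.from_json("annotations.json", metric_name="answer_correctness")

# From the Ragas app, for an evaluation run that has already been annotated.
remote = MetricAnnotation.from_app(run_id="<your-run-id>", metric_name="answer_correctness")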

SingleMetricAnnotation

Bases: BaseModel

train_test_split

train_test_split(test_size: float = 0.2, seed: int = 42, stratify: Optional[List[Any]] = None) -> Tuple['SingleMetricAnnotation', 'SingleMetricAnnotation']

Split the dataset into training and testing sets.

Parameters:

    test_size (float): The proportion of the dataset to include in the test split.
    seed (int): Random seed for reproducibility.
    stratify (list): The column values to stratify the split on.

Source code in src/ragas/dataset_schema.py
def train_test_split(
    self,
    test_size: float = 0.2,
    seed: int = 42,
    stratify: t.Optional[t.List[t.Any]] = None,
) -> t.Tuple["SingleMetricAnnotation", "SingleMetricAnnotation"]:
    """
    Split the dataset into training and testing sets.

    Parameters:
        test_size (float): The proportion of the dataset to include in the test split.
        seed (int): Random seed for reproducibility.
        stratify (list): The column values to stratify the split on.
    """
    raise NotImplementedError

sample

sample(n: int, stratify_key: Optional[str] = None) -> 'SingleMetricAnnotation'

Create a subset of the dataset.

Parameters:

    n (int): The number of samples to include in the subset.
    stratify_key (str): The column to stratify the subset on.

Returns:

    SingleMetricAnnotation: A subset of the dataset with n samples.

Source code in src/ragas/dataset_schema.py
def sample(
    self, n: int, stratify_key: t.Optional[str] = None
) -> "SingleMetricAnnotation":
    """
    Create a subset of the dataset.

    Parameters:
        n (int): The number of samples to include in the subset.
        stratify_key (str): The column to stratify the subset on.

    Returns:
        SingleMetricAnnotation: A subset of the dataset with `n` samples.
    """
    if n > len(self.samples):
        raise ValueError(
            "Requested sample size exceeds the number of available samples."
        )

    if stratify_key is None:
        # Simple random sampling
        sampled_indices = random.sample(range(len(self.samples)), n)
        sampled_samples = [self.samples[i] for i in sampled_indices]
    else:
        # Stratified sampling
        class_groups = defaultdict(list)
        for idx, sample in enumerate(self.samples):
            key = sample[stratify_key]
            class_groups[key].append(idx)

        # Determine the proportion of samples to take from each class
        total_samples = sum(len(indices) for indices in class_groups.values())
        proportions = {
            cls: len(indices) / total_samples
            for cls, indices in class_groups.items()
        }

        sampled_indices = []
        for cls, indices in class_groups.items():
            cls_sample_count = int(np.round(proportions[cls] * n))
            cls_sample_count = min(
                cls_sample_count, len(indices)
            )  # Don't oversample
            sampled_indices.extend(random.sample(indices, cls_sample_count))

        # Handle any rounding discrepancies to ensure exactly `n` samples
        while len(sampled_indices) < n:
            remaining_indices = set(range(len(self.samples))) - set(sampled_indices)
            if not remaining_indices:
                break
            sampled_indices.append(random.choice(list(remaining_indices)))

        sampled_samples = [self.samples[i] for i in sampled_indices]

    return SingleMetricAnnotation(name=self.name, samples=sampled_samples)

batch

batch(batch_size: int, drop_last_batch: bool = False)

Create a batch iterator.

Parameters:

    batch_size (int): The number of samples in each batch.
    drop_last_batch (bool): Whether to drop the last batch if it is smaller than the specified batch size.

Source code in src/ragas/dataset_schema.py
def batch(
    self,
    batch_size: int,
    drop_last_batch: bool = False,
):
    """
    Create a batch iterator.

    Parameters:
        batch_size (int): The number of samples in each batch.
        drop_last_batch (bool): Whether to drop the last batch if it is smaller than the specified batch size.
    """

    samples = self.samples[:]
    random.shuffle(samples)

    all_batches = [
        samples[i : i + batch_size]
        for i in range(0, len(samples), batch_size)
        if len(samples[i : i + batch_size]) == batch_size or not drop_last_batch
    ]

    return all_batches

stratified_batches

stratified_batches(batch_size: int, stratify_key: str, drop_last_batch: bool = False, replace: bool = False) -> List[List[SampleAnnotation]]

Create stratified batches based on a specified key, ensuring proportional representation.

Parameters:

    batch_size (int): Number of samples per batch.
    stratify_key (str): Key in metric_input used for stratification (e.g., class labels).
    drop_last_batch (bool): If True, drops the last batch if it has fewer samples than batch_size.
    replace (bool): If True, allows reusing samples from the same class to fill a batch if necessary.

Returns:

    List[List[SampleAnnotation]]: A list of stratified batches, each batch being a list of SampleAnnotation objects.

Source code in src/ragas/dataset_schema.py
def stratified_batches(
    self,
    batch_size: int,
    stratify_key: str,
    drop_last_batch: bool = False,
    replace: bool = False,
) -> t.List[t.List[SampleAnnotation]]:
    """
    Create stratified batches based on a specified key, ensuring proportional representation.

    Parameters:
        batch_size (int): Number of samples per batch.
        stratify_key (str): Key in `metric_input` used for stratification (e.g., class labels).
        drop_last_batch (bool): If True, drops the last batch if it has fewer samples than `batch_size`.
        replace (bool): If True, allows reusing samples from the same class to fill a batch if necessary.

    Returns:
        List[List[SampleAnnotation]]: A list of stratified batches, each batch being a list of SampleAnnotation objects.
    """
    # Group samples based on the stratification key
    class_groups = defaultdict(list)
    for sample in self.samples:
        key = sample[stratify_key]
        class_groups[key].append(sample)

    # Shuffle each class group for randomness
    for group in class_groups.values():
        random.shuffle(group)

    # Determine the number of batches required
    total_samples = len(self.samples)
    num_batches = (
        np.floor(total_samples / batch_size).astype(int)
        if drop_last_batch
        else np.ceil(total_samples / batch_size).astype(int)
    )
    samples_per_class_per_batch = {
        cls: max(1, len(samples) // num_batches)
        for cls, samples in class_groups.items()
    }

    # Create stratified batches
    all_batches = []
    while len(all_batches) < num_batches:
        batch = []
        for cls, samples in list(class_groups.items()):
            # Determine the number of samples to take from this class
            count = min(
                samples_per_class_per_batch[cls],
                len(samples),
                batch_size - len(batch),
            )
            if count > 0:
                # Add samples from the current class
                batch.extend(samples[:count])
                class_groups[cls] = samples[count:]  # Remove used samples
            elif replace and len(batch) < batch_size:
                # Reuse samples if `replace` is True
                batch.extend(random.choices(samples, k=batch_size - len(batch)))

        # Shuffle the batch to mix classes
        random.shuffle(batch)
        if len(batch) == batch_size or not drop_last_batch:
            all_batches.append(batch)

    return all_batches
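
Example: a sketch of sampling and batching, assuming annotation is a SingleMetricAnnotation obtained elsewhere (e.g. from a loaded MetricAnnotation) and that "is_correct" is a hypothetical key present in each sample's metric_input:

# annotation: SingleMetricAnnotation loaded elsewhere; "is_correct" is a placeholder key.
subset = annotation.sample(n=50, stratify_key="is_correct")  # stratified random subset

plain_batches = annotation.batch(batch_size=8, drop_last_batch=True)

stratified = annotation.stratified_batches(
    batch_size=8,
    stratify_key="is_correct",
    drop_last_batch=True,
)
for batch in stratified:
    ...  # each batch is a list of SampleAnnotation objects with mixed classes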

get_prompt_annotations

get_prompt_annotations() -> Dict[str, List[PromptAnnotation]]

Get all the prompt annotations for each prompt as a list.

Source code in src/ragas/dataset_schema.py
def get_prompt_annotations(self) -> t.Dict[str, t.List[PromptAnnotation]]:
    """
    Get all the prompt annotations for each prompt as a list.
    """
    prompt_annotations = defaultdict(list)
    for sample in self.samples:
        if sample.is_accepted:
            for prompt_name, prompt_annotation in sample.prompts.items():
                prompt_annotations[prompt_name].append(prompt_annotation)
    return prompt_annotations

Message

Bases: BaseModel

Represents a generic message.

Attributes:

Name Type Description
content str

The content of the message.

metadata (Optional[Dict[str, Any]], optional)

Additional metadata associated with the message.

ToolCall

Bases: BaseModel

Represents a tool call with a name and arguments.

Parameters:

Name Type Description Default
name str

The name of the tool being called.

required
args Dict[str, Any]

A dictionary of arguments for the tool call, where keys are argument names and values can be strings, integers, or floats.

required

HumanMessage

Bases: Message

Represents a message from a human user.

Attributes:

Name Type Description
type Literal[human]

The type of the message, always set to "human".

Methods:

Name Description
pretty_repr

Returns a formatted string representation of the human message.

pretty_repr

pretty_repr()

Returns a formatted string representation of the human message.

Source code in src/ragas/messages.py
def pretty_repr(self):
    """Returns a formatted string representation of the human message."""
    return f"Human: {self.content}"

ToolMessage

Bases: Message

Represents a message from a tool.

Attributes:

Name Type Description
type Literal[tool]

The type of the message, always set to "tool".

Methods:

Name Description
pretty_repr

Returns a formatted string representation of the tool message.

pretty_repr

pretty_repr()

Returns a formatted string representation of the tool message.

Source code in src/ragas/messages.py
def pretty_repr(self):
    """Returns a formatted string representation of the tool message."""
    return f"ToolOutput: {self.content}"

AIMessage

Bases: Message

Represents a message from an AI.

Attributes:

Name Type Description
type Literal[ai]

The type of the message, always set to "ai".

tool_calls Optional[List[ToolCall]]

A list of tool calls made by the AI, if any.

metadata Optional[Dict[str, Any]]

Additional metadata associated with the AI message.

Methods:

Name Description
to_dict

Returns a dictionary representation of the AI message.

pretty_repr

Returns a formatted string representation of the AI message.

to_dict

to_dict(**kwargs)

Returns a dictionary representation of the AI message.

Source code in src/ragas/messages.py
def to_dict(self, **kwargs):
    """
    Returns a dictionary representation of the AI message.
    """
    content = (
        self.content
        if self.tool_calls is None
        else {
            "text": self.content,
            "tool_calls": [tc.dict() for tc in self.tool_calls],
        }
    )
    return {"content": content, "type": self.type}

pretty_repr

pretty_repr()

Returns a formatted string representation of the AI message.

Source code in src/ragas/messages.py
def pretty_repr(self):
    """
    Returns a formatted string representation of the AI message.
    """
    lines = []
    if self.content != "":
        lines.append(f"AI: {self.content}")
    if self.tool_calls is not None:
        lines.append("Tools:")
        for tc in self.tool_calls:
            lines.append(f"  {tc.name}: {tc.args}")

    return "\n".join(lines)

ragas.evaluation.EvaluationResult dataclass

Re-export of the EvaluationResult class documented above (defined in src/ragas/dataset_schema.py); its attributes and methods are identical.