
Customize Models

Ragas may use an LLM and/or an embedding model for evaluation and synthetic data generation. Both of these models can be customized according to what you have available.

Note

Ragas supports all the LLMs and Embeddings available in LangChain.

  • BaseRagasLLM and BaseRagasEmbeddings are the base classes Ragas uses internally for LLMs and Embeddings. Any custom LLM or Embeddings should be a subclass of these base classes.

  • If you are using LangChain, you can pass the LangChain LLM and Embeddings directly and Ragas will wrap them with LangchainLLMWrapper or LangchainEmbeddingsWrapper as needed, as shown in the sketch below.
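For illustration, here is a minimal sketch of the wrapping pattern. ChatOpenAI and OpenAIEmbeddings are only stand-ins for any LangChain chat model and embeddings object, and the model name is an assumption; substitute whatever you have access to.

from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper

# wrap any LangChain chat model and embeddings object; the wrapped objects are
# what you pass to Ragas wherever it expects an LLM or Embeddings
evaluator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o"))
evaluator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())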

Examples

Azure OpenAI

pip install langchain_openai

from langchain_openai.chat_models import AzureChatOpenAI
from langchain_openai.embeddings import AzureOpenAIEmbeddings
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper

azure_configs = {
    "base_url": "https://<your-endpoint>.openai.azure.com/",
    "model_deployment": "your-deployment-name",
    "model_name": "your-model-name",
    "embedding_deployment": "your-deployment-name",
    "embedding_name": "text-embedding-ada-002",  # most likely
}


azure_llm = AzureChatOpenAI(
    openai_api_version="2023-05-15",
    azure_endpoint=azure_configs["base_url"],
    azure_deployment=azure_configs["model_deployment"],
    model=azure_configs["model_name"],
    validate_base_url=False,
)

# init the embeddings for answer_relevancy, answer_correctness and answer_similarity
azure_embeddings = AzureOpenAIEmbeddings(
    openai_api_version="2023-05-15",
    azure_endpoint=azure_configs["base_url"],
    azure_deployment=azure_configs["embedding_deployment"],
    model=azure_configs["embedding_name"],
)

azure_llm = LangchainLLMWrapper(azure_llm)
azure_embeddings = LangchainEmbeddingsWrapper(azure_embeddings)
Yay! Now you are ready to use Ragas with Azure OpenAI endpoints.
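For example, the wrapped models can be passed to evaluate(). A minimal sketch, where your_dataset is a placeholder for an evaluation dataset you have already prepared:

from ragas import evaluate
from ragas.metrics import faithfulness, answer_relevancy

# `your_dataset` is a placeholder; use your own evaluation dataset here
results = evaluate(
    your_dataset,
    metrics=[faithfulness, answer_relevancy],
    llm=azure_llm,
    embeddings=azure_embeddings,
)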

Google Vertex

pip install langchain_google_vertexai

import google.auth
from langchain_google_vertexai import ChatVertexAI, VertexAIEmbeddings
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper

config = {
    "project_id": "<your-project-id>",
    "chat_model_id": "gemini-1.0-pro-002",
    "embedding_model_id": "textembedding-gecko",
}

# authenticate to GCP
creds, _ = google.auth.default(quota_project_id=config["project_id"])

# create Langchain LLM and Embeddings
vertextai_llm = ChatVertexAI(
    credentials=creds,
    model_name=config["chat_model_id"],
)
vertextai_embeddings = VertexAIEmbeddings(
    credentials=creds, model_name=config["embedding_model_id"]
)

vertextai_llm = LangchainLLMWrapper(vertextai_llm)
vertextai_embeddings = LangchainEmbeddingsWrapper(vertextai_embeddings)
Yay! Now you are ready to use Ragas with Google Vertex AI endpoints.
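Alternatively, the wrapped models can be attached to individual metrics instead of being passed to evaluate(). A sketch assuming the attribute-assignment pattern on metric objects:

from ragas.metrics import faithfulness, answer_relevancy

# assumption: metric objects expose `llm` and `embeddings` attributes that
# override the models used when the metric is scored
faithfulness.llm = vertextai_llm
answer_relevancy.llm = vertextai_llm
answer_relevancy.embeddings = vertextai_embeddings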

AWS Bedrock

from langchain_community.chat_models import BedrockChat
from langchain_community.embeddings import BedrockEmbeddings
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper

config = {
    "credentials_profile_name": "your-profile-name",  # E.g "default"
    "region_name": "your-region-name",  # E.g. "us-east-1"
    "model_id": "your-model-id",  # E.g "anthropic.claude-v2"
    "model_kwargs": {"temperature": 0.4},
}

bedrock_llm = BedrockChat(
    credentials_profile_name=config["credentials_profile_name"],
    region_name=config["region_name"],
    endpoint_url=f"https://bedrock-runtime.{config['region_name']}.amazonaws.com",
    model_id=config["model_id"],
    model_kwargs=config["model_kwargs"],
)

# init the embeddings
bedrock_embeddings = BedrockEmbeddings(
    credentials_profile_name=config["credentials_profile_name"],
    region_name=config["region_name"],
)

bedrock_llm = LangchainLLMWrapper(bedrock_llm)
bedrock_embeddings = LangchainEmbeddingsWrapper(bedrock_embeddings)
Yay! Now you are ready to use Ragas with AWS Bedrock endpoints.
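The same models can also drive synthetic test data generation. A hedged sketch, assuming the Ragas 0.1.x-style TestsetGenerator.from_langchain API (the constructor differs between Ragas versions). Note that from_langchain expects the plain LangChain models rather than the wrapped objects, and documents is a placeholder for a list of LangChain Documents you have loaded:

from ragas.testset.generator import TestsetGenerator
from ragas.testset.evolutions import simple, reasoning, multi_context

# `generator_llm`, `critic_llm` and `generator_embeddings` stand for the plain
# BedrockChat / BedrockEmbeddings objects (created before wrapping);
# from_langchain wraps them itself
generator = TestsetGenerator.from_langchain(
    generator_llm,
    critic_llm,
    generator_embeddings,
)

testset = generator.generate_with_langchain_docs(
    documents,  # list of LangChain Document objects
    test_size=10,
    distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25},
)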