In this notebook we show how, in a single request, you can have the LLM return multiple results per prompt. This is useful for running experiments where you want to evaluate the robustness of your prompt and the parameters of your configuration against a particular large language model. Under the hood this maps to the OpenAI n parameter, which Semantic Kernel exposes as number_of_responses.
!python -m pip install semantic-kernel==1.0.3
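Before selecting a service, make sure your credentials are available. The OpenAI and Azure OpenAI connectors read them from environment variables (or a .env file). A minimal sketch, assuming the variable names used by the Semantic Kernel settings classes; the placeholder values are hypothetical and the Azure names can vary between versions:
import os

# Hypothetical placeholder values; replace with your own credentials.
os.environ["OPENAI_API_KEY"] = "sk-..."  # used by Service.OpenAI
# os.environ["AZURE_OPENAI_API_KEY"] = "..."  # used by Service.AzureOpenAI
# os.environ["AZURE_OPENAI_ENDPOINT"] = "https://<your-resource>.openai.azure.com/"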
from services import Service
# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)
selectedService = Service.OpenAI
from semantic_kernel.contents import ChatHistory  # noqa: F401

if selectedService == Service.OpenAI or selectedService == Service.AzureOpenAI:
    from semantic_kernel.connectors.ai.open_ai import (  # noqa: F401
        AzureChatCompletion,
        AzureChatPromptExecutionSettings,
        AzureTextCompletion,
        OpenAIChatCompletion,
        OpenAIChatPromptExecutionSettings,
        OpenAITextCompletion,
        OpenAITextPromptExecutionSettings,
    )
if selectedService == Service.HuggingFace:
    from semantic_kernel.connectors.ai.hugging_face import HuggingFaceTextCompletion  # noqa: F401
First, we will set up the text and chat services we will be submitting prompts to.
from semantic_kernel import Kernel
kernel = Kernel()
# Configure Azure LLM service
if selectedService == Service.AzureOpenAI:
    azure_text_service = AzureTextCompletion(
        service_id="aoai_text"
    )  # set the deployment name to the value of your text model (e.g. gpt-35-turbo-instruct)
    azure_chat_service = AzureChatCompletion(
        service_id="aoai_chat"
    )  # set the deployment name to the value of your chat model

# Configure OpenAI service
if selectedService == Service.OpenAI:
    oai_text_service = OpenAITextCompletion(service_id="oai_text", ai_model_id="gpt-3.5-turbo-instruct")
    oai_chat_service = OpenAIChatCompletion(service_id="oai_chat", ai_model_id="gpt-3.5-turbo")

# Configure Hugging Face service
if selectedService == Service.HuggingFace:
    hf_text_service = HuggingFaceTextCompletion(service_id="hf_text", ai_model_id="distilgpt2", task="text-generation")
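In this notebook we call the services directly, but you can also register them on the kernel so that prompt functions can resolve them by service_id. A minimal sketch, assuming the OpenAI services configured above:
if selectedService == Service.OpenAI:
    kernel.add_service(oai_text_service)
    kernel.add_service(oai_chat_service)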
Next, we'll set up the completion request settings for text completion services.
oai_text_prompt_execution_settings = OpenAITextPromptExecutionSettings(
    service_id="oai_text",
    extension_data={
        "max_tokens": 80,
        "temperature": 0.7,
        "top_p": 1,
        "frequency_penalty": 0.5,
        "presence_penalty": 0.5,
        "number_of_responses": 3,
    },
)
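As an aside, extension_data is just a convenience: the same values can be passed as keyword arguments, mirroring how the chat settings are constructed later in this notebook. An equivalent construction:
oai_text_prompt_execution_settings = OpenAITextPromptExecutionSettings(
    service_id="oai_text",
    max_tokens=80,
    temperature=0.7,
    top_p=1,
    frequency_penalty=0.5,
    presence_penalty=0.5,
    number_of_responses=3,
)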
if selectedService == Service.OpenAI:
    prompt = "What is the purpose of a rubber duck?"
    results = await oai_text_service.get_text_contents(prompt=prompt, settings=oai_text_prompt_execution_settings)
    for i, result in enumerate(results, start=1):
        print(f"Result {i}: {result}")
if selectedService == Service.AzureOpenAI:
    prompt = "provide me a list of possible meanings for the acronym 'ORLD'"
    results = await azure_text_service.get_text_contents(prompt=prompt, settings=oai_text_prompt_execution_settings)
    for i, result in enumerate(results, start=1):
        print(f"Result {i}: {result}")
if selectedService == Service.HuggingFace:
    from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import (
        HuggingFacePromptExecutionSettings,
    )

    hf_prompt_execution_settings = HuggingFacePromptExecutionSettings(
        service_id="hf_text", extension_data={"max_new_tokens": 80, "temperature": 0.7, "top_p": 1}
    )
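Hugging Face generation can also produce multiple sequences per prompt. A sketch, assuming your semantic-kernel version exposes the transformers num_return_sequences parameter on HuggingFacePromptExecutionSettings:
hf_multi_prompt_execution_settings = HuggingFacePromptExecutionSettings(
    service_id="hf_text",
    extension_data={"max_new_tokens": 80, "temperature": 0.7, "top_p": 1, "num_return_sequences": 3},
)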
if selectedService == Service.HuggingFace:
    prompt = "The purpose of a rubber duck is"
    results = await hf_text_service.get_text_contents(prompt=prompt, settings=hf_prompt_execution_settings)
    print("".join(str(result) for result in results))
Here, we configure the execution settings for the chat completion services.
oai_chat_prompt_execution_settings = OpenAIChatPromptExecutionSettings(
    service_id="oai_chat",
    max_tokens=80,
    temperature=0.7,
    top_p=1,
    frequency_penalty=0.5,
    presence_penalty=0.5,
    number_of_responses=3,
)
if selectedService == Service.OpenAI:
    chat = ChatHistory()
    chat.add_user_message(
        "It's a beautiful day outside, birds are singing, flowers are blooming. On days like these, kids like you..."
    )
    results = await oai_chat_service.get_chat_message_contents(
        chat_history=chat, settings=oai_chat_prompt_execution_settings
    )
    for i, result in enumerate(results, start=1):
        print(f"Result {i}: {result}")
az_oai_prompt_execution_settings = AzureChatPromptExecutionSettings(
    service_id="aoai_chat",
    max_tokens=80,
    temperature=0.7,
    top_p=1,
    frequency_penalty=0.5,
    presence_penalty=0.5,
    number_of_responses=3,
)
if selectedService == Service.AzureOpenAI:
    content = (
        "Tomorrow is going to be a great day, I can feel it. I'm going to wake up early, go for a run, and then..."
    )
    chat = ChatHistory()
    chat.add_user_message(content)
    results = await azure_chat_service.get_chat_message_contents(
        chat_history=chat, settings=az_oai_prompt_execution_settings
    )
    for i, result in enumerate(results, start=1):
        print(f"Result {i}: {result}")
Here is an example pattern you can use to stream multiple results. Note that streaming is not supported for Hugging Face text completions at this time.
if selectedService == Service.OpenAI:
    import time

    from IPython.display import clear_output

    chat = ChatHistory()
    chat.add_user_message("what is the purpose of a rubber duck?")

    stream = oai_chat_service.get_streaming_chat_message_contents(
        chat_history=chat, settings=oai_chat_prompt_execution_settings
    )
    number_of_responses = oai_chat_prompt_execution_settings.number_of_responses
    texts = [""] * number_of_responses

    last_clear_time = time.time()
    clear_interval = 0.5  # seconds

    # Note: there are some quirks with displaying the output, which sometimes flashes and disappears.
    # This can be influenced by a few factors specific to Jupyter notebooks and asynchronous processing.
    # The following code buffers the results to avoid the output flashing on/off the screen.
    async for results in stream:
        current_time = time.time()

        # Append each streamed chunk to the text of its response
        for idx, result in enumerate(results):
            if idx < number_of_responses:
                texts[idx] += str(result)

        # Clear and redisplay the accumulated output at intervals
        if current_time - last_clear_time > clear_interval:
            clear_output(wait=True)
            for idx, text in enumerate(texts):
                print(f"Result {idx + 1}: {text}")
            last_clear_time = current_time

    print("----------------------------------------")