#!/usr/bin/env python
# coding: utf-8

# # How to run prompt plugins from a file
# 
# Now that you're familiar with Kernel basics, let's see how the kernel allows you to run Prompt Plugins and Prompt Functions stored on disk.
# 
# A Prompt Plugin is a collection of Semantic Functions, where each function is defined in natural language and can be provided as a plain text file.
# 
# Refer to our [glossary](https://github.com/microsoft/semantic-kernel/blob/main/docs/GLOSSARY.md) for an in-depth guide to the terms.
# 
# The repository includes some examples under the [samples](https://github.com/microsoft/semantic-kernel/tree/main/samples) folder.
# 
# For instance, [this](../../../prompt_template_samples/FunPlugin/Joke/skprompt.txt) is the **Joke** function of the **FunPlugin** plugin; we'll look at its prompt after the setup steps below.
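# 
# On disk, the plugin follows a simple layout: one folder per function, each holding the prompt and an optional configuration file. Using the paths from the sample links above:
# 
# ```
# prompt_template_samples/
# └── FunPlugin/
#     └── Joke/
#         ├── skprompt.txt
#         └── config.json
# ```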
# 

# Install the Semantic Kernel SDK from pypi.org and check the installed version.

# In[ ]:


# Note: if using a virtual environment, do not run this cell
get_ipython().run_line_magic('pip', 'install -U semantic-kernel')
from semantic_kernel import __version__

__version__


# Initial configuration for the notebook to run properly.

# In[ ]:


# Make sure paths are correct for the imports

import os
import sys

notebook_dir = os.path.abspath("")
parent_dir = os.path.dirname(notebook_dir)
grandparent_dir = os.path.dirname(parent_dir)


sys.path.append(grandparent_dir)


# ### Configuring the Kernel
# 
# Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.
# 
# **NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. If this setting is not included, the Service will default to AzureOpenAI.**
# 
# #### Option 1: using OpenAI
# 
# Add your [OpenAI key](https://openai.com/product/) to your `.env` file (the org ID is needed only if you belong to multiple orgs):
# 
# ```
# GLOBAL_LLM_SERVICE="OpenAI"
# OPENAI_API_KEY="sk-..."
# OPENAI_ORG_ID=""
# OPENAI_CHAT_MODEL_ID=""
# OPENAI_TEXT_MODEL_ID=""
# OPENAI_EMBEDDING_MODEL_ID=""
# ```
# The variable names in your `.env` file must match those shown above.
# 
# #### Option 2: using Azure OpenAI
# 
# Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:
# 
# ```
# GLOBAL_LLM_SERVICE="AzureOpenAI"
# AZURE_OPENAI_API_KEY="..."
# AZURE_OPENAI_ENDPOINT="https://..."
# AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..."
# AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="..."
# AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME="..."
# AZURE_OPENAI_API_VERSION="..."
# ```
# The variable names in your `.env` file must match those shown above.
# 
# For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md).

# Let's move on to what prompts are and how to write them. Here is the **Joke** prompt itself:

# ```
# WRITE EXACTLY ONE JOKE or HUMOROUS STORY ABOUT THE TOPIC BELOW.
# JOKE MUST BE:
# - G RATED
# - WORKPLACE/FAMILY SAFE
# NO SEXISM, RACISM OR OTHER BIAS/BIGOTRY.
# BE CREATIVE AND FUNNY. I WANT TO LAUGH.
# +++++
# {{$input}}
# +++++
# ```
# 

# Note the special **`{{$input}}`** token: a variable whose value is supplied when the function is invoked, commonly referred to as a "function parameter".
# 
# We'll explore later how functions can accept multiple variables, as well as invoke other functions.
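# 
# For example, a template could reference two variables like this (a hypothetical template, not part of the sample, though the sample's `config.json` below does declare both `input` and `style`):
# 
# ```
# WRITE A {{$style}} JOKE ABOUT: {{$input}}
# ```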
# 

# In the same folder you'll notice a second [config.json](../../../prompt_template_samples/FunPlugin/Joke/config.json) file. This file is optional; it sets parameters for the large language model, such as temperature, top-p, and stop sequences.
# 
# ```
# {
#   "schema": 1,
#   "description": "Generate a funny joke",
#   "execution_settings": {
#     "default": {
#       "max_tokens": 1000,
#       "temperature": 0.9,
#       "top_p": 0.0,
#       "presence_penalty": 0.0,
#       "frequency_penalty": 0.0
#     }
#   },
#   "input_variables": [
#     {
#       "name": "input",
#       "description": "Joke subject",
#       "default": ""
#     },
#     {
#       "name": "style",
#       "description": "Give a hint about the desired joke style",
#       "default": ""
#     }
#   ]
# }
# ```
# 
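# The `execution_settings` block corresponds to the SDK's prompt execution settings. As a minimal sketch, the same values could be expressed in code like this (assuming the OpenAI connector's `OpenAIChatPromptExecutionSettings`; exact class names can vary across SK versions):
# 
# ```
# from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings
# 
# settings = OpenAIChatPromptExecutionSettings(
#     max_tokens=1000,
#     temperature=0.9,
#     top_p=0.0,
# )
# ```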

# Given a prompt function defined by these files, here is how to load and use it.
# 
# Load and configure the kernel as usual, along with the AI service settings defined in the [Setup notebook](00-getting-started.ipynb):
# 

# In[ ]:


from semantic_kernel import Kernel

kernel = Kernel()


# We will load our settings and get the LLM service to use for the notebook.

# In[ ]:


from services import Service

from samples.service_settings import ServiceSettings

service_settings = ServiceSettings.create()

# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)
selectedService = (
    Service.AzureOpenAI
    if service_settings.global_llm_service is None
    else Service(service_settings.global_llm_service.lower())
)
print(f"Using service type: {selectedService}")


# We now configure our Chat Completion service on the kernel.

# In[ ]:


# Remove all services so that this cell can be re-run without restarting the kernel
kernel.remove_all_services()

service_id = None
if selectedService == Service.OpenAI:
    from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion

    service_id = "default"
    kernel.add_service(
        OpenAIChatCompletion(
            service_id=service_id,
        ),
    )
elif selectedService == Service.AzureOpenAI:
    from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion

    service_id = "default"
    kernel.add_service(
        AzureChatCompletion(
            service_id=service_id,
        ),
    )


# Import the plugin and all its functions:
# 

# In[ ]:


# note: using plugins from the samples folder
plugins_directory = "../../../prompt_template_samples/"

fun_functions = kernel.add_plugin(parent_directory=plugins_directory, plugin_name="FunPlugin")

joke_function = fun_functions["Joke"]
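

# The returned plugin object acts like a dictionary of its functions. As a quick check, you can list what was loaded (a minimal sketch; `functions` and `metadata` are the attributes exposed by `KernelPlugin` and `KernelFunction` in recent SK versions):

# In[ ]:


# Print each function in the plugin along with its description
for name, function in fun_functions.functions.items():
    print(f"{name}: {function.metadata.description}")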


# Now use the plugin's functions, e.g. to generate a joke about "_travel to dinosaur age_":
# 

# In[ ]:


result = await kernel.invoke(joke_function, input="travel to dinosaur age", style="silly")
print(result)
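

# Keyword arguments are a shorthand: the variables can also be bundled into a `KernelArguments` object, which is handy when you build them programmatically (a minimal sketch invoking the same function):

# In[ ]:


from semantic_kernel.functions import KernelArguments

# Collect the prompt variables in a single arguments object
arguments = KernelArguments(input="travel to dinosaur age", style="super silly")

result = await kernel.invoke(joke_function, arguments)
print(result)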


# Great, now that you know how to load a plugin from disk, let's show how you can [create and run a prompt function inline.](./03-prompt-function-inline.ipynb)
#