#!/usr/bin/env python
# coding: utf-8

# # Basic Loading of the Kernel
# 

# ### Setup
# 
# Import Semantic Kernel SDK from pypi.org

# In[ ]:


# Note: if using a virtual environment, do not run this cell
# Install/upgrade the Semantic Kernel SDK from PyPI using the IPython %pip magic.
get_ipython().run_line_magic('pip', 'install -U semantic-kernel')
from semantic_kernel import __version__

# Bare expression on the last line of the cell: the notebook displays the
# installed SDK version so the reader can confirm the install succeeded.
__version__


# Initial configuration for the notebook to run properly.

# In[ ]:


# Make sure paths are correct for the imports

import os
import sys

# Resolve the notebook's directory and walk two levels up so that the
# repository-level modules (e.g. `services`, `samples`) become importable.
notebook_dir = os.path.abspath("")
parent_dir = os.path.dirname(notebook_dir)
grandparent_dir = os.path.dirname(parent_dir)

# Guard the append so re-running this cell does not add duplicate
# entries to sys.path.
if grandparent_dir not in sys.path:
    sys.path.append(grandparent_dir)


# ### Configuring the Kernel
# 
# Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.
# 
# **NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to OpenAI, AzureOpenAI, or HuggingFace in your .env file. If this setting is not included, the Service will default to AzureOpenAI.**
# 
# #### Option 1: using OpenAI
# 
# Add your [OpenAI API key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):
# 
# ```
# GLOBAL_LLM_SERVICE="OpenAI"
# OPENAI_API_KEY="sk-..."
# OPENAI_ORG_ID=""
# OPENAI_CHAT_MODEL_ID=""
# OPENAI_TEXT_MODEL_ID=""
# OPENAI_EMBEDDING_MODEL_ID=""
# ```
# The names should match the names used in the `.env` file, as shown above.
# 
# #### Option 2: using Azure OpenAI
# 
# Add your [Azure OpenAI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) and related settings to the `.env` file in the same folder:
# 
# ```
# GLOBAL_LLM_SERVICE="AzureOpenAI"
# AZURE_OPENAI_API_KEY="..."
# AZURE_OPENAI_ENDPOINT="https://..."
# AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..."
# AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="..."
# AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME="..."
# AZURE_OPENAI_API_VERSION="..."
# ```
# The names should match the names used in the `.env` file, as shown above.
# 
# For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md).

# Let's define our kernel for this example.

# In[ ]:


from semantic_kernel import Kernel

# Create an empty kernel instance; AI services are registered on it in a later cell.
kernel = Kernel()


# We will load our settings and get the LLM service to use for the notebook.

# In[ ]:


from services import Service

from samples.service_settings import ServiceSettings

service_settings = ServiceSettings.create()

# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)
# Fall back to Azure OpenAI when GLOBAL_LLM_SERVICE is not set in the .env file;
# otherwise map the configured name (case-insensitively) onto the Service enum.
if service_settings.global_llm_service is None:
    selectedService = Service.AzureOpenAI
else:
    selectedService = Service(service_settings.global_llm_service.lower())
print(f"Using service type: {selectedService}")


# In[ ]:


# Remove all services so that this cell can be re-run without restarting the kernel
kernel.remove_all_services()

# Register a chat-completion service matching the selected provider.
# The connector import stays inside its branch so only the needed
# dependency is loaded. For any other service (e.g. HuggingFace),
# nothing is registered here and service_id stays None.
service_id = None
if selectedService == Service.OpenAI:
    from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion

    service_id = "default"
    kernel.add_service(OpenAIChatCompletion(service_id=service_id))
elif selectedService == Service.AzureOpenAI:
    from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion

    service_id = "default"
    kernel.add_service(AzureChatCompletion(service_id=service_id))


# Great, now that you're familiar with setting up the Semantic Kernel, let's see [how we can use it to run prompts](02-running-prompts-from-file.ipynb).
#