#!/usr/bin/env python
# coding: utf-8

# In[ ]:


get_ipython().system('pip install -qU "semantic-router[pinecone]==0.1.0"')


# In[1]:


from semantic_router import Route

# we could use this as a guide for our chatbot to avoid political conversations
politics = Route(
    name="politics",
    utterances=[
        "isn't politics the best thing ever",
        "why don't you tell me about your political opinions",
        "don't you just love the president",
        "don't you just hate the president",
        "they're going to destroy this country!",
        "they will save the country!",
    ],
)

# this could be used as an indicator to our chatbot to switch to a more
# conversational prompt
chitchat = Route(
    name="chitchat",
    utterances=[
        "how's the weather today?",
        "how are things going?",
        "lovely weather today",
        "the weather is horrendous",
        "let's go to the chippy",
    ],
)

# we place both of our decisions together into a single list
routes = [politics, chitchat]


# As of 13 June 2024, two encoders support async functionality:
#
# * `AzureOpenAIEncoder`
# * `OpenAIEncoder`
#
# To use either of these encoders in async mode we simply initialize them as we usually would. When we then include them within a `RouteLayer` and run `acall`, the route layer will automatically run the encoders in async mode.
#
# **Azure OpenAI:**
#
# ```python
# from semantic_router.encoders import AzureOpenAIEncoder
#
# encoder = AzureOpenAIEncoder(
#     api_key="YOUR_AZURE_OPENAI_API_KEY",
#     deployment_name="YOUR_DEPLOYMENT_NAME",
#     azure_endpoint="YOUR_ENDPOINT",
#     api_version="2024-02-01",
#     model="text-embedding-3-small",
# )
# ```
#
# **OpenAI:**

# In[2]:


import os
from getpass import getpass

from semantic_router.encoders import OpenAIEncoder

# get at platform.openai.com
os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY") or getpass(
    "Enter OpenAI API key: "
)

encoder = OpenAIEncoder(name="text-embedding-3-small")


# We can see encoder details, including the default `score_threshold`, like so:

# In[3]:


encoder


# We can create embeddings asynchronously via our encoder using the `encoder.acall` method:

# In[4]:


await encoder.acall(docs=["test", "test 2"])


# For our `PineconeIndex` we do the exact same thing, i.e. we initialize as usual:

# In[5]:


import os

from semantic_router.index.pinecone import PineconeIndex

# get at app.pinecone.io
os.environ["PINECONE_API_KEY"] = os.environ.get("PINECONE_API_KEY") or getpass(
    "Enter Pinecone API key: "
)

pc_index = PineconeIndex(dimensions=1536, init_async_index=True)


# There are several async methods we can call directly:

# In[6]:


await pc_index._async_list_indexes()


# But unless we're using the index directly, we don't need to use these. As with the encoder, once we pass the `PineconeIndex` to our route layer, the route layer will call all async methods automatically when we hit the `acall` method.

# ## Async RouteLayer

# The `RouteLayer` class supports both sync and async operations by default, so we initialize as usual:

# In[7]:


from semantic_router.layer import RouteLayer

rl = RouteLayer(encoder=encoder, routes=routes, index=pc_index)


# We can check our route layer and index information as usual:

# In[8]:


rl.list_route_names()


# In[9]:


len(rl.index)


# We can also view all of the records for a given route:

# In[10]:


rl.index._get_route_ids(route_name="politics")
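# If we want the same view across every route, we can loop over `rl.list_route_names()`
# and reuse `_get_route_ids` — a quick sketch built only from the calls shown above.
# Note that `_get_route_ids` is a private helper, so its behaviour may change between
# versions; we assume it returns a list of record ids, as it does for the `politics`
# route here:
#
# ```python
# for route_name in rl.list_route_names():
#     # count how many utterance records are stored in the index for this route
#     record_ids = rl.index._get_route_ids(route_name=route_name)
#     print(f"{route_name}: {len(record_ids)} records")
# ```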
# And now for async vs. sync usage! To call in synchronous mode we simply hit `rl(...)`; to switch to async mode we hit `rl.acall(...)`:

# In[11]:


rl("don't you love politics").name  # SYNC mode


# In[12]:


out = await rl.acall("don't you love politics?")  # ASYNC mode
out.name


# Let's try a few more sync and async requests:

# In[13]:


rl("how's the weather today?").name


# In[15]:


out = await rl.acall("how's the weather today?")
out.name


# In[16]:


rl("I'm interested in learning about llama 2").name


# In[17]:


out = await rl.acall("I'm interested in learning about llama 2")
out.name


# We can delete or update routes using the usual synchronous methods:

# In[18]:


len(rl.index)


# In[19]:


import time

rl.delete(route_name="chitchat")
time.sleep(3)
len(rl.index)


# In[21]:


out = await rl.acall("how's the weather today?")
out.name


# In[22]:


rl.index.get_routes()


# In[23]:


rl.index.describe()


# ---
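# As a final note, because `acall` returns a coroutine, several queries can be dispatched
# concurrently rather than one at a time. The sketch below uses only the `rl.acall` method
# from above plus the standard library's `asyncio.gather`; the `classify_all` helper name
# is our own, not part of semantic-router:
#
# ```python
# import asyncio
#
# queries = [
#     "don't you love politics?",
#     "how's the weather today?",
#     "I'm interested in learning about llama 2",
# ]
#
#
# async def classify_all(queries):
#     # fire all route layer calls concurrently and pair each query with its route name
#     results = await asyncio.gather(*(rl.acall(q) for q in queries))
#     return [(q, r.name) for q, r in zip(queries, results)]
#
#
# # in a notebook we can await the coroutine directly
# await classify_all(queries)
# ```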