UMAP is a dimensionality reduction algorithm which performs non-linear dimension reduction. It can also be used for visualization of the dataset. The UMAP model implemented in cuml allows the user to set the following parameter values:
The cuml implementation of the UMAP model has the following functions that one can run:
The model can take array-like objects, either in host memory (as NumPy arrays) or in device memory (as Numba device arrays or `__cuda_array_interface__`-compliant objects), as well as cuDF DataFrames. In order to convert your dataset to cuDF format, please read the cuDF documentation at https://rapidsai.github.io/projects/cudf/en/latest/. For additional information on the UMAP model please refer to the documentation at https://rapidsai.github.io/projects/cuml/en/0.6.0/api.html#cuml.UMAP
import numpy as np
import pandas as pd
import cudf
import os
from sklearn import datasets
from sklearn.metrics import adjusted_rand_score
from sklearn.cluster import KMeans
# NOTE: sklearn.manifold.t_sne is a private module that was removed in
# scikit-learn 0.22+; trustworthiness is publicly exported from sklearn.manifold
from sklearn.manifold import trustworthiness
from cuml.manifold.umap import UMAP
# create a blobs dataset with 500 samples and 10 features each, grouped
# into 5 well-separated centers
data, labels = datasets.make_blobs(
    n_samples=500, n_features=10, centers=5)
# use the cuml UMAP algorithm to reduce the features of the dataset and
# store the low-dimensional embedding
embedding = UMAP().fit_transform(data)
# score how well KMeans clusters recovered from the embedding match the
# true blob labels; n_clusters is passed by keyword because the positional
# form was deprecated in scikit-learn 1.0
score = adjusted_rand_score(labels,
                            KMeans(n_clusters=5).fit_predict(embedding))
print(score)  # should equal 1.0
1.0
# load the iris dataset from sklearn and extract the feature matrix
iris = datasets.load_iris()
data = iris.data
# define the cuml UMAP model and use fit_transform to obtain the
# low-dimensional embedding of the input dataset
embedding = UMAP(
    n_neighbors=10, min_dist=0.01, init="random"
).fit_transform(data)
# calculate the trustworthiness of the results obtained from the cuml UMAP;
# n_neighbors is keyword-only in scikit-learn >= 1.0, so pass it by name
trust = trustworthiness(iris.data, embedding, n_neighbors=10)
print(trust)
0.9741363073110285
# Build a boolean mask over the 150 iris samples: each entry is True with
# probability 0.75 and False with probability 0.25 (a rough 75/25 split).
iris_selection = np.random.choice(
    [True, False], size=150, replace=True, p=[0.75, 0.25])
# Keep only the rows of the iris feature matrix where the mask is True.
data = iris.data[iris_selection]
print(data)
[[5.1 3.5 1.4 0.2] [4.9 3. 1.4 0.2] [4.6 3.1 1.5 0.2] [5. 3.6 1.4 0.2] [4.6 3.4 1.4 0.3] [5. 3.4 1.5 0.2] [4.4 2.9 1.4 0.2] [4.9 3.1 1.5 0.1] [5.4 3.7 1.5 0.2] [4.8 3.4 1.6 0.2] [4.8 3. 1.4 0.1] [4.3 3. 1.1 0.1] [5.7 4.4 1.5 0.4] [5.4 3.9 1.3 0.4] [5.1 3.5 1.4 0.3] [5.7 3.8 1.7 0.3] [5.1 3.8 1.5 0.3] [5.4 3.4 1.7 0.2] [5.1 3.3 1.7 0.5] [4.8 3.4 1.9 0.2] [5. 3. 1.6 0.2] [5. 3.4 1.6 0.4] [5.2 3.4 1.4 0.2] [4.7 3.2 1.6 0.2] [4.8 3.1 1.6 0.2] [5.2 4.1 1.5 0.1] [5.5 4.2 1.4 0.2] [5. 3.2 1.2 0.2] [4.9 3.6 1.4 0.1] [4.4 3. 1.3 0.2] [5. 3.5 1.3 0.3] [4.5 2.3 1.3 0.3] [4.4 3.2 1.3 0.2] [5. 3.5 1.6 0.6] [5.1 3.8 1.9 0.4] [4.8 3. 1.4 0.3] [5.1 3.8 1.6 0.2] [4.6 3.2 1.4 0.2] [5.3 3.7 1.5 0.2] [5. 3.3 1.4 0.2] [7. 3.2 4.7 1.4] [6.4 3.2 4.5 1.5] [6.9 3.1 4.9 1.5] [5.5 2.3 4. 1.3] [6.5 2.8 4.6 1.5] [5.7 2.8 4.5 1.3] [6.3 3.3 4.7 1.6] [4.9 2.4 3.3 1. ] [6.6 2.9 4.6 1.3] [5.2 2.7 3.9 1.4] [5. 2. 3.5 1. ] [5.9 3. 4.2 1.5] [6.1 2.9 4.7 1.4] [5.6 2.9 3.6 1.3] [6.7 3.1 4.4 1.4] [5.6 3. 4.5 1.5] [5.8 2.7 4.1 1. ] [6.2 2.2 4.5 1.5] [5.9 3.2 4.8 1.8] [6.1 2.8 4. 1.3] [6.3 2.5 4.9 1.5] [6.1 2.8 4.7 1.2] [6.4 2.9 4.3 1.3] [6.6 3. 4.4 1.4] [6.8 2.8 4.8 1.4] [6. 2.9 4.5 1.5] [5.7 2.6 3.5 1. ] [5.5 2.4 3.8 1.1] [5.5 2.4 3.7 1. ] [6. 2.7 5.1 1.6] [5.4 3. 4.5 1.5] [6.7 3.1 4.7 1.5] [6.3 2.3 4.4 1.3] [5.6 3. 4.1 1.3] [5.5 2.5 4. 1.3] [6.1 3. 4.6 1.4] [5.8 2.6 4. 1.2] [5. 2.3 3.3 1. ] [5.6 2.7 4.2 1.3] [5.7 3. 4.2 1.2] [6.2 2.9 4.3 1.3] [5.7 2.8 4.1 1.3] [6.3 3.3 6. 2.5] [7.1 3. 5.9 2.1] [6.3 2.9 5.6 1.8] [6.5 3. 5.8 2.2] [4.9 2.5 4.5 1.7] [6.7 2.5 5.8 1.8] [7.2 3.6 6.1 2.5] [6.4 2.7 5.3 1.9] [6.8 3. 5.5 2.1] [5.7 2.5 5. 2. ] [5.8 2.8 5.1 2.4] [6.4 3.2 5.3 2.3] [6.5 3. 5.5 1.8] [7.7 3.8 6.7 2.2] [7.7 2.6 6.9 2.3] [6.9 3.2 5.7 2.3] [5.6 2.8 4.9 2. ] [7.7 2.8 6.7 2. ] [6.3 2.7 4.9 1.8] [6.7 3.3 5.7 2.1] [7.2 3.2 6. 1.8] [6.2 2.8 4.8 1.8] [6.1 3. 4.9 1.8] [6.4 2.8 5.6 2.1] [7.2 3. 5.8 1.6] [7.4 2.8 6.1 1.9] [6.4 2.8 5.6 2.2] [6.1 2.6 5.6 1.4] [7.7 3. 6.1 2.3] [6.4 3.1 5.5 1.8] [6. 3. 
4.8 1.8] [5.8 2.7 5.1 1.9] [6.7 3. 5.2 2.3] [6.5 3. 5.2 2. ] [5.9 3. 5.1 1.8]]
# create a cuml UMAP model (verbose=False suppresses training output)
fitter = UMAP(n_neighbors=10, min_dist=0.01, verbose=False)
# fit the rows selected by iris_selection to the cuml UMAP model (fitter)
fitter.fit(data)
# invert the selection mask to obtain the held-out samples
# (ie. ~75% False and ~25% True values)
new_data = iris.data[~iris_selection]
# embed the new data into the previously learned embedding space
embedding = fitter.transform(new_data)
# calculate the trustworthiness score for the new data (new_data);
# n_neighbors is keyword-only in scikit-learn >= 1.0, so pass it by name
trust = trustworthiness(new_data, embedding, n_neighbors=10)
print(trust)
0.9513419913419914