import torch
import numpy as np
tensor_from_list = torch.tensor([[1., 0.], [0., 1.]])
print(tensor_from_list)
tensor([[1., 0.],
        [0., 1.]])
print(tensor_from_list.size())
torch.Size([2, 2])
tensor_from_numpy = torch.tensor(np.array([[1., 0.], [0., 1.]]))
print(tensor_from_numpy)
tensor([[1., 0.],
        [0., 1.]], dtype=torch.float64)
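# A side note (sketch): torch.from_numpy() shares memory with the source array,
# whereas torch.tensor() always copies the data.
np_array = np.zeros(3)
shared = torch.from_numpy(np_array)
np_array[0] = 7.0
shared   # tensor([7., 0., 0.], dtype=torch.float64) -- the edit is visible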
tensor_empty = torch.empty(3, 2)
print(tensor_empty)
tensor([[ 0.0000e+00, -1.5846e+29],
        [-7.5247e+03,  2.0005e+00],
        [ 9.8091e-45,  0.0000e+00]])
tensor_rand = torch.rand(3, 2)
print(tensor_rand)
tensor([[0.8953, 0.7131],
        [0.7226, 0.4733],
        [0.7516, 0.9558]])
# Seed the random number generator to get consistent, reproducible results
torch.manual_seed(24)
tensor_rand = torch.rand(3, 2)
print(tensor_rand)
tensor([[0.7644, 0.3751],
        [0.0751, 0.5308],
        [0.9660, 0.2770]])
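# Re-seeding reproduces the exact same draw (a quick check):
torch.manual_seed(24)
torch.rand(3, 2)   # identical values to tensor_rand above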
tensor_zeros = torch.zeros(3, 2)
print(tensor_zeros)
tensor([[0., 0.],
        [0., 0.],
        [0., 0.]])
tensor_ones = torch.ones(3, 2)
print(tensor_ones)
tensor([[1., 1.],
        [1., 1.],
        [1., 1.]])
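# The *_like constructors (sketch) match the shape and dtype of an existing tensor:
torch.zeros_like(tensor_rand)
torch.ones_like(tensor_rand)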
tensor_from_list.dtype
torch.float32
tensor_from_list_float64 = torch.tensor([[1., 0.], [0., 1.]],
dtype=torch.float64)
print(tensor_from_list_float64)
tensor([[1., 0.],
        [0., 1.]], dtype=torch.float64)
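# An existing tensor can also be cast after the fact with .to() (a sketch):
tensor_from_list.to(torch.float64)   # returns a new float64 tensor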
tensor_rand.device
device(type='cpu')
torch.cuda.is_available()
False
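# A device-agnostic sketch: use the GPU when one is present, otherwise the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tensor_on_device = torch.rand(3, 2, device=device)
tensor_on_device.device   # device(type='cpu') on this machine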
x = torch.rand(3, 2)
x
tensor([[0.2989, 0.3510],
        [0.0529, 0.1988],
        [0.8022, 0.1249]])
y = torch.rand(3, 2)
y
tensor([[0.6708, 0.9704],
        [0.4365, 0.7187],
        [0.7336, 0.1431]])
torch.add(x, y)
tensor([[0.9697, 1.3214],
        [0.4894, 0.9176],
        [1.5357, 0.2680]])
x + y
tensor([[0.9697, 1.3214],
        [0.4894, 0.9176],
        [1.5357, 0.2680]])
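# In-place variant (sketch): methods ending in an underscore mutate their tensor.
y.add_(x)   # y now holds the sum x + y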
x_numpy = x.numpy()
x_numpy
array([[0.29888242, 0.35096592],
       [0.05293709, 0.19883835],
       [0.8021769 , 0.12490124]], dtype=float32)
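# Caveat (sketch): .numpy() shares memory with the CPU tensor, so in-place
# changes to x are visible through x_numpy as well.
x.add_(1.0)
x_numpy[0][0]   # ~1.2989 -- same underlying buffer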
from sklearn.datasets import make_classification
from torch.utils.data import Dataset
X, y = make_classification(n_samples=100,
n_features=5,
random_state=42)
X[0]
array([-0.43066755, 0.67287309, -0.72427983, -0.53963044, -0.65160035])
class CustomDataset(Dataset):
    def __init__(self, X, y, transform=None):
        self.X = X
        self.y = y
        self.transform = transform

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        # Index the stored attributes, not the global X and y
        x = self.X[idx]
        label = self.y[idx]
        if self.transform:
            x = self.transform(x)
            label = self.transform(label)
        return x, label
custom_dataset = CustomDataset(X, y)
len(custom_dataset)
100
x_0, y_0 = custom_dataset[0]
x_0
array([-0.43066755, 0.67287309, -0.72427983, -0.53963044, -0.65160035])
y_0
0
from functools import partial
torch_tensor_float32 = partial(torch.tensor, dtype=torch.float32)
transformed_dataset = CustomDataset(X, y,
transform=torch_tensor_float32)
x_0, y_0 = transformed_dataset[0]
x_0
tensor([-0.4307, 0.6729, -0.7243, -0.5396, -0.6516])
y_0
tensor(0.)
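# Note: the transform also casts the integer label to a float32 tensor, which
# the BCELoss used later expects.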
from torch.utils.data import DataLoader
dataloader = DataLoader(transformed_dataset,
batch_size=4,
shuffle=True)
%%time
for i_batch, sample_batched in enumerate(dataloader):
print(f"[Batch {i_batch}] Number of rows in batch: {len(sample_batched[0])}")
[Batch 0] Number of rows in batch: 4
[Batch 1] Number of rows in batch: 4
[Batch 2] Number of rows in batch: 4
[Batch 3] Number of rows in batch: 4
[Batch 4] Number of rows in batch: 4
[Batch 5] Number of rows in batch: 4
[Batch 6] Number of rows in batch: 4
[Batch 7] Number of rows in batch: 4
[Batch 8] Number of rows in batch: 4
[Batch 9] Number of rows in batch: 4
[Batch 10] Number of rows in batch: 4
[Batch 11] Number of rows in batch: 4
[Batch 12] Number of rows in batch: 4
[Batch 13] Number of rows in batch: 4
[Batch 14] Number of rows in batch: 4
[Batch 15] Number of rows in batch: 4
[Batch 16] Number of rows in batch: 4
[Batch 17] Number of rows in batch: 4
[Batch 18] Number of rows in batch: 4
[Batch 19] Number of rows in batch: 4
[Batch 20] Number of rows in batch: 4
[Batch 21] Number of rows in batch: 4
[Batch 22] Number of rows in batch: 4
[Batch 23] Number of rows in batch: 4
[Batch 24] Number of rows in batch: 4
CPU times: user 12.7 ms, sys: 3.9 ms, total: 16.6 ms
Wall time: 15.5 ms
dataloader = DataLoader(transformed_dataset,
batch_size=4,
shuffle=True,
num_workers=4)
%%time
for i_batch, sample_batched in enumerate(dataloader):
print(f"[Batch {i_batch}] Number of rows in batch: {len(sample_batched[0])}")
[Batch 0] Number of rows in batch: 4
[Batch 1] Number of rows in batch: 4
[Batch 2] Number of rows in batch: 4
[Batch 3] Number of rows in batch: 4
[Batch 4] Number of rows in batch: 4
[Batch 5] Number of rows in batch: 4
[Batch 6] Number of rows in batch: 4
[Batch 7] Number of rows in batch: 4
[Batch 8] Number of rows in batch: 4
[Batch 9] Number of rows in batch: 4
[Batch 10] Number of rows in batch: 4
[Batch 11] Number of rows in batch: 4
[Batch 12] Number of rows in batch: 4
[Batch 13] Number of rows in batch: 4
[Batch 14] Number of rows in batch: 4
[Batch 15] Number of rows in batch: 4
[Batch 16] Number of rows in batch: 4
[Batch 17] Number of rows in batch: 4
[Batch 18] Number of rows in batch: 4
[Batch 19] Number of rows in batch: 4
[Batch 20] Number of rows in batch: 4
[Batch 21] Number of rows in batch: 4
[Batch 22] Number of rows in batch: 4
[Batch 23] Number of rows in batch: 4
[Batch 24] Number of rows in batch: 4
CPU times: user 30.6 ms, sys: 36.6 ms, total: 67.2 ms
Wall time: 101 ms
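# For a dataset this small, spawning workers costs more than it saves: wall time
# grew from ~16 ms to ~100 ms. num_workers pays off when __getitem__ does real
# work (disk I/O, decoding, augmentation); recent PyTorch also offers
# persistent_workers=True to keep workers alive across epochs.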
model = torch.nn.Sequential(
torch.nn.Linear(5, 5),
torch.nn.ReLU(),
torch.nn.Linear(5, 3),
torch.nn.ReLU(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid()
)
print(model)
Sequential(
  (0): Linear(in_features=5, out_features=5, bias=True)
  (1): ReLU()
  (2): Linear(in_features=5, out_features=3, bias=True)
  (3): ReLU()
  (4): Linear(in_features=3, out_features=1, bias=True)
  (5): Sigmoid()
)
num_features = 5
num_outputs = 1
layer_dims = [num_features, 5, 3, num_outputs]
class BinaryClassifier(torch.nn.Sequential):
    def __init__(self, layer_dims):
        super().__init__()
        for idx, dim in enumerate(layer_dims):
            if idx < len(layer_dims) - 1:
                # Linear layer connecting this dimension to the next one
                module = torch.nn.Linear(dim, layer_dims[idx + 1])
                self.add_module(f"linear{idx}", module)
            if idx < len(layer_dims) - 2:
                # Hidden layers get a ReLU activation
                activation = torch.nn.ReLU()
                self.add_module(f"relu{idx}", activation)
            elif idx == len(layer_dims) - 2:
                # The output layer gets a Sigmoid for binary classification
                activation = torch.nn.Sigmoid()
                self.add_module(f"sigmoid{idx}", activation)
bc_model = BinaryClassifier(layer_dims)
print(bc_model)
BinaryClassifier(
  (linear0): Linear(in_features=5, out_features=5, bias=True)
  (relu0): ReLU()
  (linear1): Linear(in_features=5, out_features=3, bias=True)
  (relu1): ReLU()
  (linear2): Linear(in_features=3, out_features=1, bias=True)
  (sigmoid2): Sigmoid()
)
bc_model.linear0
Linear(in_features=5, out_features=5, bias=True)
bc_model.linear1
Linear(in_features=5, out_features=3, bias=True)
len(bc_model)
6
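# A quick inspection sketch: list the submodules and count trainable parameters.
for name, module in bc_model.named_children():
    print(name, "->", module)
sum(p.numel() for p in bc_model.parameters() if p.requires_grad)   # 30 + 18 + 4 = 52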
x = torch.ones(2, 3,
requires_grad=True)
x
tensor([[1., 1., 1.],
        [1., 1., 1.]], requires_grad=True)
w = 2 * x
w
tensor([[2., 2., 2.],
        [2., 2., 2.]], grad_fn=<MulBackward0>)
y = w * w * w + 3 * w * w + 2 * w + 1
y
tensor([[25., 25., 25.],
        [25., 25., 25.]], grad_fn=<AddBackward0>)
z = torch.sum(y)
z
tensor(150., grad_fn=<SumBackward0>)
z.backward()
x.grad
tensor([[52., 52., 52.],
        [52., 52., 52.]])
# Verify against the analytical gradient: with w = 2x, the chain rule gives
# dz/dx = (dz/dw) * (dw/dx) = (3w^2 + 6w + 2) * 2
(3 * w * w + 6 * w + 2) * 2
tensor([[52., 52., 52.],
        [52., 52., 52.]], grad_fn=<MulBackward0>)
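# Sketch: torch.no_grad() turns off graph construction for inference-style code.
with torch.no_grad():
    y_eval = w * w * w + 3 * w * w + 2 * w + 1
y_eval.requires_grad   # False -- no grad_fn was recorded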
criterion = torch.nn.BCELoss()
optimizer = torch.optim.Adam(bc_model.parameters())
num_epochs = 10
for epoch in range(num_epochs):
    for idx, (X_batch, labels) in enumerate(dataloader):
        optimizer.zero_grad()                    # clear gradients from the previous step
        outputs = bc_model(X_batch).squeeze(1)   # (batch, 1) -> (batch,) to match the labels
        loss = criterion(outputs, labels)
        loss.backward()                          # compute gradients
        optimizer.step()                         # update the model parameters
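# A hedged evaluation sketch (names here are illustrative, not from the original):
# accuracy on the training data with a 0.5 decision threshold.
with torch.no_grad():
    X_all = torch_tensor_float32(X)
    y_all = torch_tensor_float32(y)
    preds = (bc_model(X_all).squeeze(1) > 0.5).float()
(preds == y_all).float().mean().item()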
pred_var = bc_model(transformed_dataset[0][0])
pred_var
tensor([0.5884], grad_fn=<SigmoidBackward>)
pred_var.grad   # None -- .grad is only populated for leaf tensors
pred_var.detach().numpy()[0]
0.58835465