# --- IPython / Jupyter interactive-help demo ---------------------------------
# NOTE(review): the lines below use IPython-only syntax (`?`, `??`, `!`, `%`,
# `%%`, glob completion); they will NOT run as a plain Python script.
import torch # <Ctrl> / <Shift> + <Return>
# <Tab> triggers attribute completion (sqrt, squeeze, ...)
torch.sq # <Tab>
# What about all `*Tensor`s?
# Glob-style object search: lists every torch attribute matching *Tensor
torch.*Tensor?
# <Shift>+<Tab> pops up the call signature / docstring tooltip
torch.nn.Module() # <Shift>+<Tab>
# Annotate your functions / classes!
# One `?` shows the docstring; two `??` also show the source
torch.nn.Module?
torch.nn.Module??
# `!` runs a shell command from the notebook
! ls -lh
# `%%bash` turns the whole cell into a bash script (line count per file):
%%bash
for f in $(ls *.*); do
echo $(wc -l $f)
done
# Help?
# Appending `?` to a magic shows that magic's documentation
%%bash?
# Getting some general help
%magic
Python has many native datatypes. Here are the important ones:
Booleans are either `True` or `False`.
See here for a complete overview.
# A 2×3×4 tensor; torch.Tensor(sizes) allocates WITHOUT initialising,
# so its contents are whatever happened to be in memory.
t = torch.Tensor(2, 3, 4)
type(t)
t.size()
# t.size() is a classic tuple =>
print('t size:', ' \u00D7 '.join(map(str, t.size())))  # \u00D7 is the '×' sign
print(f'point in a {t.numel()} dimensional space')
print(f'organised in {t.dim()} sub-dimensions')
t
# Mind the underscore!
# A trailing `_` marks an in-place op: fills t with random integers in [0, 10)
t.random_(10)
t
# torch.Tensor(t) shares t's underlying storage — r aliases t, it is NOT a
# copy (NOTE(review): legacy constructor form; confirm on the installed
# PyTorch version).
r = torch.Tensor(t)
# In-place reshape of the same 24 elements to 3×8
r.resize_(3, 8)
r
# Zeroing r in place also zeroes t — shared storage!
r.zero_()
t
# This *is* important, sigh...
# clone() makes an independent copy: mutating s leaves r untouched
s = r.clone()
s.fill_(1)
s
r
# 1-D tensors (vectors)
v = torch.Tensor([1, 2, 3, 4]); v
print(f'dim: {v.dim()}, size: {v.size()[0]}')
w = torch.Tensor([1, 0, 2, 0]); w
# Element-wise multiplication
v * w
# Scalar product: 1*1 + 2*0 + 3*2 + 4*0
v @ w
# 5 random integers in [0, 10)
x = torch.Tensor(5).random_(10); x
print(f'first: {x[0]}, last: {x[-1]}')
# Extract sub-Tensor [from:to)
x[1:2 + 1]
v
# arange(a, b) covers the half-open range [a, b) — hence the `+ 1`
v = torch.arange(1, 4 + 1); v
# pow(2) returns a new tensor; v is unchanged...
print(v.pow(2), v)
# ...whereas pow_(2) squares v in place
print(v.pow_(2), v)
# 2×4 matrix
m = torch.Tensor([[2, 5, 3, 7],
[4, 2, 1, 9]]); m
m.dim()
print(m.size(0), m.size(1), m.size(), sep=' -- ')
# Total number of elements: 2 * 4 = 8
m.numel()
# Same element, chained vs multi-dimensional indexing
m[0][2]
m[0, 2]
# Column 1 as a 1-D tensor...
m[:, 1]
# ...vs as a 2×1 sub-matrix (list indexing keeps the dimension)
m[:, [1]]
# Row 0 as a 1×4 sub-matrix vs a 1-D tensor
m[[0], :]
m[0, :]
v = torch.arange(1, 4 + 1); v
# Matrix–vector product, then per-row products
m @ v
m[[0], :] @ v
m[[1], :] @ v
# Element-wise arithmetic against random 2×4 matrices
m + torch.rand(2, 4)
m - torch.rand(2, 4)
m * torch.rand(2, 4)
m / torch.rand(2, 4)
# Transpose
m.t()
# Same as
m.transpose(0, 1)
# Integer range [3, 9) → 3..8
torch.arange(3, 8 + 1)
# Float range with a negative step: 5.7, 3.6, 1.5, -0.6, -2.7
torch.arange(5.7, -3, -2.1)
# 20 evenly spaced points in [3, 8], viewed as a 1×20 row
torch.linspace(3, 8, 20).view(1, -1)
torch.zeros(3, 5)
torch.ones(3, 2, 5)
# 3×3 identity matrix
torch.eye(3)
# Pretty plotting config
%run plot_conf.py
plt_style()
# Numpy bridge!
plt.hist(torch.randn(1000).numpy(), 100);
plt.hist(torch.randn(10**6).numpy(), 100); # how much does this chart weight?
# use rasterized=True for SVG/EPS/PDF!
plt.hist(torch.rand(10**6).numpy(), 100);
# Glob search (IPython-only syntax): list all available *Tensor types
torch.*Tensor?
m
# Casts return a NEW tensor; m itself is unchanged
m.double()
m.byte()
# Pick the GPU when one is available, otherwise stay on the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
m.to(device)
# Tensor → NumPy: m_np shares m's memory (zero copy)...
m_np = m.numpy(); m_np
# ...so writing through the array is visible in the tensor
m_np[0, 0] = -1; m_np
m
# NumPy → Tensor, also memory-sharing
# NOTE(review): `np` is not imported in this file — presumably provided by
# `%run plot_conf.py` above; confirm.
n_np = np.arange(5)
n = torch.from_numpy(n_np)
print(n_np, n)
# In-place doubling of the tensor shows up in the NumPy array too
n.mul_(2)
n_np
# Two 1×4 row vectors used to demonstrate concatenation.
a = torch.Tensor([[1, 2, 3, 4]])
b = torch.Tensor([[5, 6, 7, 8]])
print(a, b)
# dim=0 stacks along rows → a 2×4 matrix
torch.cat((a, b), dim=0)
# dim=1 stacks along columns → a 1×8 row
torch.cat((a, b), dim=1)