Content under a Creative Commons Attribution 4.0 license and code under an MIT license. © Manuela Bastidas Olivares and Nicolás Guarín-Zapata 2024.
We want to solve the following equation
$$ \frac{d^2 u(x)}{dx^2} = f(x)\quad \forall x \in (0, \pi)$$with $u(0) = u(\pi) = 0$.
In this case we use the approximation
$$u(x) \approx u_\theta(x) = \operatorname{NN}(x; \theta)\, ,$$where $\operatorname{NN}$ is a neural network with trainable parameters $\theta$.
The residual for this problem is given by
$$R(x) = \frac{d^2 u_\theta(x)}{dx^2} - f(x) \, .$$Because neural networks are nonlinear in their parameters $\theta$, evaluating the residual at a set of points $x_i$ and forcing it to be zero at those points would lead to a nonlinear system of equations
$$R(x_i) = 0 \quad \forall x_i\, .$$An alternative to solving this nonlinear system of equations is to minimize
$$\min_\theta \frac{1}{N}\sum_{i=1}^N |R(x_i)|^2 \, ,$$which would be exactly 0 if each of the residuals equals 0.
This problem is still missing the boundary conditions. To include them, we propose an objective function that incorporates them as penalty terms
$$\min_\theta \frac{1}{N}\sum_{i=1}^N R(x_i)^2 + \lambda_1 u_\theta(0)^2 + \lambda_2 u_\theta(\pi)^2\, .$$(In the code below both penalty weights are taken as $\lambda_1 = \lambda_2 = 1$.)
# This enables interactive plots when
# running on Google Colab
if 'google.colab' in str(get_ipython()):
    %pip install ipympl
    from google.colab import output
    output.enable_custom_widget_manager()

%matplotlib widget
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import grad
if 'google.colab' in str(get_ipython()):
    style = "https://raw.githubusercontent.com/nicoguaro/pinns_mapi-3/main/notebooks/clean.mplstyle"
else:
    style = "./clean.mplstyle"
plt.style.use(style)
class Model(torch.nn.Module):
    """Fully connected network that maps x to u_theta(x)."""
    def __init__(self, neurons, n_layers, activation=torch.tanh):
        super(Model, self).__init__()
        self.activation = activation
        self.layers = torch.nn.ModuleList()
        # Input layer: a single coordinate x
        self.layers.append(torch.nn.Linear(1, neurons))
        # Hidden layers
        for _ in range(n_layers - 2):
            self.layers.append(torch.nn.Linear(neurons, neurons))
        # Output layer: the scalar approximation u_theta(x)
        self.layers.append(torch.nn.Linear(neurons, 1))

    def forward(self, x):
        # Apply the activation after every layer except the last one
        for layer in self.layers[:-1]:
            x = self.activation(layer(x))
        x = self.layers[-1](x)
        return x
def f_rhs(x):
    # Right-hand side, chosen so that the exact solution is sin(2x)
    return -4*torch.sin(2 * x)
def residual(u, x, f):
    # First and second derivatives of u with respect to x via autograd
    du = grad(u, x, grad_outputs=torch.ones_like(u), create_graph=True)[0]
    ddu = grad(du, x, grad_outputs=torch.ones_like(du), create_graph=True)[0]
    return ddu - f(x)
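As a quick sanity check (our addition, not part of the original notebook), we can verify that this autograd-based second derivative reproduces $\frac{d^2}{dx^2}\sin(2x) = -4\sin(2x)$, so the residual of the exact solution vanishes:

# Sanity check (our addition): the residual of sin(2x) should be
# numerically zero, since its second derivative is -4 sin(2x) = f_rhs(x)
x_test = torch.linspace(0, np.pi, 5).view(-1, 1).requires_grad_(True)
u_test = torch.sin(2 * x_test)
print(residual(u_test, x_test, f_rhs).detach().numpy())  # ~0 up to round-off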
def loss_fn(u_model, x, f):
    u = u_model(x)
    res = residual(u, x, f)
    # Mean squared residual over the collocation points
    res_MSE = torch.mean(res**2)
    # Boundary penalty: u_theta(0)^2 + u_theta(pi)^2
    bc = u_model(torch.tensor([np.pi]))**2 + u_model(torch.tensor([0.]))**2
    return res_MSE + bc[0]
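Note that loss_fn fixes the penalty weights of the objective above to $\lambda_1 = \lambda_2 = 1$. A sketch of a variant that exposes them as arguments (loss_fn_weighted, lam1, and lam2 are our names, not part of the original) could look like this:

def loss_fn_weighted(u_model, x, f, lam1=1.0, lam2=1.0):
    # Mean squared residual over the collocation points
    u = u_model(x)
    res_MSE = torch.mean(residual(u, x, f)**2)
    # Weighted boundary penalties: lambda_1 u(0)^2 + lambda_2 u(pi)^2
    bc = lam1*u_model(torch.tensor([0.]))**2 + lam2*u_model(torch.tensor([np.pi]))**2
    return res_MSE + bc[0]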
def train(model, optimizer, loss_fn, f, n_pts, iterations):
    losses = []
    for iteration in range(iterations):
        optimizer.zero_grad()
        # Fresh batch of collocation points, sampled uniformly in (0, pi)
        x = torch.FloatTensor(n_pts, 1).uniform_(0, np.pi).requires_grad_(True)
        loss = loss_fn(model, x, f)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        if iteration % 100 == 0:
            print(f'Iteration {iteration}, Loss {loss.item()}')
    return losses
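Note that train draws a fresh batch of collocation points at every iteration, which acts as a stochastic sampling of the domain. A variant that trains on a fixed grid instead (a sketch of ours; train_fixed is not part of the original) would sample once, outside the loop:

def train_fixed(model, optimizer, loss_fn, f, n_pts, iterations):
    # Equispaced collocation points, sampled once before training
    x = torch.linspace(0, np.pi, n_pts).view(-1, 1).requires_grad_(True)
    losses = []
    for iteration in range(iterations):
        optimizer.zero_grad()
        loss = loss_fn(model, x, f)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    return losses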
def exact_u(x):
    # Exact solution of the boundary value problem, for comparison
    return torch.sin(2 * x)
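This exact solution follows from the chosen right-hand side: $u(x) = \sin(2x)$ gives $u''(x) = -4\sin(2x) = f(x)$, and it satisfies the boundary conditions $u(0) = u(\pi) = 0$.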
nn = 10
nl = 4
model = Model(neurons=nn, n_layers=nl)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Number of trainable parameters
sum(p.numel() for p in model.parameters() if p.requires_grad)
251
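The count can be verified by hand: with 4 layers of 10 neurons each, there are $(1\cdot 10 + 10) + 2\,(10\cdot 10 + 10) + (10\cdot 1 + 1) = 20 + 220 + 11 = 251$ weights and biases.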
n_pts = 1000
iterations = 1000
losses = train(model, optimizer, loss_fn, f_rhs, n_pts, iterations)
Iteration 0, Loss 7.980610370635986
Iteration 100, Loss 5.786739349365234
Iteration 200, Loss 1.7077887058258057
Iteration 300, Loss 1.131252408027649
Iteration 400, Loss 0.1608763039112091
Iteration 500, Loss 0.003429063130170107
Iteration 600, Loss 0.0006879867287352681
Iteration 700, Loss 0.00047126287245191634
Iteration 800, Loss 0.00043680204544216394
Iteration 900, Loss 0.0004027598479297012
xlist = np.linspace(0, np.pi, n_pts)
xlist_torch = torch.tensor(xlist, dtype=torch.float32, requires_grad=True).view(-1, 1)
u_ap = model(xlist_torch)
u_ex = exact_u(xlist_torch)
fig, ax = plt.subplots()
plt.plot(xlist, u_ap.detach().numpy())
plt.plot(xlist, u_ex.detach().numpy(), ".", markevery=50)
plt.legend(['Approximation', 'Exact'])
plt.xlabel("x")
plt.ylabel("u(x)")
fig, ax = plt.subplots()
plt.loglog(losses)
plt.xlabel("Iteraciones")
plt.legend(['Función de pérdida'])
fig, ax = plt.subplots()
plt.plot(xlist, u_ap.detach().numpy())
plt.plot(xlist, (u_ap - u_ex).detach().numpy())
plt.plot(xlist, residual(u_ap, xlist_torch, f_rhs).detach().numpy())
plt.xlabel("x")
plt.legend(["Solución Aproximada", "Error", "Residual"])
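To quantify the error beyond the plots, one can also compute a relative $L^2$ error over the sample points (a small addition of ours, not in the original notebook):

# Relative L2 error of the approximation (our addition)
err = torch.sqrt(torch.mean((u_ap - u_ex)**2) / torch.mean(u_ex**2))
print(f"Relative L2 error: {err.item():.3e}")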