#!/usr/bin/env python
# coding: utf-8

# # Low rank (and stack of low rank) matrices: forward and adjoints
#
# 1. **U V as linear operator (with v as model)**
#
# $$
# \mathbf{y}=\mathbf{R}\mathbf{U}\mathbf{V}^T = R_u(\mathbf{v})
# $$
#
# 2. **U V as linear operator (with u as model)**
#
# $$
# \mathbf{y}=\mathbf{R}\mathbf{U}\mathbf{V}^T = R_v(\mathbf{u})
# $$
#
# where $\mathbf{R}$ is any generic additional linear operator

# In[1]:

get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('matplotlib', 'inline')

import warnings
warnings.filterwarnings('ignore')

import numpy as np
from scipy.sparse.linalg import lsqr

from pylops.basicoperators import *
from pylops.utils.dottest import dottest
from pyproximal.proximal import *
from pyproximal import ProxOperator
from pyproximal.utils.bilinear import BilinearOperator


# In[2]:

class LowRankFactorizedMatrix(BilinearOperator):
    r"""Low-Rank Factorized Matrix operator.

    Bilinear operator for :math:`\mathbf{R}(\mathbf{X}\mathbf{Y})`, usable as
    a linear operator in either factor: with :math:`\mathbf{Y}` fixed the
    model is :math:`\mathbf{X}` (``_matvecx``/``_rmatvecx``) and with
    :math:`\mathbf{X}` fixed the model is :math:`\mathbf{Y}`
    (``_matvecy``/``_rmatvecy``).

    Parameters
    ----------
    X : :obj:`numpy.ndarray`
        Left-matrix of size :math:`n \times k`
    Y : :obj:`numpy.ndarray`
        Right-matrix of size :math:`k \times m`
    d : :obj:`numpy.ndarray`
        Data vector
    Op : :obj:`pylops.LinearOperator`, optional
        Additional linear operator applied to the flattened product

    """
    def __init__(self, X, Y, d, Op=None):
        self.n, self.k = X.shape
        self.m = Y.shape[1]
        self.x = X
        self.y = Y
        self.d = d
        self.Op = Op
        # (data size, model size) for the two parametrizations
        self.shapex = (self.n * self.m, self.n * self.k)
        self.shapey = (self.n * self.m, self.m * self.k)

    def __call__(self, x, y=None):
        """Least-squares objective :math:`\\frac{1}{2}\\|d - R(XY)\\|_2^2`.

        ``x`` may be a single stacked vector holding both factors, in which
        case its first ``n * k`` entries are the left factor.
        """
        if y is None:
            x, y = x[:self.n * self.k], x[self.n * self.k:]
        # temporarily swap in the candidate left factor to evaluate R(XY)
        xold = self.x.copy()
        self.updatex(x)
        res = self.d - self._matvecy(y)
        self.updatex(xold)
        return np.linalg.norm(res)**2 / 2.

    def _matvecx(self, x):
        """Forward with left factor as model: ``R((x as n x k) @ Y)``."""
        X = x.reshape(self.n, self.k)
        X = X @ self.y.reshape(self.k, self.m)
        if self.Op is not None:
            X = self.Op @ X.ravel()
        return X.ravel()

    def _matvecy(self, y):
        """Forward with right factor as model: ``R(X @ (y as k x m))``."""
        Y = y.reshape(self.k, self.m)
        X = self.x.reshape(self.n, self.k) @ Y
        if self.Op is not None:
            X = self.Op @ X.ravel()
        return X.ravel()

    def matvec(self, x):
        """Apply forward, dispatching on the model vector length."""
        if x.size == self.shapex[1]:
            y = self._matvecx(x)
        else:
            y = self._matvecy(x)
        return y

    def _rmatvecx(self, x):
        """Adjoint with left factor as model: ``reshape(R^H x) @ Y^H``."""
        if self.Op is not None:
            x = self.Op.H @ x
        X = x.reshape(self.n, self.m)
        X = X @ np.conj(self.y.reshape(self.k, self.m).T)
        return X.ravel()

    def _rmatvecy(self, x):
        """Adjoint with right factor as model."""
        if self.Op is not None:
            x = self.Op.H @ x
        Y = x.reshape(self.n, self.m)
        # NOTE(review): for complex inputs this equals the conjugate of
        # X^H @ Y; it matches the true adjoint for the real-valued data
        # used in this notebook — confirm before reusing with complex data
        X = (np.conj(Y.T) @ self.x.reshape(self.n, self.k)).T
        return X.ravel()

    def rmatvec(self, x, which="x"):
        """Apply adjoint for the factor selected by ``which`` ("x" or "y")."""
        if which == "x":
            y = self._rmatvecx(x)
        else:
            y = self._rmatvecy(x)
        return y


# In[3]:

# Restriction operator
n, m, k = 4, 5, 2
sub = 0.4
nsub = int(n*m*sub)
iava = np.random.permutation(np.arange(n*m))[:nsub]
Rop = Restriction(n*m, iava)


# In[4]:

# model
U = np.random.normal(0., 1., (n, k))
V = np.random.normal(0., 1., (m, k))
X = U @ V.T

# data
y = Rop * X.ravel()

# Masked data
Y = (Rop.H * Rop * X.ravel()).reshape(n, m)


# In[5]:

X = U @ V.T
X1 = (V @ U.T).T


# ## U V^T as linear operator (with V as model)
#
# $$
# \mathbf{y}=\mathbf{R}\mathbf{U}\mathbf{V}^T = R_u(\mathbf{v})
# $$

# In[6]:

Uop = MatrixMult(U, otherdims=(m,))
Top = Transpose((m, k), (1, 0))
Uop1 = Uop * Top
print(Uop, Top)

X1 = Uop1 * V.ravel()
X1 = X1.reshape(n, m)
print(X - X1)

# data
Ruop = Rop * Uop * Top
y1 = Ruop * V.ravel()
print(y - y1)


# In[7]:

v1 = Ruop.H @ y1


# ## U V^T as linear operator (with U as model)
#
# $$
# \mathbf{y}=\mathbf{R}\mathbf{U}\mathbf{V}^T = \mathbf{R}(\mathbf{V}\mathbf{U}^T)^T = R_v(\mathbf{u})
# $$

# In[8]:

Vop = MatrixMult(V, otherdims=(n,))
Top = Transpose((n, k), (1, 0))
T1op = Transpose((n, m), (1, 0))
Vop1 = T1op.T * Vop * Top

X1 = Vop1 * U.ravel()
X1 = X1.reshape(n, m)
print(X - X1)

# data
Ruop = Rop * T1op.T * Vop * Top
y1 = Ruop * U.ravel()
print(y - y1)


# In[9]:

u1 = Ruop.H @ y1


# Let's now use our function

# In[10]:

LOp = LowRankFactorizedMatrix(U, V.T, y, Op=Rop)
y - LOp._matvecx(U.ravel()), y - LOp._matvecy(V.T.ravel())


# In[11]:

u1 - LOp._rmatvecx(y).reshape(n, k)


# In[12]:

v1.T - LOp._rmatvecy(y).reshape(k, m)


# In[13]:

Fop = FunctionOperator(LOp._matvecx, LOp._rmatvecx, len(iava), n*k)
dottest(Fop)


# In[14]:

Fop = FunctionOperator(LOp._matvecy, LOp._rmatvecy, len(iava), k*m)
dottest(Fop)


# ## Stack of matrices
# We do the same now but we assume a stack of matrices, where for each of them we have
#
# $$
# \mathbf{y}_i=\mathbf{U}_i\mathbf{V}_i^T = R_{u_i}(\mathbf{v}_i)
# $$
#
# and
#
# $$
# \mathbf{y}=\mathbf{R} [\mathbf{y}_1^T, \mathbf{y}_2^T, ..., \mathbf{y}_N^T]^T
# $$

# In[15]:

class LowRankFactorizedStackMatrix(BilinearOperator):
    r"""Low-Rank Factorized Stack of Matrix operator.

    Parameters
    ----------
    X : :obj:`numpy.ndarray`
        Left-matrix of size :math:`r \times n \times k`
    Y : :obj:`numpy.ndarray`
        Right-matrix of size :math:`r \times k \times m`
    d : :obj:`numpy.ndarray`
        Data vector
    Op : :obj:`pylops.LinearOperator`, optional
        Linear operator

    """
    def __init__(self, X, Y, d, Op=None):
        self.r, self.n, self.k = X.shape
        self.m = Y.shape[2]
        self.x = X
        self.y = Y
        self.d = d
        self.Op = Op
        # (data size, model size) for the two parametrizations
        self.shapex = (self.r * self.n * self.m, self.r * self.n * self.k)
        self.shapey = (self.r * self.n * self.m, self.r * self.m * self.k)

    def __call__(self, x, y=None):
        """Least-squares objective; ``x`` may stack both factors."""
        if y is None:
            x, y = x[:self.r * self.n * self.k], x[self.r * self.n * self.k:]
        # temporarily swap in the candidate left factor to evaluate the misfit
        xold = self.x.copy()
        self.updatex(x)
        res = self.d - self._matvecy(y)
        self.updatex(xold)
        return np.linalg.norm(res)**2 / 2.

    def _matvecx(self, x):
        """Forward with left stack as model (batched matmul over r)."""
        X = x.reshape(self.r, self.n, self.k)
        X = np.matmul(X, self.y.reshape(self.r, self.k, self.m))
        if self.Op is not None:
            X = self.Op @ X.ravel()
        return X.ravel()

    def _matvecy(self, y):
        """Forward with right stack as model (batched matmul over r)."""
        Y = y.reshape(self.r, self.k, self.m)
        X = np.matmul(self.x.reshape(self.r, self.n, self.k), Y)
        if self.Op is not None:
            X = self.Op @ X.ravel()
        return X.ravel()

    def matvec(self, x):
        """Apply forward, dispatching on the model vector length."""
        if x.size == self.shapex[1]:
            y = self._matvecx(x)
        else:
            y = self._matvecy(x)
        return y

    def _rmatvecx(self, x):
        """Adjoint with left stack as model."""
        if self.Op is not None:
            x = self.Op.H @ x
        X = x.reshape(self.r, self.n, self.m)
        X = X @ np.conj(self.y.reshape(self.r, self.k, self.m).transpose(0, 2, 1))
        return X.ravel()

    def _rmatvecy(self, x):
        """Adjoint with right stack as model."""
        if self.Op is not None:
            x = self.Op.H @ x
        Y = x.reshape(self.r, self.n, self.m)
        X = (np.conj(Y.transpose(0, 2, 1) @
                     self.x.reshape(self.r, self.n, self.k))
             ).transpose(0, 2, 1)
        return X.ravel()

    def rmatvec(self, x, which="x"):
        """Apply adjoint for the factor selected by ``which`` ("x" or "y")."""
        if which == "x":
            y = self._rmatvecx(x)
        else:
            y = self._rmatvecy(x)
        return y


# In[16]:

# Restriction operator
r, n, m, k = 10, 4, 5, 2
nsub = int(r*n*m*sub)
iava = np.random.permutation(np.arange(r*n*m))[:nsub]
Rop = Restriction(r*n*m, iava)

U = np.random.normal(0., 1., (r, n, k))
V = np.random.normal(0., 1., (r, m, k))

# BUGFIX: generate the stack data before building the operator; the original
# passed the stale `y` from the single-matrix example (wrong size/values) as
# `d`, which would corrupt the objective computed by __call__.
y = Rop @ np.matmul(U, V.transpose(0, 2, 1)).ravel()
LOp = LowRankFactorizedStackMatrix(U, V.transpose(0, 2, 1), y, Op=Rop)

LOp._matvecx(U.ravel()) - LOp._matvecy(V.transpose(0, 2, 1).ravel())


# In[17]:

LOp._rmatvecx(y).reshape(r, n, k)


# In[18]:

LOp._rmatvecy(y).reshape(r, k, m)


# In[19]:

Fop = FunctionOperator(LOp._matvecx, LOp._rmatvecx, len(iava), r*n*k)
dottest(Fop)


# In[20]:

Fop = FunctionOperator(LOp._matvecy, LOp._rmatvecy, len(iava), r*k*m)
dottest(Fop)