Verified commit 2b876eff authored by Laurent Modolo

update M1_biosciences_dimension_reduction

parent 7cb7d644
@@ -249,7 +249,7 @@ $$
\begin{itemize}
\item Using trigonometry properties:
$$
\operatorname{cos} \theta = \|\ybf\|_2 \Big/ \|\xbf\|_2
\operatorname{cos} \theta = \frac{\|\ybf_{proj}\|_2}{ \|\ybf\|_2} = \lambda \frac{\|\xbf\|_2 }{\|\ybf\|_2}
$$
\item The dot product is the length of $\xbf$ times the length of the orthogonal projection of $\ybf$ (see the numerical check below)
\item Orthogonality:
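A minimal numpy check of the cosine relation above; the vectors $\xbf$ and $\ybf$ used here are arbitrary illustrative values, not taken from the slides.
\begin{verbatim}
import numpy as np

# two arbitrary vectors (illustrative values)
x = np.array([3.0, 1.0])
y = np.array([2.0, 2.0])

# cos(theta) from the dot product
cos_theta = x @ y / (np.linalg.norm(x) * np.linalg.norm(y))

# orthogonal projection of y onto the line spanned by x: y_proj = lambda * x
lam = (x @ y) / (x @ x)
y_proj = lam * x

# for an acute angle, cos(theta) = ||y_proj|| / ||y|| = lambda * ||x|| / ||y||
print(cos_theta)
print(np.linalg.norm(y_proj) / np.linalg.norm(y))
print(lam * np.linalg.norm(x) / np.linalg.norm(y))
\end{verbatim}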
@@ -500,9 +500,9 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
$$
\item This operation amounts to a linear transform of $\xbf_i$ (old coordinates) into $\zbf_{i1}$ (new coordinate)
$$
\zbf_{i1} = \widetilde{\xbf}_{i,c} \vbf_1'
\zbf_{i1} = \widetilde{\xbf}_{i,c} \vbf_1
$$
\item How to determine $\vbf_1=(v_{11},v_{12})$ ?
\item How to determine $\vbf_1=\left[ \begin{array}{c} v_{11} \\ v_{12} \end{array} \right]_{2 \times 1}$ ?
\end{itemize}
\column{.4\textwidth}
\begin{center}
@@ -542,7 +542,7 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\begin{eqnarray*}
\zbf_{1} &=& v_{11} \widetilde{\xbf}_c^1 + v_{12} \widetilde{\xbf}_c^2 \\
&=& \left[ \begin{array}{cc} \widetilde{\xbf}^1_c & \widetilde{\xbf}_c^2 \end{array} \right]_{n \times 2} \left[ \begin{array}{c} v_{11} \\ v_{12} \end{array} \right]_{2 \times 1} \\
\zbf_{1} &=& \widetilde{\Xbf}_c \vbf_1'
\zbf_{1} &=& \widetilde{\Xbf}_c \vbf_1
\end{eqnarray*}
\item Equation of a line with direction $\vbf_1$ (see the numerical sketch below)
\item Centered data, so no intercept
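A minimal numpy sketch of this linear transform, using an illustrative centered two-variable dataset and an arbitrary unit-norm direction $\vbf_1$; names and values are not taken from the slides.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

# illustrative two-variable dataset (n x 2), then column centering
X = rng.normal(size=(100, 2))
X_c = X - X.mean(axis=0)

# an arbitrary unit-norm direction v1 = (v11, v12)
v1 = np.array([0.6, 0.8])                     # ||v1||_2 = 1

# z1 as a linear combination of the centered variables ...
z1_comb = v1[0] * X_c[:, 0] + v1[1] * X_c[:, 1]
# ... equals the matrix product X_c v1
z1_mat = X_c @ v1

print(np.allclose(z1_comb, z1_mat))           # True
\end{verbatim}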
@@ -561,7 +561,7 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\begin{itemize}
\item The first axis carries the largest empirical variance
\begin{eqnarray*}
\operatorname{var}(\zbf_{1}) &=& \operatorname{var} \Big(\widetilde{\Xbf}_c \vbf_1' \Big) \\
\operatorname{var}(\zbf_{1}) &=& \operatorname{var} \Big(\widetilde{\Xbf}_c \vbf_1 \Big) \\
&=& \operatorname{var} \Big( v_{11} \widetilde{\xbf}_c^1 + v_{12} \widetilde{\xbf}_c^2 \Big) \\
&=& v_{11}^2 \operatorname{var} \big(\widetilde{\xbf}_c^1\big) + v_{12}^2 \operatorname{var} \big(\widetilde{\xbf}_c^2\big) + 2 v_{11} v_{12} \operatorname{c}(\widetilde{\xbf}_c^1,\widetilde{\xbf}_c^2)
\end{eqnarray*}
@@ -586,7 +586,7 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\begin{eqnarray*}
\operatorname{var}(\zbf_{1}) &=& v_{11}^2 + v_{12}^2 + 2 v_{11} v_{12} \times \operatorname{r}(\widetilde{\xbf}_c^1,\widetilde{\xbf}_c^2)
\end{eqnarray*}
\item Constraint of ortho-normality: $\|\vbf_1\|^2_2=1$
\item Constraint for a normed basis: $\|\vbf_1\|^2_2=1$
\item This ensures that the new basis vectors have unit norm, so that the information carried by the new axes can be compared
\end{itemize}
\column{.4\textwidth}
@@ -603,9 +603,9 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\begin{itemize}
\item To find the first axis, find coefficients $\mathbf{v}_1$, s.t.
\begin{eqnarray*}
\max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \operatorname{var}(\zbf_{1}) \Big\} &=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \operatorname{var}( \Xbf_c \vbf_1' ) \Big\}\\
&=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \vbf_1' \Big( \Xbf_c'\Xbf_c \Big) \vbf_1\Big\} \\
&=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 }\Big\{ \vbf_1' \Sbf \vbf_1\Big\}
\max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \operatorname{var}(\zbf_{1}) \Big\} &=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \operatorname{var}( \Xbf_c \vbf_1 ) \Big\}\\
&=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \vbf_1' \Big( \Xbf_c'\Xbf_c \Big) \vbf_1\Big\} \\
&=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 }\Big\{ \vbf_1' \Sbf \vbf_1\Big\}
\end{eqnarray*}
\item The solution of this optimization problem is explicit
\begin{eqnarray*}
@@ -619,18 +619,18 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\begin{frame}
\frametitle{normed PCA as an optimization problem}
\begin{itemize}
\item To find the first axis, find coefficients $\mathbf{v}_1$, s.t.
\item To find the first axis, find coefficients $\widetilde{\vbf}_1$, s.t.
\begin{eqnarray*}
\max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \operatorname{var}(\zbf_{1}) \Big\} &=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \operatorname{var}( \widetilde{\Xbf}_c \vbf_1' ) \Big\}\\
&=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 } \Big\{ \vbf_1' \Big( \widetilde{\Xbf}_c'\widetilde{\Xbf}_c \Big) \vbf_1\Big\} \\
&=& \max_{\vbf_1, \|\vbf_1\|_2^2=1 }\Big\{ \vbf_1' \Rbf \vbf_1\Big\}
\max_{\widetilde{\vbf}_1, \|\widetilde{\vbf}_1\|_2^2=1 } \Big\{ \operatorname{var}(\zbf_{1}) \Big\} &=& \max_{\widetilde{\vbf}_1, \|\widetilde{\vbf}_1\|_2^2=1 } \Big\{ \operatorname{var}( \widetilde{\Xbf}_c \widetilde{\vbf}_1 ) \Big\}\\
&=& \max_{\widetilde{\vbf}_1, \|\widetilde{\vbf}_1\|_2^2=1 } \Big\{ \widetilde{\vbf}_1' \Big( \widetilde{\Xbf}_c'\widetilde{\Xbf}_c \Big) \widetilde{\vbf}_1\Big\} \\
&=& \max_{\widetilde{\vbf}_1, \|\widetilde{\vbf}_1\|_2^2=1 }\Big\{ \widetilde{\vbf}_1' \Rbf \widetilde{\vbf}_1\Big\}
\end{eqnarray*}
\item The solution of this optimization problem is explicit
\begin{eqnarray*}
\vbf_1'\vbf_1 &=& 1 \\
\Rbf \vbf_1 &=& \lambda_1 \vbf_1
\widetilde{\vbf}_1'\widetilde{\vbf}_1 &=& 1 \\
\Rbf \widetilde{\vbf}_1 &=& \lambda_1 \widetilde{\vbf}_1
\end{eqnarray*}
\item $\vbf_1$ (resp $\lambda_1$) is the first eigenvector (resp eigenvalue) of the \textbf{correlation} matrix
\item $\widetilde{\vbf}_1$ (resp $\lambda_1$) is the first eigenvector (resp eigenvalue) of the \textbf{correlation} matrix
\end{itemize}
\end{frame}
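A minimal numpy sketch of this optimization for normed PCA: on illustrative standardized data, the leading eigenvector of the correlation matrix gives the projection with the largest variance, equal to $\lambda_1$. Data and variable names are illustrative assumptions, not taken from the slides.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

# illustrative data: two correlated variables, then centering and scaling
n = 500
x1 = rng.normal(size=n)
x2 = 0.8 * x1 + 0.6 * rng.normal(size=n)
X = np.column_stack([x1, x2])
X_tilde = (X - X.mean(axis=0)) / X.std(axis=0)

R = X_tilde.T @ X_tilde / n                   # correlation matrix

# eigendecomposition (eigh returns eigenvalues in ascending order)
eigval, eigvec = np.linalg.eigh(R)
lambda1, v1 = eigval[-1], eigvec[:, -1]

# var(z1) for the first eigenvector equals lambda_1 ...
z1 = X_tilde @ v1
print(z1.var(), lambda1)

# ... and no other unit-norm direction does better
for _ in range(5):
    v = rng.normal(size=2)
    v /= np.linalg.norm(v)
    print((X_tilde @ v).var() <= lambda1 + 1e-12)   # True
\end{verbatim}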
@@ -642,7 +642,7 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\begin{itemize}
\item The eigenvectors of $\Sbf$ give the directions of maximal variance of the data
\item $\mathbf{v}_1 \perp \mathbf{v}_2$, and both have unit norm
\item $(\lambda_1^2,\lambda_2^2)$ quantify the amount of variance in each direction
\item $(\lambda_1,\lambda_2)$ quantify the amount of variance in each direction
\item The eigendecomposition provides the best representation of the data in terms of variance
\item It is the linear transform that makes the covariance matrix of the new coordinates diagonal
\end{itemize}
@@ -661,14 +661,14 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\begin{itemize}
\item Eigenvalues quantify the inertia of the dataset:
$$
I_T(X) = \sum_{k=1} I_k(X) = \sum_{k=1}^K \lambda_k^2
I_T(X) = \sum_{k=1}^K I_k(X) = \sum_{k=1}^K \lambda_k
$$
\item Percentage of explained variance (see the sketch below):
$$
\text{Contrib}_k = \frac{\lambda_k^2}{\sum_{\ell=1}^K \lambda_\ell^2}
\text{Contrib}_k = \frac{\lambda_k}{\sum_{\ell=1}^K \lambda_\ell}
$$
$$
\text{Contrib}_{1:k} = \frac{\sum_{h=1}^k\lambda_h^2}{\sum_{\ell=1}^K \lambda_\ell^2}
\text{Contrib}_{1:k} = \frac{\sum_{h=1}^k\lambda_h}{\sum_{\ell=1}^K \lambda_\ell}
$$
\end{itemize}
\column{.45\textwidth}
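A minimal sketch of these contributions; the eigenvalues below are illustrative values (for a normed PCA they sum to the number of variables $p$).
\begin{verbatim}
import numpy as np

# illustrative eigenvalues of a 4-variable correlation matrix (sum = p = 4)
lam = np.array([2.4, 1.1, 0.3, 0.2])

# proportion of variance explained by each axis, and cumulated over 1..k
contrib = lam / lam.sum()
contrib_cum = np.cumsum(contrib)

print(contrib)        # [0.6   0.275 0.075 0.05 ]
print(contrib_cum)    # [0.6   0.875 0.95  1.   ]
\end{verbatim}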
@@ -683,7 +683,7 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\frametitle{Representation of individuals in the new coordinates}
\begin{center}
\includegraphics[scale=0.6]{./figures/projection_individuals.pdf} \\
The new coordinates for individuals are $\vbf_k' \big( \xbf_i- \overline{\xbf}\big)$
The new coordinates for individuals are $\big( \xbf_i- \overline{\xbf}\big)\vbf_k$
\end{center}
\end{frame}
@@ -699,7 +699,7 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\item From 2D to 2D, there is no dimension reduction!
\item The approach is generalized from $p$ variables to $K$ principal components
$$
\zbf_{k} = \sum_{j=1}^p v_{kj} \widetilde{\xbf}_c^j = \Xbf_c \mathbf{v}_1'
\zbf_{k} = \sum_{j=1}^p v_{kj} \widetilde{\xbf}_c^j = \widetilde{\Xbf}_c \mathbf{v}_k
$$
\item Intuition: if $v_{kj}$ is large, variable $j$ contributes strongly to principal component $\zbf_k$
\item From $p$ to $K(=2)$ the information was compressed
@@ -707,6 +707,46 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\end{frame}
\begin{frame}
\frametitle{General Case with $K$ principal components}
\begin{itemize}
\item $\Vbf_{[p \times K]} = \big[ \vbf_1, \hdots,\vbf_K \big]$, the eigenvectors of the covariance matrix
$$
\Sbf_{p \times p} = \frac{1}{n} \Xbf'\Xbf = \frac{1}{n}\sum_{k=1}^K \lambda_k \vbf_k \vbf_k'
$$
\item $\Ubf_{[n \times K]} = \big[ \ubf_1, \hdots,\ubf_K \big]$, the eigenvectors of the Gram matrix
$$
\Gbf_{n \times n} = \frac{1}{p} \Xbf \Xbf' = \frac{1}{p} \sum_{k=1}^K \lambda_k \ubf_k \ubf_k'
$$
\item Then we have
\begin{eqnarray*}
\big( \Xbf \Xbf' \big) \ubf_k &=& \sqrt{\lambda_k} \Xbf \vbf_k = \lambda_k \ubf_k \\
\big( \Xbf' \Xbf \big) \vbf_k &=& \sqrt{\lambda_k} \Xbf' \ubf_k = \lambda_k \vbf_k
\end{eqnarray*}
\end{itemize}
\end{frame}
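A minimal numpy check of the duality between the eigenvectors of $\Xbf'\Xbf$ and of $\Xbf\Xbf'$, via the SVD. Here $\lambda_k$ denotes the eigenvalues of $\Xbf'\Xbf$ (the $1/n$ and $1/p$ normalizations of the slide only rescale them), and the matrix is illustrative.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
X = rng.normal(size=(8, 3))            # illustrative n x p matrix

# SVD: X = U diag(s) V', with s_k = sqrt(lambda_k)
U, s, Vt = np.linalg.svd(X, full_matrices=False)
V = Vt.T

k = 0
# v_k and u_k are eigenvectors of X'X and XX' for the same eigenvalue
print(np.allclose(X.T @ X @ V[:, k], s[k]**2 * V[:, k]))   # True
print(np.allclose(X @ X.T @ U[:, k], s[k]**2 * U[:, k]))   # True

# duality: X v_k = sqrt(lambda_k) u_k  and  X' u_k = sqrt(lambda_k) v_k
print(np.allclose(X @ V[:, k], s[k] * U[:, k]))            # True
print(np.allclose(X.T @ U[:, k], s[k] * V[:, k]))          # True
\end{verbatim}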
\begin{frame}
\frametitle{Low-rank approximation of X}
\begin{itemize}
\item The rank of a matrix ($r^{*}$) is the number of linearly independent columns (unknown in practice)
\item From a statistical perspective, it is the number of independent coordinates that can describe a dataset
\item The initial dataset can be rewritten such that
$$
\Xbf = \Ubf_{n \times r^{*}} \operatorname{diag}\big(\sqrt{\lambda_1},\hdots,\sqrt{\lambda_{r^{*}}}\big) \Vbf_{r^{*} \times p}' = \sum_{k=1}^{r^{*}} \sqrt{\lambda_k} \ubf_k \vbf_k'
$$
\item Since the rank is unknown, we select a number of components $K$, and then:
$$
\Xbf \simeq \Ubf_{n \times K} \operatorname{diag}\big(\sqrt{\lambda_1},\hdots,\sqrt{\lambda_{K}}\big) \Vbf_{K \times p}' = \sum_{k=1}^{K} \sqrt{\lambda_k} \ubf_k \vbf_k'
$$
\item It is called the low-rank approximation of $\Xbf$
\end{itemize}
\end{frame}
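A minimal numpy sketch of the truncated (low-rank) reconstruction; the data matrix is illustrative and $\sqrt{\lambda_k}$ corresponds to the $k$-th singular value.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(3)
X = rng.normal(size=(50, 10)) @ rng.normal(size=(10, 10))   # illustrative data

U, s, Vt = np.linalg.svd(X, full_matrices=False)

# rank-K approximation: keep the K leading terms sqrt(lambda_k) u_k v_k'
K = 3
X_K = U[:, :K] @ np.diag(s[:K]) @ Vt[:K, :]

# relative reconstruction error (Frobenius norm); it shrinks as K grows
err = np.linalg.norm(X - X_K) / np.linalg.norm(X)
print(X_K.shape, err)
\end{verbatim}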
\begin{frame}
\frametitle{PCA on the complete ER dataset - 1}
@@ -755,7 +795,7 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\item Geometrically, the quality of the representation measures how collinear $\mathbf{x}_i-\overline{\mathbf{x}}$ is to the axis $\mathbf{z}_k$
\item Compute
$$
\cos^2 \theta( \mathbf{x}_i-\overline{\mathbf{x}}, \mathbf{z}_k) = \frac{\Big( \vbf_k' \big( \xbf_i- \overline{\xbf}\big) \Big)^2}{\|\xbf_i- \overline{\xbf}\|^2\|\vbf_k\|^2}
\cos^2 \theta( \mathbf{x}_i-\overline{\mathbf{x}}, \mathbf{z}_k) = \frac{\Big( \big( \xbf_i- \overline{\xbf}\big)\vbf_k \Big)^2}{\|\xbf_i- \overline{\xbf}\|^2\|\vbf_k\|^2}
$$
\end{itemize}
\column{.45\textwidth}
@@ -769,7 +809,7 @@ I_T(\Xbf) & = & \frac{1}{n} \sum_{i=1}^n \sum_{j=1}^p (x_i^j - \overline{x}^j)^2
\frametitle{Contribution of individuals to the representation}
The contribution of an individual $\mathbf{x}_i$ is the proportion of the inertia of axis $k$ carried by $\mathbf{x}_i$ (see the sketch below)
$$
\operatorname{contr}(\xbf_i,\mathbf{z}_k) = \frac{\Big( \vbf_k' \big( \xbf_i- \overline{\xbf}\big) \Big)^2}{n \lambda_k}
\operatorname{contr}(\xbf_i,\mathbf{z}_k) = \frac{\Big( \big( \xbf_i- \overline{\xbf}\big)\vbf_k \Big)^2}{n \lambda_k}
$$
\begin{center}
\includegraphics[scale=0.3]{./figures/outlier_contribution.pdf}
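A minimal numpy sketch of both quantities ($\cos^2$ quality and contribution of individuals) on illustrative data; the contributions to a given axis sum to one.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(4)
n, p = 200, 5
X = rng.normal(size=(n, p))                   # illustrative data
X_c = X - X.mean(axis=0)

S = X_c.T @ X_c / n                           # covariance matrix
eigval, eigvec = np.linalg.eigh(S)
lam, V = eigval[::-1], eigvec[:, ::-1]        # decreasing order

k = 0
z_k = X_c @ V[:, k]                           # coordinates on axis k

# quality of representation of each individual on axis k (cos^2)
cos2 = z_k**2 / (X_c**2).sum(axis=1)

# contribution of each individual to axis k; contributions sum to 1
contrib = z_k**2 / (n * lam[k])
print(cos2.min(), cos2.max())
print(contrib.sum())                          # ~1.0
\end{verbatim}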
@@ -788,7 +828,7 @@ $$
& \ddots & \\
\operatorname{r}(\xbf^j,\xbf^{j'}) & \hdots &\operatorname{r}(\xbf^p,\xbf^{p})
\end{array}
\right] =\frac{1}{n} \widetilde{\Xbf}_c' \widetilde{\Xbf}_c = \sum_{k=1}^K \lambda_k^2 \vbf_k \vbf_k'
\right] =\frac{1}{n} \widetilde{\Xbf}_c' \widetilde{\Xbf}_c = \sum_{k=1}^K \lambda_k \vbf_k \vbf_k'
$$
\item Get $K$ new uncorrelated (non redundant) variables $\Zbf=\left[ \begin{array}{c} \zbf^1, \hdots, \zbf^K\end{array}\right]$
\end{itemize}
@@ -799,19 +839,19 @@ $$
\begin{columns}[c]
\column{.5\textwidth}
\begin{itemize}
\item Components are independent of variance with $S^2( \zbf_k) = \lambda_k^2$
\item Components are uncorrelated, with variance $\operatorname{var}( \zbf_k) = \lambda_k$ (checked numerically below)
$$
\Sbf_Z = \left[
\begin{array}{ccc}
\lambda_1^2 & & 0\\
\lambda_1 & & 0\\
& \ddots & \\
0 & & \lambda_K^2
0 & & \lambda_K
\end{array}
\right]
$$
\item Contribution of variables to axis:
\begin{eqnarray*}
\operatorname{c}(\xbf^j,\zbf_k) &=& (\xbf^{j})' \ubf_k = \lambda_k^2 v_{jk} \\
\operatorname{c}(\xbf^j,\zbf_k) &=& (\xbf^{j})' \ubf_k = \lambda_k v_{jk} \\
&=& \operatorname{r}(\xbf^j,\zbf_k) \, \text{for normed PCA} \\
\operatorname{c}(\Xbf,\Zbf) &=& \Sbf_Z \Vbf
\end{eqnarray*}
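A minimal numpy check, on illustrative standardized data, that the covariance matrix of the components is diagonal with the eigenvalues on the diagonal, and that $\operatorname{c}(\xbf^j,\zbf_k) = \lambda_k v_{jk}$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(5)
n, p = 300, 4
X = rng.normal(size=(n, p)) @ rng.normal(size=(p, p))    # illustrative data
X_t = (X - X.mean(axis=0)) / X.std(axis=0)               # normed PCA setting

R = X_t.T @ X_t / n                           # correlation matrix
eigval, eigvec = np.linalg.eigh(R)
lam, V = eigval[::-1], eigvec[:, ::-1]

Z = X_t @ V                                   # principal components

# covariance of the components: diagonal, with lambda_k on the diagonal
S_Z = Z.T @ Z / n
print(np.allclose(S_Z, np.diag(lam)))         # True

# covariance between variables and components: c(x^j, z_k) = lambda_k v_jk
C = X_t.T @ Z / n
print(np.allclose(C, V * lam))                # True
\end{verbatim}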
@@ -14,6 +14,105 @@
\end{itemize}
\end{frame}
\section[Principal Components]{Principal Components and orthogonal subspaces}
\begin{frame}
\frametitle{Decomposition of $\mathbb{R}^p$ into orthogonal subspaces}
\begin{itemize}
\item Let us consider $p$ orthogonal subspaces $\big( E_k \big)_{k=1,p}$, each spanned by a single axis (dimension 1):
$$
\mathbb{R}^p = \bigoplus_{k=1}^p E_k,
$$
\item Orthogonal projection of $X_i \in \mathbb{R}^p$ on a subspace $E_k=\operatorname{vect}(Z_k)$
$$
\operatorname{Proj}_{E_k}(X_i) = X_i V_k \in \mathbb{R}
$$
\item The inertia of $X$ with respect to $E_k$ measures the proximity of $X$ to $E_k$
$$
I_{E_k}(X)=\frac{1}{n} \sum_{i=1}^n \|X_i-\operatorname{Proj}_{E_k}(X_i)\|_2^2
$$
\item Let $E_k^\perp$ denote the orthogonal complement of the subspace $E_k$.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Pythagoras--Huygens Theorem}
\begin{center}
\includegraphics[scale=0.5]{./figures/ortho_proj.pdf}
\end{center}
$$
I_T(X) = I_{E}(X) + I_{E^\perp}(X) = I \Big( \operatorname{Proj}_{E}(X) \Big) + I \Big( \operatorname{Proj}_{E^\perp}(X) \Big)
$$
\end{frame}
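A minimal numpy check of this decomposition for a one-dimensional subspace $E$ spanned by an arbitrary unit vector (data and vector are illustrative): the total inertia splits into the inertias of the projections on $E$ and on its orthogonal complement.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(6)
n, p = 100, 3
X = rng.normal(size=(n, p))
X = X - X.mean(axis=0)                        # centered data

v = rng.normal(size=p)
v /= np.linalg.norm(v)                        # unit vector spanning E

proj_E = np.outer(X @ v, v)                   # projection of each row on E
proj_Eperp = X - proj_E                       # projection on E_perp

I_T = (X**2).sum() / n
I_E = (proj_E**2).sum() / n                   # inertia of Proj_E(X)
I_Eperp = (proj_Eperp**2).sum() / n           # inertia of Proj_Eperp(X)
print(np.isclose(I_T, I_E + I_Eperp))         # True
\end{verbatim}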
\begin{frame}
\frametitle{Construction of principal components (PC)}
\begin{itemize}
\item Summarize the data $X$ by a new dataset $Z_{n \times K}$, $K \leq p$ and $K$ fixed
\item The new axes span the 1-dimensional subspaces $\Big( E_k=\operatorname{vect}(Z_k) \Big)_k$
$$
\forall k,k', \quad E_k \perp E_{k'}
$$
\item $Z=[Z_1, \hdots, Z_K]$ constitutes a set of independent PCs (easy to interpret)
\item $Z_k \in \mathbb{R}^n$ is defined as a linear combination of the variables
$$
Z_{k} = X V_k, \quad V_k=\big(V_{jk} \big)_j \in \mathbb{R}^p
$$
\item $V_{p \times K} = [V_1,\hdots, V_K]$ is the matrix of contributions (weights) of variables $\big( X^j \big)_j$
\begin{eqnarray*}
Z_{n \times K} &=& X_{n \times p}V_{p \times K}
\end{eqnarray*}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Decomposition of the Inertia on the PCs}
\begin{eqnarray*}
I_T(X) & = & \frac{1}{n} \sum_{i=1}^n \sum_{k=1}^p \|X_i-\operatorname{Proj}_{E_k}(X_i) +\operatorname{Proj}_{E_k}(X_i)\|^2 \\
& = & \frac{1}{n} \sum_{i=1}^n \sum_{k=1}^p \|X_i-\operatorname{Proj}_{E_k}(X_i)\|^2 + \frac{1}{n} \sum_{i=1}^n \sum_{k=1}^p \|\operatorname{Proj}_{E_k}(X_i)\|^2 \\
& = & \frac{1}{n} \sum_{i=1}^n \sum_{k=1}^p \|X_i-Z_{ik}\|^2 + \frac{1}{n} \sum_{i=1}^n \sum_{k=1}^p \|Z_{ik}\|^2 \\
& = & \frac{1}{n} \sum_{i=1}^n \sum_{k=1}^p \|X_i-X_iV_k\|^2 + \frac{1}{n} \sum_{i=1}^n \sum_{k=1}^p \|X_iV_k\|^2 \\
\end{eqnarray*}
\end{frame}
\begin{frame}
\frametitle{Orthogonal Components with maximal variance}
\begin{itemize}
\item We want to summarize the variability of the dataset
\item Find the PCs that explain the maximum of the observed variance:
$$
\frac{1}{n} \sum_{i=1}^n \| \operatorname{Proj}_{E_k}(X_i) \|^2 = \frac{1}{n} \sum_{i=1}^n \|Z_{ik}\|^2 = \frac{1}{n} V_k' \Big(X'X\Big)V_k = V_k' \Sigma V_k
$$
\item The optimization scheme is iterative, and for the $k$th PC:
$$
\widehat{V}_k = \underset{V \in \mathbb{R}^p, \|V\|^2_2=1}{\arg \max} \Big( \frac{1}{n} V' X'X V \Big) \quad \text{with } Z_k \perp (Z_1,\hdots, Z_{k-1})
$$
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Constrained optimization}
\begin{itemize}
\item To account for the unit-norm constraint, we introduce a Lagrange multiplier
\begin{eqnarray*}
\mathcal{L}(V,\mu) &=& \frac{1}{n} V' X'X V - \mu \Big( V'V -1 \Big) \\
\frac{\partial \mathcal{L}}{\partial \mu} & =& -\big( V'V -1 \big) \\
\frac{\partial \mathcal{L}}{\partial V} & =& \frac{2}{n} X'X V - 2 \mu V
\end{eqnarray*}
\item Which gives the following solution
\begin{eqnarray*}
V'V & =& 1 \\
\frac{1}{n} X'X V = \Sigma V & =& \mu V
\end{eqnarray*}
\item The optimal solution is provided by the eigenvectors of the covariance matrix $\Sigma$
\end{itemize}
\end{frame}
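A minimal numpy check of the stationarity conditions above on illustrative centered data, taking $\Sigma = X'X / n$: the leading eigenvector satisfies both conditions and attains the constrained maximum.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(7)
n, p = 200, 4
X = rng.normal(size=(n, p))
X = X - X.mean(axis=0)

Sigma = X.T @ X / n                           # covariance matrix
eigval, eigvec = np.linalg.eigh(Sigma)
mu, V1 = eigval[-1], eigvec[:, -1]            # leading eigenpair

# the stationarity conditions of the Lagrangian hold at the eigenvector
print(np.isclose(V1 @ V1, 1.0))               # V'V = 1
print(np.allclose(Sigma @ V1, mu * V1))       # Sigma V = mu V

# and it attains the maximum of V' Sigma V over unit-norm vectors
for _ in range(5):
    V = rng.normal(size=p)
    V /= np.linalg.norm(V)
    print(V @ Sigma @ V <= mu + 1e-12)        # True
\end{verbatim}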
\begin{frame}
\frametitle{Spectral decomposition of symmetric real matrices}
@@ -199,9 +199,10 @@
\include{PCA}
\include{perspectives}
\begin{frame}{References}
\begin{frame}{Useful links}
\begin{itemize}
\item \url{https://towardsdatascience.com/}
\item \href{https://pca4ds.github.io/}{PCA for datascience}
\item \href{https://www.youtube.com/watch?v=LyGKycYT2v0}{Link to a tuto on dot products}
\item \href{https://en.wikipedia.org/wiki/Transformation_matrix}{Wiki for Linear Transforms}
\item \href{http://cazencott.info/dotclear/public/lectures/IntroML_Azencott.pdf}{Book for the introduction to machine learning (C.-A. Azencott)}
@@ -210,6 +211,9 @@
\item PCA in general \url{http://factominer.free.fr/index_fr.html}
\end{itemize}
\end{frame}
\begin{frame}{References}
\begin{small}
\bibliographystyle{plain}
\bibliography{biblio}
@@ -217,6 +221,7 @@
\end{small}
\end{frame}
\newpage
\include{annexes}