% \documentclass[serif]{beamer} % Serif for Computer Modern math font.
\documentclass[serif, handout]{beamer} % Handout to ignore pause statements
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
% \usetheme{Berlin} % Displays sections on top
\usetheme{Frankfurt} % Displays section titles on top: Fairly thin but still swallows some material at bottom of crowded slides
%\usetheme{Berkeley}
\usepackage[english]{babel}
\usepackage{amsmath} % for binom
% \usepackage{graphicx} % To include pdf files!
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\usepackage{comment}
\setbeamertemplate{footline}[frame number]
\mode<presentation>

\title{More Linear Algebra\footnote{See Appendix A for more detail. This slide show is an open-source document. See last slide for copyright information.}}
\subtitle{STA 431: Fall 2023}
\date{} % To suppress date

\begin{document}

\begin{frame}
\titlepage
\end{frame}

\begin{frame}
\frametitle{Overview}
\tableofcontents
\end{frame}

\section{Things you already know}

\begin{frame}
\frametitle{You already know about}

\begin{itemize}
\item Matrices $\mathbf{A} = [a_{ij}]$
\item Matrix addition and subtraction $\mathbf{A}+\mathbf{B} = [a_{ij}+b_{ij}]$
\item Column vectors $\mathbf{v} = [v_j]$
\item Scalar multiplication $a \mathbf{B} = [a\,b_{ij}]$
\item Matrix multiplication $\mathbf{AB} = \left[ \displaystyle{\sum_k}a_{ik}b_{kj} \right]$
\item[] In words: The $i,j$ element of $\mathbf{AB}$ is the inner product of row $i$ of $\mathbf{A}$ with column $j$ of $\mathbf{B}$.
\item Inverse $\mathbf{A}^{-1}\mathbf{A} = \mathbf{AA}^{-1} = \mathbf{I}$
\item Transpose $\mathbf{A}^\top = [a_{ji}]$
\item Symmetric matrices $\mathbf{A} = \mathbf{A}^\top$
\item Determinants
\item Linear independence
\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Three mistakes that will get you a zero}
\framesubtitle{Numbers are $1 \times 1$ matrices, but larger matrices are not just numbers.}
\pause

You will get a zero if you
\vspace{2mm}
\begin{itemize}
\item Write $\mathbf{AB} = \mathbf{BA}$. It's not true in general. \pause
\item Write $\mathbf{A}^{-1}$ when $\mathbf{A}$ is not a square matrix. The inverse is not even defined. \pause
\item Represent the inverse of a matrix (even if it exists) by writing it in the denominator, like $\mathbf{a}^\top \mathbf{B}^{-1}\mathbf{a} = \frac{\mathbf{a}^\top \mathbf{a}}{\mathbf{B}}$. Matrices are not just numbers. \pause
\end{itemize}
\vspace{5mm}

If you commit one of these crimes, the mark for the question (or part of a question, like 3c) is zero, regardless of what else you write.

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Half marks off, at least}
%\framesubtitle{At least}

You will lose \emph{at least} half marks for writing a product like $\mathbf{AB}$ when the number of columns in $\mathbf{A}$ does not equal the number of rows in $\mathbf{B}$.

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Trace}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Trace of a square matrix: Sum of the diagonal elements}
\begin{displaymath}
tr(\mathbf{A}) = \sum_{i=1}^n a_{i,i}.
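% Source-only R note (not displayed): the trace defined above can be computed
% in R as the sum of the diagonal. A hypothetical helper, assuming M is square:
%   tr = function(M) sum(diag(M))  # sum of the diagonal elements of M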
\end{displaymath} \vspace{10mm} \begin{itemize} \item Obvious: $tr(\mathbf{A}+\mathbf{B}) = tr(\mathbf{A}) + tr(\mathbf{B})$. \pause \item Not obvious: $tr(\mathbf{AB}) = tr(\mathbf{BA})$ \item Even though $\mathbf{AB} \neq \mathbf{BA}$ \end{itemize} \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Example} %\framesubtitle{} Let $\mathbf{A} = \left( \begin{array}{rrr} 2 & 1 & 0 \\ 5 & -4 & 3 \\ \end{array} \right)$ and $\mathbf{B} = \left( \begin{array}{rr} 1 & 0 \\ 2 & 3 \\ -1 & 3 \end{array} \right)$ \pause \vspace{4mm} \begin{eqnarray*} \mathbf{AB} & = & \left( \begin{array}{rr} 4 & 3 \\ -6 & -3 \end{array} \right) \\ &&\\ \mathbf{BA} & = & \left( \begin{array}{rrr} 2 & 1 & 0 \\ 19 & -10 & 9 \\ 13 & -13 & 9 \\ \end{array} \right) \end{eqnarray*} And $tr(\mathbf{AB})=tr(\mathbf{BA})$. \end{frame} \begin{comment} A = rbind(c(2 , 1 , 0), c(5 , -4 , 3) ) B = rbind(c(1,0), c(2,3), c(-1,3) ) A%*%B B%*%A \end{comment} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Spectral decomposition} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Eigenvalues and eigenvectors} %\framesubtitle{} Let $\mathbf{A} = [a_{i,j}]$ be a square matrix. $\mathbf{A}$ is said to have an \emph{eigenvalue} $\lambda$ and \emph{eigenvector} $\mathbf{x} \neq \mathbf{0}$ corresponding to $\lambda$ if \begin{displaymath} \mathbf{Ax} = \lambda\mathbf{x}. \end{displaymath} \pause Recall \begin{itemize} \item Eigenvalues are the $\lambda$ values that solve the determinantal equation $|\mathbf{A}-\lambda\mathbf{I}| = 0$. \pause \item The determinant is the product of the eigenvalues: $|\mathbf{A}| = \prod_{i=1}^n \lambda_i$ \end{itemize} \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Spectral decomposition of symmetric matrices} %\framesubtitle{} {\small The \emph{Spectral decomposition theorem} says that every square and symmetric matrix $\mathbf{A} = [a_{i,j}]$ may be written \begin{equation*} \mathbf{A} = \mathbf{CDC}^\top, \end{equation*} \pause where the columns of $\mathbf{C}$ (which may also be denoted $\mathbf{x}_1, \ldots, \mathbf{x}_n$) are the eigenvectors of $\mathbf{A}$, and the diagonal matrix $\mathbf{D}$ contains the corresponding eigenvalues. \pause \begin{displaymath} \mathbf{D} = \left( \begin{array}{c c c c } \lambda_1 & 0 & \cdots & 0 \\ 0 & \lambda_2 & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \lambda_n \\ \end{array} \right) \end{displaymath} \pause \begin{itemize} \item If the elements of $\mathbf{A}$ are real, the eigenvalues are real. \item The eigenvectors may be chosen to be orthonormal, so that $\mathbf{C}$ is an orthogonal matrix. That is, $\mathbf{CC}^\top = \mathbf{C}^\top\mathbf{C} = \mathbf{I}$. \end{itemize} } % End size \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Inverse of a diagonal matrix} % \framesubtitle{To set things up} \pause Suppose the eigenvalues are all non-zero. 
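% Source-only R sketch (not displayed) checking the preceding slides: the trace
% identity with the A and B from the example, then a spectral decomposition of
% a hypothetical symmetric matrix S built from them.
% A = rbind(c(2,1,0), c(5,-4,3)); B = rbind(c(1,0), c(2,3), c(-1,3))
% sum(diag(A %*% B)); sum(diag(B %*% A))  # tr(AB) = tr(BA) = 1
% S = B %*% A + t(B %*% A)                # symmetric, so eigen() applies
% es = eigen(S); C = es$vectors; D = diag(es$values)
% C %*% D %*% t(C)                        # recovers S: spectral decomposition
% C %*% t(C)                              # close to the identity matrix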
Let \begin{displaymath} \mathbf{D}^{-1} = \left( \begin{array}{c c c c } \frac{1}{\lambda_1} & 0 & \cdots & 0 \\ 0 & \frac{1}{\lambda_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \frac{1}{\lambda_n} \\ \end{array} \right) \end{displaymath} \pause It works because \begin{displaymath} \left( \begin{array}{c c c c } \lambda_1 & 0 & \cdots & 0 \\ 0 & \lambda_2 & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \lambda_n \\ \end{array} \right) \left( \begin{array}{c c c c } \frac{1}{\lambda_1} & 0 & \cdots & 0 \\ 0 & \frac{1}{\lambda_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \frac{1}{\lambda_n} \\ \end{array} \right) = \mathbf{I} \end{displaymath} \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Square root of a diagonal matrix} %\framesubtitle{} {\small Suppose the eigenvalues are non-negative. Let \begin{displaymath} \mathbf{D}^{1/2} = \left( \begin{array}{c c c c } \sqrt{\lambda_1} & 0 & \cdots & 0 \\ 0 & \sqrt{\lambda_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \sqrt{\lambda_n} \\ \end{array} \right) \end{displaymath} \pause It works because \begin{eqnarray*} \mathbf{D}^{1/2} \mathbf{D}^{1/2} & = & \left( \begin{array}{c c c c } \sqrt{\lambda_1} & 0 & \cdots & 0 \\ 0 & \sqrt{\lambda_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \sqrt{\lambda_n} \\ \end{array} \right) \left( \begin{array}{c c c c } \sqrt{\lambda_1} & 0 & \cdots & 0 \\ 0 & \sqrt{\lambda_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \sqrt{\lambda_n} \\ \end{array} \right) \\ & = & \left( \begin{array}{c c c c } \lambda_1 & 0 & \cdots & 0 \\ 0 & \lambda_2 & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \lambda_n \\ \end{array} \right) = \mathbf{D} \end{eqnarray*} } % End size \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Using $\mathbf{A} = \mathbf{CDC}^\top$} \framesubtitle{Where $\mathbf{A}$ is a symmetric matrix} {\LARGE \begin{eqnarray*} \mathbf{A}^{-1} & = & \mathbf{C}\mathbf{D}^{-1}\mathbf{C}^\top \\ \mathbf{A}^{1/2} & = & \mathbf{C} \mathbf{D}^{1/2} \mathbf{C}^\top \\ \mathbf{A}^{-1/2} & = & \mathbf{C} \mathbf{D}^{-1/2} \mathbf{C}^\top \end{eqnarray*} } % End size \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Positive definite} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Positive definite matrices} %\framesubtitle{} The $n \times n$ matrix $\mathbf{A}$ is said to be \emph{positive definite} if \begin{displaymath} \mathbf{y}^\top \mathbf{A} \mathbf{y} > 0 \end{displaymath} for \emph{all} $n \times 1$ vectors $\mathbf{y} \neq \mathbf{0}$. \pause \vspace{3mm} It is called \emph{non-negative definite} (or sometimes positive semi-definite) if $\mathbf{y}^\top \mathbf{A} \mathbf{y} \geq 0$. \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{comment} \begin{frame} \frametitle{Example: Show $\mathbf{X}^\top\mathbf{X}$ non-negative definite} \pause Let $\mathbf{X}$ be an $n \times p$ matrix of real constants and let $\mathbf{y}$ be $p \times 1$. 
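% Source-only R sketch of this argument with a hypothetical X and y (not displayed):
% X = rbind(c(1,2), c(3,4), c(5,6)); y = c(1,-1)
% t(y) %*% t(X) %*% X %*% y   # equals sum((X %*% y)^2) = 3, which is >= 0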
\pause
Then $\mathbf{z} = \mathbf{Xy}$ is $n \times 1$, and \pause
\begin{eqnarray*}
& & \mathbf{y}^\top \, (\mathbf{X}^\top\mathbf{X}) \, \mathbf{y} \\ \pause
& = & (\mathbf{Xy})^\top (\mathbf{Xy}) \\ \pause
& = & \mathbf{z}^\top \mathbf{z} \\ \pause
& = & \sum_{i=1}^n z_i^2 \pause \geq 0 ~~ \blacksquare
\end{eqnarray*}
\end{frame}
\end{comment}

% HW: Show if cols of X linearly independent, then X'X is strictly positive definite.
% herehere p. 96
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Some properties of symmetric positive definite matrices}
\framesubtitle{Variance-covariance matrices are often assumed positive definite.}
\pause

For a symmetric matrix,
\vspace{5mm}
\begin{itemize}
\item[] Positive definite
\item[]~~~~~~~~$\Downarrow$
\item[] All eigenvalues positive
\item[]~~~~~~~~$\Downarrow$
\item[] Inverse exists $\Leftrightarrow$ Columns (rows) linearly independent.
\end{itemize}
\pause
\vspace{5mm}

If a real symmetric matrix is also non-negative definite (as a variance-covariance matrix \emph{must} be), then linear independence of its columns $\Rightarrow$ positive definite.

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Extras}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Extras}
\framesubtitle{You may not know about these, and we may use them occasionally}

\begin{itemize}
\item Rank
\item Partitioned matrices
\end{itemize}

\end{frame}

\begin{frame}
\frametitle{Rank}
%\framesubtitle{}
\begin{itemize}
\item Row rank is the number of linearly independent rows.
\item Column rank is the number of linearly independent columns. \pause
\item Row rank and column rank are always equal, so the rank of a matrix is this common value. \pause
\item[]
\item $rank(\mathbf{AB}) \leq \min\left(rank(\mathbf{A}),rank(\mathbf{B})\right)$: the rank of a product cannot exceed the rank of either factor.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Partitioned matrix}
%\framesubtitle{}
\begin{itemize}
\item A matrix of matrices
{\LARGE
\begin{displaymath}
\left[ \begin{array}{c|c}
\mathbf{A} & \mathbf{B} \\ \hline
\mathbf{C} & \mathbf{D}
\end{array} \right]
\end{displaymath}
\pause
} % End size
\item Row by column (matrix) multiplication works, provided the matrices are the right sizes.
\end{itemize}
\end{frame}

\section{R}

\begin{frame}[fragile]
\frametitle{Matrix calculation with R}
\pause
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> is.matrix(3) # Is the number 3 a 1x1 matrix?
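> # Note that matrix(3) would be a 1x1 matrix: is.matrix(matrix(3)) is TRUE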
\end{verbatim} \pause } % End color \begin{verbatim} [1] FALSE \end{verbatim} \pause {\color{blue} \begin{verbatim} > treecorr = cor(trees); treecorr \end{verbatim} } % End color \begin{verbatim} Girth Height Volume Girth 1.0000000 0.5192801 0.9671194 Height 0.5192801 1.0000000 0.5982497 Volume 0.9671194 0.5982497 1.0000000 \end{verbatim} \pause {\color{blue} \begin{verbatim} > is.matrix(treecorr) \end{verbatim} } % End color \begin{verbatim} [1] TRUE \end{verbatim} } % End size \end{frame} \begin{frame}[fragile] \frametitle{Creating matrices} \framesubtitle{Bind rows into a matrix} {\footnotesize % or scriptsize {\color{blue} \begin{verbatim} > # Bind rows of a matrix together > A = rbind( c(3, 2, 6,8), + c(2,10,-7,4), + c(6, 6, 9,1) ); A \end{verbatim} \pause } % End color \begin{verbatim} [,1] [,2] [,3] [,4] [1,] 3 2 6 8 [2,] 2 10 -7 4 [3,] 6 6 9 1 \end{verbatim} \pause {\color{blue} \begin{verbatim} > # Transpose > t(A) \end{verbatim} } % End color \begin{verbatim} [,1] [,2] [,3] [1,] 3 2 6 [2,] 2 10 6 [3,] 6 -7 9 [4,] 8 4 1 \end{verbatim} } % End size \end{frame} \begin{frame}[fragile] \frametitle{Matrix multiplication} \framesubtitle{Remember, $\mathbf{A}$ is $3\times 4$} \pause {\footnotesize % or scriptsize {\color{blue} \begin{verbatim} > # U = A A' (3x3), V = A' A (4x4) > U = A %*% t(A) > V = t(A) %*% A; V \end{verbatim} \pause } % End color \begin{verbatim} [,1] [,2] [,3] [,4] [1,] 49 62 58 38 [2,] 62 140 -4 62 [3,] 58 -4 166 29 [4,] 38 62 29 81 \end{verbatim} } % End size \end{frame} \begin{frame}[fragile] \frametitle{Determinants} \framesubtitle{$\mathbf{A}$ is $3\times 4$} {\footnotesize % or scriptsize {\color{blue} \begin{verbatim} > # U = A A' (3x3), V = A' A (4x4) > # So rank(V) cannot exceed 3 and det(V)=0 > det(U); det(V) \end{verbatim} \pause } % End color \begin{verbatim} [1] 1490273 [1] -3.622862e-09 \end{verbatim} \pause } % End size Inverse of $\mathbf{U}$ exists, but inverse of $\mathbf{V}$ does not. \end{frame} \begin{frame}[fragile] \frametitle{Inverses} \pause %\framesubtitle{} \begin{itemize} \item The \texttt{solve} function is for solving systems of linear equations like $\mathbf{Mx}=\mathbf{b}$. \item Just typing \texttt{solve(M)} gives $\mathbf{M}^{-1}$. 
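% Source-only R sketch of the two uses of solve(), with a hypothetical M and b
% (not displayed on the slide):
% M = diag(c(2, 4)); b = c(2, 8)
% solve(M, b)  # solves M x = b, giving c(1, 2)
% solve(M)     # gives the inverse of M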
\pause
\end{itemize}

{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> # Recall U = A A' (3x3), V = A' A (4x4)
> solve(U)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
              [,1]          [,2]          [,3]
[1,]  0.0173505123 -8.508508e-04 -1.029342e-02
[2,] -0.0008508508  5.997559e-03  2.013054e-06
[3,] -0.0102934160  2.013054e-06  1.264265e-02
\end{verbatim}
\pause
{\color{blue}
\begin{verbatim}
> solve(V)
\end{verbatim}
\pause
} % End color
{\color{red}
\begin{verbatim}
Error in solve.default(V) :
  system is computationally singular: reciprocal condition number = 6.64193e-18
\end{verbatim}
} % End color
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Eigenvalues and eigenvectors}
\pause
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> # Recall U = A A' (3x3), V = A' A (4x4)
> eigen(U)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
$values
[1] 234.01162 162.89294  39.09544

$vectors
           [,1]       [,2]        [,3]
[1,] -0.6025375  0.1592598  0.78203893
[2,] -0.2964610 -0.9544379 -0.03404605
[3,] -0.7409854  0.2523581 -0.62229894
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{$\mathbf{V}$ should have at least one zero eigenvalue}
\framesubtitle{Because $\mathbf{A}$ is $3 \times 4$, $\mathbf{V} = \mathbf{A}^\top \mathbf{A}$, and the rank of a product cannot exceed the minimum rank of the matrices.}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> eigen(V)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
$values
[1]  2.340116e+02  1.628929e+02  3.909544e+01 -1.012719e-14

$vectors
           [,1]         [,2]       [,3]         [,4]
[1,] -0.4475551  0.006507269 -0.2328249  0.863391352
[2,] -0.5632053 -0.604226296 -0.4014589 -0.395652773
[3,] -0.5366171  0.776297432 -0.1071763 -0.312917928
[4,] -0.4410627 -0.179528649  0.8792818  0.009829883
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Spectral decomposition $\mathbf{V}= \mathbf{CDC}^\top$}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> eigenV = eigen(V)
> C = eigenV$vectors; D = diag(eigenV$values); D
\end{verbatim}
} % End color
\begin{verbatim}
         [,1]     [,2]     [,3]          [,4]
[1,] 234.0116   0.0000  0.00000  0.000000e+00
[2,]   0.0000 162.8929  0.00000  0.000000e+00
[3,]   0.0000   0.0000 39.09544  0.000000e+00
[4,]   0.0000   0.0000  0.00000 -1.012719e-14
\end{verbatim}
\pause
{\color{blue}
\begin{verbatim}
> # C is an orthogonal matrix
> C %*% t(C)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
              [,1]         [,2]         [,3]          [,4]
[1,]  1.000000e+00 5.551115e-17 0.000000e+00 -3.989864e-17
[2,]  5.551115e-17 1.000000e+00 2.636780e-16  3.556183e-17
[3,]  0.000000e+00 2.636780e-16 1.000000e+00  2.558717e-16
[4,] -3.989864e-17 3.556183e-17 2.558717e-16  1.000000e+00
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Verify $\mathbf{V}= \mathbf{CDC}^\top$}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> V; C %*% D %*% t(C)
\end{verbatim}
} % End color
\begin{verbatim}
     [,1] [,2] [,3] [,4]
[1,]   49   62   58   38
[2,]   62  140   -4   62
[3,]   58   -4  166   29
[4,]   38   62   29   81
     [,1] [,2] [,3] [,4]
[1,]   49   62   58   38
[2,]   62  140   -4   62
[3,]   58   -4  166   29
[4,]   38   62   29   81
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Square root matrix $\mathbf{V}^{1/2}= \mathbf{CD}^{1/2}\mathbf{C}^\top$}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> sqrtV = C %*% sqrt(D) %*% t(C)
\end{verbatim}
\pause
} % End color
{\color{red}
\begin{verbatim}
Warning message:
In sqrt(D) : NaNs produced
\end{verbatim}
\pause
} % End color
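% Source-only R note (not displayed): sqrt(D) fails because round-off error makes
% the last eigenvalue slightly negative. A hypothetical fix, clipping at zero:
% D2 = diag(pmax(eigenV$values, 0))
% sqrtV = C %*% sqrt(D2) %*% t(C)
% sqrtV %*% sqrtV  # recovers V, up to round-off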
{\color{blue} \begin{verbatim} > # Multiply to get V > sqrtV %*% sqrtV; V \end{verbatim} \pause } % End color \begin{verbatim} [,1] [,2] [,3] [,4] [1,] NaN NaN NaN NaN [2,] NaN NaN NaN NaN [3,] NaN NaN NaN NaN [4,] NaN NaN NaN NaN [,1] [,2] [,3] [,4] [1,] 49 62 58 38 [2,] 62 140 -4 62 [3,] 58 -4 166 29 [4,] 38 62 29 81 \end{verbatim} } % End size \end{frame} \begin{frame}[fragile] \frametitle{What happened?} %\framesubtitle{} {\footnotesize % or scriptsize {\color{blue} \begin{verbatim} > D; sqrt(D) \end{verbatim} \pause } % End color \begin{verbatim} [,1] [,2] [,3] [,4] [1,] 234.0116 0.0000 0.00000 0.000000e+00 [2,] 0.0000 162.8929 0.00000 0.000000e+00 [3,] 0.0000 0.0000 39.09544 0.000000e+00 [4,] 0.0000 0.0000 0.00000 -1.012719e-14 [,1] [,2] [,3] [,4] [1,] 15.29744 0.00000 0.000000 0 [2,] 0.00000 12.76295 0.000000 0 [3,] 0.00000 0.00000 6.252635 0 [4,] 0.00000 0.00000 0.000000 NaN \end{verbatim} {\color{red} \begin{verbatim} Warning message: In sqrt(D) : NaNs produced \end{verbatim} } % End color } % End size \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Copyright Information} This slide show was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a \href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US} {Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website: \href{http://www.utstat.toronto.edu/brunner/oldclass/431s23} {\small\texttt{http://www.utstat.toronto.edu/brunner/oldclass/431s23}} \end{frame} \end{document} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%