\documentclass[serif]{beamer} % Serif for Computer Modern math font.
% \documentclass[serif, handout]{beamer} % Handout to ignore pause statements
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
% \usetheme{Berlin} % Displays sections on top
\usetheme{Frankfurt} % Displays section titles on top: Fairly thin but still swallows some material at bottom of crowded slides
%\usetheme{Berkeley}
\usepackage[english]{babel}
\usepackage{amsmath} % for binom
% \usepackage{graphicx} % To include pdf files!
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\setbeamertemplate{footline}[frame number]
\mode<presentation>
\title{More Linear Algebra\footnote{See Chapter 2 of \emph{Linear Models in Statistics} for more detail. This slide show is an open-source document. See last slide for copyright information.}}
\subtitle{STA 302: Fall 2013}
\date{} % To suppress date

\begin{document}

\begin{frame}
\titlepage
\end{frame}

\begin{frame}
\frametitle{Overview}
\tableofcontents
\end{frame}

\section{Things you already know}

\begin{frame}
\frametitle{You already know about}
\begin{itemize}
\item Matrices $\mathbf{A} = [a_{ij}]$
\item Matrix addition and subtraction $\mathbf{A}+\mathbf{B} = [a_{ij}+b_{ij}]$
\item Scalar multiplication $a \mathbf{B} = [a\,b_{ij}]$
\item Matrix multiplication $\mathbf{AB} = \left[ \displaystyle{\sum_k}a_{ik}b_{kj} \right]$
\item Inverse $\mathbf{A}^{-1}\mathbf{A} = \mathbf{AA}^{-1} = \mathbf{I}$
\item Transpose $\mathbf{A}^\prime = [a_{ji}]$
\item Symmetric matrices $\mathbf{A} = \mathbf{A}^\prime$
\item Determinants
\item Linear independence
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Linear independence}
%\framesubtitle{}
Let $\mathbf{X}$ be an $n \times p$ matrix of constants. The columns of $\mathbf{X}$ are said to be \emph{linearly dependent} if there exists $\mathbf{v} \neq \mathbf{0}$ with $\mathbf{Xv} = \mathbf{0}$. We will say that the columns of $\mathbf{X}$ are linearly \emph{independent} if $\mathbf{Xv} = \mathbf{0}$ implies $\mathbf{v} = \mathbf{0}$.

\vspace{10mm}

For example, let us show that if $\mathbf{A}^{-1}$ exists, then the columns of $\mathbf{A}$ are linearly independent.
\begin{displaymath}
\mathbf{Av} = \mathbf{0} \Rightarrow \mathbf{A}^{-1}\mathbf{Av} = \mathbf{A}^{-1}\mathbf{0} \Rightarrow \mathbf{v} = \mathbf{0}
\end{displaymath}
\end{frame}

\begin{frame}
\frametitle{How to show $\mathbf{A}^{-1\prime}= \mathbf{A}^{\prime-1}$}
Suppose $\mathbf{B} = \mathbf{A}^{-1}$, meaning $\mathbf{AB} = \mathbf{BA} = \mathbf{I}$. Must show two things: $\mathbf{B}^\prime\mathbf{A}^\prime = \mathbf{I}$ and $\mathbf{A}^\prime\mathbf{B}^\prime = \mathbf{I}$.
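Recall that the transpose of a product is the product of the transposes in reverse order:
\begin{displaymath}
(\mathbf{AB})^\prime = \mathbf{B}^\prime\mathbf{A}^\prime
\end{displaymath}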
\begin{eqnarray*}
\mathbf{AB} = \mathbf{I} & \Rightarrow & \mathbf{B}^\prime\mathbf{A}^\prime = \mathbf{I}^\prime = \mathbf{I} \\
\mathbf{BA} = \mathbf{I} & \Rightarrow & \mathbf{A}^\prime\mathbf{B}^\prime = \mathbf{I}^\prime = \mathbf{I}
\end{eqnarray*}
$\blacksquare$
\end{frame}

% \section{Trace} deleted: See STA431s13 for omitted material

\begin{frame}
\frametitle{Extras}
\framesubtitle{You may not know about these, but we may use them occasionally}
\begin{itemize}
\item Trace
\item Rank
\item Partitioned matrices
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Trace of a square matrix}
%\framesubtitle{}
\begin{itemize}
\item Sum of diagonal elements
\item Obvious: $tr(\mathbf{A}+\mathbf{B}) = tr(\mathbf{A})+tr(\mathbf{B})$
\item Not obvious: $tr(\mathbf{AB}) = tr(\mathbf{BA})$
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Rank}
%\framesubtitle{}
\begin{itemize}
\item Row rank is the number of linearly independent rows
\item Column rank is the number of linearly independent columns
\item Row rank and column rank are always equal; this common value is called the rank of the matrix
\item $rank(\mathbf{AB}) \leq \min\left(rank(\mathbf{A}),rank(\mathbf{B})\right)$
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Partitioned matrix}
%\framesubtitle{}
\begin{itemize}
\item A matrix of matrices
{\LARGE
\begin{displaymath}
\left[ \begin{array}{c|c}
\mathbf{A} & \mathbf{B} \\ \hline
\mathbf{C} & \mathbf{D}
\end{array} \right]
\end{displaymath}
} % End size
\item Row by column (matrix) multiplication works, provided the matrices are the right sizes.
\end{itemize}
\end{frame}

\section{Spectral decomposition}

\begin{frame}
\frametitle{Eigenvalues and eigenvectors}
%\framesubtitle{}
Let $\mathbf{A} = [a_{i,j}]$ be an $n \times n$ matrix, so the following applies only to square matrices. $\mathbf{A}$ is said to have an \emph{eigenvalue} $\lambda$ and (non-zero) \emph{eigenvector} $\mathbf{x}$ corresponding to $\lambda$ if
\begin{displaymath}
\mathbf{Ax} = \lambda\mathbf{x}.
\end{displaymath}
\begin{itemize}
\item Eigenvalues are the $\lambda$ values that solve the determinantal equation $|\mathbf{A}-\lambda\mathbf{I}| = 0$.
\item The determinant is the product of the eigenvalues: $|\mathbf{A}| = \prod_{i=1}^n \lambda_i$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Spectral decomposition of symmetric matrices}
%\framesubtitle{}
The \emph{Spectral decomposition theorem} says that every square and symmetric matrix $\mathbf{A} = [a_{i,j}]$ may be written
\begin{equation*}
\mathbf{A} = \mathbf{CDC}^\prime,
\end{equation*}
where the columns of $\mathbf{C}$ (which may also be denoted $\mathbf{x}_1, \ldots, \mathbf{x}_n$) are the eigenvectors of $\mathbf{A}$, and the diagonal matrix $\mathbf{D}$ contains the corresponding eigenvalues.
\begin{displaymath}
\mathbf{D} = \left[ \begin{array}{c c c c }
\lambda_1 & 0 & \cdots & 0 \\
0 & \lambda_2 & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \lambda_n \\
\end{array} \right]
\end{displaymath}
The eigenvectors may be chosen to be orthonormal, so that $\mathbf{C}$ is an orthogonal matrix. That is, $\mathbf{CC}^\prime = \mathbf{C}^\prime\mathbf{C} = \mathbf{I}$.
\end{frame}

\section{Positive definite matrices}

\begin{frame}
\frametitle{Positive definite matrices}
%\framesubtitle{}
The $n \times n$ matrix $\mathbf{A}$ is said to be \emph{positive definite} if
\begin{displaymath}
\mathbf{y}^\prime \mathbf{A} \mathbf{y} > 0
\end{displaymath}
for \emph{all} $n \times 1$ vectors $\mathbf{y} \neq \mathbf{0}$.
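For example, the identity matrix is positive definite: if $\mathbf{y} \neq \mathbf{0}$, then
\begin{displaymath}
\mathbf{y}^\prime \mathbf{I} \mathbf{y} = \mathbf{y}^\prime\mathbf{y} = \sum_{i=1}^n y_i^2 > 0.
\end{displaymath}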
$\mathbf{A}$ is called \emph{non-negative definite} (or sometimes positive semi-definite) if $\mathbf{y}^\prime \mathbf{A} \mathbf{y} \geq 0$ for all $n \times 1$ vectors $\mathbf{y}$.
\end{frame}

\begin{frame}
\frametitle{Example: Show $\mathbf{X}^\prime\mathbf{X}$ non-negative definite}
Let $\mathbf{X}$ be an $n \times p$ matrix of real constants and $\mathbf{y}$ be $p \times 1$. Then $\mathbf{Z} = \mathbf{Xy}$ is $n \times 1$, and
\begin{eqnarray*}
& & \mathbf{y}^\prime \, (\mathbf{X}^\prime\mathbf{X}) \, \mathbf{y} \\
& = & (\mathbf{Xy})^\prime (\mathbf{Xy}) \\
& = & \mathbf{Z}^\prime \mathbf{Z} \\
& = & \sum_{i=1}^n Z_i^2 \geq 0
\end{eqnarray*}
\end{frame}

\begin{frame}
\frametitle{Some properties of symmetric positive definite matrices}
\framesubtitle{Variance-covariance matrices are often assumed positive definite.}
For a symmetric matrix,
\vspace{5mm}
\begin{itemize}
\item[] Positive definite
\item[]~~~~~~~~$\Downarrow$
\item[] All eigenvalues positive
\item[]~~~~~~~~$\Downarrow$
\item[] Inverse exists $\Leftrightarrow$ Columns (rows) linearly independent
\end{itemize}
\vspace{5mm}
If a real symmetric matrix is also non-negative definite, as a variance-covariance matrix \emph{must} be, then Inverse exists $\Rightarrow$ Positive definite.
\end{frame}

\begin{frame}
\frametitle{Showing Positive definite $\Rightarrow$ Eigenvalues positive}
\framesubtitle{For example}
Let $\mathbf{A}$ be square and symmetric as well as positive definite.
\begin{itemize}
\item Spectral decomposition says $\mathbf{A} = \mathbf{CDC}^\prime$.
\item Using $\mathbf{y}^\prime \mathbf{A} \mathbf{y} > 0$, let $\mathbf{y}$ be an eigenvector, say the third one.
\item Because the eigenvectors are orthonormal, $\mathbf{C}^\prime\mathbf{y} = (0, 0, 1, \ldots, 0)^\prime$, so
\end{itemize}
\begin{eqnarray*}
\mathbf{y}^\prime \mathbf{A} \mathbf{y} & = & \mathbf{y}^\prime \mathbf{CDC}^\prime \mathbf{y} \\
& = & ( \begin{array}{ccccc} 0 & 0 & 1 & \cdots & 0 \end{array} )
\left( \begin{array}{c c c c }
\lambda_1 & 0 & \cdots & 0 \\
0 & \lambda_2 & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \lambda_n \\
\end{array} \right)
\left( \begin{array}{c} 0 \\ 0 \\ 1 \\ \vdots \\ 0 \end{array} \right) \\
& = & \lambda_3 \\
& > & 0
\end{eqnarray*}
\end{frame}

\begin{frame}
\frametitle{Inverse of a diagonal matrix}
%\framesubtitle{}
Suppose $\mathbf{D} = [d_{i,j}]$ is a diagonal matrix with non-zero diagonal elements. It is easy to verify that
\begin{displaymath}
\left( \begin{array}{c c c c }
d_{1,1} & 0 & \cdots & 0 \\
0 & d_{2,2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & d_{n,n} \\
\end{array} \right)
\left( \begin{array}{c c c c }
1/d_{1,1} & 0 & \cdots & 0 \\
0 & 1/d_{2,2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & 1/d_{n,n} \\
\end{array} \right) = \mathbf{I}
\end{displaymath}
And
\begin{displaymath}
\left( \begin{array}{c c c c }
1/d_{1,1} & 0 & \cdots & 0 \\
0 & 1/d_{2,2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & 1/d_{n,n} \\
\end{array} \right)
\left( \begin{array}{c c c c }
d_{1,1} & 0 & \cdots & 0 \\
0 & d_{2,2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & d_{n,n} \\
\end{array} \right) = \mathbf{I}
\end{displaymath}
\end{frame}

\begin{frame}
\frametitle{Showing Eigenvalues positive $\Rightarrow$ Inverse exists}
\framesubtitle{For a symmetric, positive definite matrix}
Let $\mathbf{A}$ be symmetric and positive definite. Then $\mathbf{A} = \mathbf{CDC}^\prime$ and its eigenvalues are positive.
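Because all the eigenvalues are positive, the previous slide shows that $\mathbf{D}^{-1}$ exists: it is the diagonal matrix with diagonal elements $1/\lambda_1, \ldots, 1/\lambda_n$.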
\vspace{2mm}

Let $\mathbf{B} = \mathbf{C}\mathbf{D}^{-1}\mathbf{C}^\prime$.

\vspace{5mm}

Showing $\mathbf{B} = \mathbf{A}^{-1}$:
\begin{eqnarray*}
\mathbf{AB} & = & \mathbf{CDC}^\prime \, \mathbf{C}\mathbf{D}^{-1}\mathbf{C}^\prime
= \mathbf{C}\mathbf{D}\mathbf{D}^{-1}\mathbf{C}^\prime = \mathbf{C}\mathbf{C}^\prime = \mathbf{I} \\
\mathbf{BA} & = & \mathbf{C}\mathbf{D}^{-1}\mathbf{C}^\prime \, \mathbf{CDC}^\prime
= \mathbf{C}\mathbf{D}^{-1}\mathbf{D}\mathbf{C}^\prime = \mathbf{C}\mathbf{C}^\prime = \mathbf{I}
\end{eqnarray*}
So
{\LARGE
\begin{displaymath}
\mathbf{A}^{-1} = \mathbf{C}\mathbf{D}^{-1}\mathbf{C}^\prime
\end{displaymath}
} % End size
\end{frame}

\section{Square root matrices}

\begin{frame}
\frametitle{Square root matrices}
\framesubtitle{For symmetric, non-negative definite matrices} % Real if non-neg def
{\small
Define
\begin{displaymath}
\mathbf{D}^{1/2} = \left( \begin{array}{c c c c }
\sqrt{\lambda_1} & 0 & \cdots & 0 \\
0 & \sqrt{\lambda_2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \sqrt{\lambda_n} \\
\end{array} \right)
\end{displaymath}
So that
\begin{eqnarray*}
\mathbf{D}^{1/2} \mathbf{D}^{1/2} & = &
\left( \begin{array}{c c c c }
\sqrt{\lambda_1} & 0 & \cdots & 0 \\
0 & \sqrt{\lambda_2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \sqrt{\lambda_n} \\
\end{array} \right)
\left( \begin{array}{c c c c }
\sqrt{\lambda_1} & 0 & \cdots & 0 \\
0 & \sqrt{\lambda_2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \sqrt{\lambda_n} \\
\end{array} \right) \\
& = &
\left( \begin{array}{c c c c }
\lambda_1 & 0 & \cdots & 0 \\
0 & \lambda_2 & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \lambda_n \\
\end{array} \right) = \mathbf{D}
\end{eqnarray*}
} % End size
\end{frame}

\begin{frame}
\frametitle{For a non-negative definite, symmetric matrix $\mathbf{A}$}
Define
{\LARGE
\begin{displaymath}
\mathbf{A}^{1/2} = \mathbf{C} \mathbf{D}^{1/2} \mathbf{C}^\prime
\end{displaymath}
} % End size
So that
\begin{eqnarray*}
\mathbf{A}^{1/2}\mathbf{A}^{1/2} & = & \mathbf{C} \mathbf{D}^{1/2} \mathbf{C}^\prime \mathbf{C} \mathbf{D}^{1/2} \mathbf{C}^\prime \\
& = & \mathbf{C} \mathbf{D}^{1/2} \, \mathbf{I} \, \mathbf{D}^{1/2} \mathbf{C}^\prime \\
& = & \mathbf{C} \mathbf{D}^{1/2} \mathbf{D}^{1/2} \mathbf{C}^\prime \\
& = & \mathbf{C} \mathbf{D} \mathbf{C}^\prime \\
& = & \mathbf{A}
\end{eqnarray*}
\end{frame}

\begin{frame}
\frametitle{The square root of the inverse is the inverse of the square root}
Let $\mathbf{A}$ be symmetric and positive definite, with $\mathbf{A} = \mathbf{CDC}^\prime$.

\vspace{2mm}

Let $\mathbf{B} = \mathbf{C}\mathbf{D}^{-1/2} \mathbf{C}^\prime $. What is $\mathbf{D}^{-1/2}$?

\vspace{2mm}

Show $\mathbf{B} = \left( \mathbf{A}^{-1} \right)^{1/2}$
\begin{eqnarray*}
\mathbf{BB} & = & \mathbf{C}\mathbf{D}^{-1/2} \mathbf{C}^\prime \mathbf{C}\mathbf{D}^{-1/2} \mathbf{C}^\prime \\
& = & \mathbf{C}\mathbf{D}^{-1} \mathbf{C}^\prime = \mathbf{A}^{-1}
\end{eqnarray*}

\vspace{2mm}

Show $\mathbf{B} = \left( \mathbf{A}^{1/2} \right)^{-1}$
\begin{eqnarray*}
\mathbf{A}^{1/2}\mathbf{B} & = & \mathbf{C}\mathbf{D}^{1/2} \mathbf{C}^\prime \mathbf{C}\mathbf{D}^{-1/2} \mathbf{C}^\prime = \mathbf{I} \\
\mathbf{B} \mathbf{A}^{1/2} & = & \mathbf{C}\mathbf{D}^{-1/2} \mathbf{C}^\prime \mathbf{C}\mathbf{D}^{1/2} \mathbf{C}^\prime = \mathbf{I}
\end{eqnarray*}
Just write ~~~~~~~ {\LARGE $\mathbf{A}^{-1/2} = \mathbf{C}\mathbf{D}^{-1/2} \mathbf{C}^\prime$}
\end{frame}

\section{R}

\begin{frame}[fragile]
\frametitle{Matrix calculation with R}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> is.matrix(3) # Is the number 3 a 1x1 matrix?
\end{verbatim} } % End color \begin{verbatim} [1] FALSE \end{verbatim} {\color{blue} \begin{verbatim} > treecorr = cor(trees); treecorr \end{verbatim} } % End color \begin{verbatim} Girth Height Volume Girth 1.0000000 0.5192801 0.9671194 Height 0.5192801 1.0000000 0.5982497 Volume 0.9671194 0.5982497 1.0000000 \end{verbatim} {\color{blue} \begin{verbatim} > is.matrix(treecorr) \end{verbatim} } % End color \begin{verbatim} [1] TRUE \end{verbatim} } % End size \end{frame} \begin{frame}[fragile] \frametitle{Creating matrices} \framesubtitle{Bind rows into a matrix} {\footnotesize % or scriptsize {\color{blue} \begin{verbatim} > # Bind rows of a matrix together > A = rbind( c(3, 2, 6,8), + c(2,10,-7,4), + c(6, 6, 9,1) ); A \end{verbatim} } % End color \begin{verbatim} [,1] [,2] [,3] [,4] [1,] 3 2 6 8 [2,] 2 10 -7 4 [3,] 6 6 9 1 \end{verbatim} {\color{blue} \begin{verbatim} > # Transpose > t(A) \end{verbatim} } % End color \begin{verbatim} [,1] [,2] [,3] [1,] 3 2 6 [2,] 2 10 6 [3,] 6 -7 9 [4,] 8 4 1 \end{verbatim} } % End size \end{frame} \begin{frame}[fragile] \frametitle{Matrix multiplication} \framesubtitle{Remember, $\mathbf{A}$ is $3\times 4$} {\footnotesize % or scriptsize {\color{blue} \begin{verbatim} > # U = AA' (3x3), V = A'A (4x4) > U = A %*% t(A) > V = t(A) %*% A; V \end{verbatim} } % End color \begin{verbatim} [,1] [,2] [,3] [,4] [1,] 49 62 58 38 [2,] 62 140 -4 62 [3,] 58 -4 166 29 [4,] 38 62 29 81 \end{verbatim} } % End size \end{frame} \begin{frame}[fragile] \frametitle{Determinants} %\framesubtitle{} {\footnotesize % or scriptsize {\color{blue} \begin{verbatim} > # U = AA' (3x3), V = A'A (4x4) > # So rank(V) cannot exceed 3 and det(V)=0 > det(U); det(V) \end{verbatim} } % End color \begin{verbatim} [1] 1490273 [1] -3.622862e-09 \end{verbatim} } % End size Inverse of $\mathbf{U}$ exists, but inverse of $\mathbf{V}$ does not. \end{frame} \begin{frame}[fragile] \frametitle{Inverses} %\framesubtitle{} \begin{itemize} \item The \texttt{solve} function is for solving systems of linear equations like $\mathbf{Mx}=\mathbf{b}$. \item Just typing \texttt{solve(M)} gives $\mathbf{M}^{-1}$. 
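\item For example, \texttt{solve(M, b)} returns $\mathbf{M}^{-1}\mathbf{b}$, the solution $\mathbf{x}$ of $\mathbf{Mx}=\mathbf{b}$.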
\end{itemize}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> solve(U)
\end{verbatim}
} % End color
\begin{verbatim}
              [,1]          [,2]          [,3]
[1,]  0.0173505123 -8.508508e-04 -1.029342e-02
[2,] -0.0008508508  5.997559e-03  2.013054e-06
[3,] -0.0102934160  2.013054e-06  1.264265e-02
\end{verbatim}
{\color{blue}
\begin{verbatim}
> solve(V)
\end{verbatim}
} % End color
{\color{red}
\begin{verbatim}
Error in solve.default(V) : system is computationally singular:
  reciprocal condition number = 6.64193e-18
\end{verbatim}
} % End color
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Eigenvalues and eigenvectors}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> eigen(U)
\end{verbatim}
} % End color
\begin{verbatim}
$values
[1] 234.01162 162.89294  39.09544

$vectors
           [,1]       [,2]        [,3]
[1,] -0.6025375  0.1592598  0.78203893
[2,] -0.2964610 -0.9544379 -0.03404605
[3,] -0.7409854  0.2523581 -0.62229894
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{$\mathbf{V}$ should have at least one zero eigenvalue}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> eigen(V)
\end{verbatim}
} % End color
\begin{verbatim}
$values
[1]  2.340116e+02  1.628929e+02  3.909544e+01 -1.012719e-14

$vectors
           [,1]         [,2]       [,3]         [,4]
[1,] -0.4475551  0.006507269 -0.2328249  0.863391352
[2,] -0.5632053 -0.604226296 -0.4014589 -0.395652773
[3,] -0.5366171  0.776297432 -0.1071763 -0.312917928
[4,] -0.4410627 -0.179528649  0.8792818  0.009829883
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Spectral decomposition $\mathbf{V}= \mathbf{CDC}^\prime$}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> eigenV = eigen(V)
> C = eigenV$vectors; D = diag(eigenV$values); D
\end{verbatim}
} % End color
\begin{verbatim}
         [,1]     [,2]     [,3]          [,4]
[1,] 234.0116   0.0000  0.00000  0.000000e+00
[2,]   0.0000 162.8929  0.00000  0.000000e+00
[3,]   0.0000   0.0000 39.09544  0.000000e+00
[4,]   0.0000   0.0000  0.00000 -1.012719e-14
\end{verbatim}
{\color{blue}
\begin{verbatim}
> # C is an orthogonal matrix
> C %*% t(C)
\end{verbatim}
} % End color
\begin{verbatim}
              [,1]         [,2]         [,3]          [,4]
[1,]  1.000000e+00 5.551115e-17 0.000000e+00 -3.989864e-17
[2,]  5.551115e-17 1.000000e+00 2.636780e-16  3.556183e-17
[3,]  0.000000e+00 2.636780e-16 1.000000e+00  2.558717e-16
[4,] -3.989864e-17 3.556183e-17 2.558717e-16  1.000000e+00
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Verify $\mathbf{V}= \mathbf{CDC}^\prime$}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> V; C %*% D %*% t(C)
\end{verbatim}
} % End color
\begin{verbatim}
     [,1] [,2] [,3] [,4]
[1,]   49   62   58   38
[2,]   62  140   -4   62
[3,]   58   -4  166   29
[4,]   38   62   29   81
     [,1] [,2] [,3] [,4]
[1,]   49   62   58   38
[2,]   62  140   -4   62
[3,]   58   -4  166   29
[4,]   38   62   29   81
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Square root matrix $\mathbf{V}^{1/2}= \mathbf{CD}^{1/2}\mathbf{C}^\prime$}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> sqrtV = C %*% sqrt(D) %*% t(C)
\end{verbatim}
} % End color
{\color{red}
\begin{verbatim}
Warning message:
In sqrt(D) : NaNs produced
\end{verbatim}
} % End color
{\color{blue}
\begin{verbatim}
> # Multiply to get V
> sqrtV %*% sqrtV; V
\end{verbatim}
} % End color
\begin{verbatim}
     [,1] [,2] [,3] [,4]
[1,]  NaN  NaN  NaN  NaN
[2,]  NaN  NaN  NaN  NaN
[3,]  NaN  NaN  NaN  NaN
[4,]  NaN  NaN  NaN  NaN
     [,1] [,2] [,3] [,4]
[1,]   49   62   58   38
[2,]   62  140   -4   62
[3,]   58   -4  166   29
[4,]   38   62   29   81
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{What happened?}
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> D; sqrt(D)
\end{verbatim}
} % End color
\begin{verbatim}
         [,1]     [,2]     [,3]          [,4]
[1,] 234.0116   0.0000  0.00000  0.000000e+00
[2,]   0.0000 162.8929  0.00000  0.000000e+00
[3,]   0.0000   0.0000 39.09544  0.000000e+00
[4,]   0.0000   0.0000  0.00000 -1.012719e-14
         [,1]     [,2]     [,3] [,4]
[1,] 15.29744  0.00000 0.000000    0
[2,]  0.00000 12.76295 0.000000    0
[3,]  0.00000  0.00000 6.252635    0
[4,]  0.00000  0.00000 0.000000  NaN
\end{verbatim}
{\color{red}
\begin{verbatim}
Warning message:
In sqrt(D) : NaNs produced
\end{verbatim}
} % End color
} % End size
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{frame}
\frametitle{Copyright Information}

This slide show was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a \href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US} {Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website: \href{http://www.utstat.toronto.edu/~brunner/oldclass/302f13} {\small\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/302f13}}

\end{frame}

\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%