% \documentclass[serif]{beamer} % Serif for Computer Modern math font. \documentclass[serif, handout]{beamer} % Handout to ignore pause statements \hypersetup{colorlinks,linkcolor=,urlcolor=red} \usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice! \setbeamertemplate{navigation symbols}{} % Suppress navigation symbols % \usetheme{Berlin} % Displays sections on top \usetheme{Frankfurt} % Displays section titles on top: Fairly thin but still swallows some material at bottom of crowded slides %\usetheme{Berkeley} \usepackage[english]{babel} \usepackage{amsmath} % for binom % \usepackage{graphicx} % To include pdf files! % \definecolor{links}{HTML}{2A1B81} % \definecolor{links}{red} \setbeamertemplate{footline}[frame number] \mode \title{More Linear Algebra\footnote{See Chapter 2 of \emph{Linear models in statistics} for more detail. This slide show is an open-source document. See last slide for copyright information.}} \subtitle{STA 302: Fall 2015} \date{} % To suppress date \begin{document} % The text uses parentheses instead of bracket notation, but I'm sticking with brackets; it's clearer. Warn the class. \begin{frame} \titlepage \end{frame} \begin{frame} \frametitle{Overview} \tableofcontents \end{frame} \section{Things you already know} \begin{frame} \frametitle{You already know about} \begin{itemize} \item Matrices $A = (a_{ij})$ \pause \item Column vectors $\mathbf{v} = (v_j)$ \pause \item Matrix addition and subtraction $A+B = (a_{ij}+b_{ij})$ \pause \item Scalar multiplication $a B = (a\,b_{ij})$ \pause \item Matrix multiplication $AB = \left( \displaystyle{\sum_k}a_{ik}b_{kj} \right)$ \pause \item[] In words: The $i,j$ element of $AB$ is the inner product of row $i$ of $A$ with column $j$ of $B$. \pause \item Inverse: \pause $A^{-1}A = AA^{-1} = I$ \pause \item Transpose $A^\prime = (a_{ji})$ \pause \item Symmetric matrices: $A = A^\prime$ \pause \item Determinants \pause \item Linear independence \end{itemize} \end{frame} \begin{frame} \frametitle{Inverses: Proving $B=A^{-1}$} \pause \begin{itemize} \item $B = A^{-1}$ means $AB = BA = I$. \pause \item It looks like you have two things to show. \pause \item But if $A$ and $B$ are square matrices of the same size, you only need to do it in one direction. \end{itemize} \end{frame} \begin{frame} \frametitle{Theorem} %\framesubtitle{} If $A$ and $B$ are square matrices and $AB = I$, then $A$ and $B$ are inverses. \pause \vspace{3mm} \textbf{Proof}: Suppose $AB = I$ \pause \begin{itemize} \item $A$ and $B$ must both have inverses, for otherwise \pause \linebreak $|AB| = |A| \, |B| = 0 \pause \neq |I|=1$. \pause Now, \pause \item $AB = I \pause \Rightarrow ABB^{-1} = IB^{-1} \pause \Rightarrow A = B^{-1}$. \pause \item $AB = I \pause \Rightarrow A^{-1}AB = A^{-1}I \pause \Rightarrow B = A^{-1}$. \end{itemize} \end{frame} \begin{frame} \frametitle{How to show $A^{-1\prime}= A^{\prime-1}$} \pause \begin{itemize} \item Let $B=A^{-1}$. \pause \item Want to prove that $B^\prime$ is the inverse of $A^\prime$. \pause \item It is enough to show that $B^\prime A^\prime = I$. \pause \item $AB = I \pause \Rightarrow B^\prime A^\prime = I^\prime \pause = I$. \pause \item So $B^\prime=A^{\prime -1}$~~$\blacksquare$ \end{itemize} \end{frame} \begin{frame} \frametitle{Three mistakes that will get you a zero} \framesubtitle{Numbers are $1 \times 1$ matrices, but larger matrices are not just numbers.} \pause You will get a zero if you \pause \vspace{2mm} \begin{itemize} \item Write $AB = BA$. It's not true in general. 
\pause
\item Write $A^{-1}$ when $A$ is not a square matrix. The inverse is not even defined.
\pause
\item Represent the inverse of a matrix (even if it exists) by writing it in the
denominator, \pause like
$\mathbf{a}^\prime B^{-1}\mathbf{a} = \frac{\mathbf{a}^\prime \mathbf{a}}{B}$.
\pause Matrices are not just numbers.
\pause
\end{itemize}

\vspace{5mm}
If you commit one of these crimes, the mark for the question (or part of a question,
like 3c) is zero. The rest of your answer will be ignored.
\end{frame}

\begin{frame}
\frametitle{Half marks off, at least}
%\framesubtitle{At least}
You will lose \emph{at least} half marks for writing a product like $AB$ when the
number of columns in $A$ does not equal the number of rows in $B$.
\end{frame}

\begin{frame}
\frametitle{Linear combination of vectors}
\pause
%\framesubtitle{}
Let $\mathbf{x}_1, \ldots, \mathbf{x}_p$ be $n \times 1$ vectors and
$a_1, \ldots, a_p$ be scalars. \pause A \emph{linear combination} is \pause
\renewcommand{\arraystretch}{2.0}
\begin{displaymath}
\begin{array}{ccccccccc} % 9 columns
\mathbf{c} &=& ~~a_1 \mathbf{x}_1 &+& ~~a_2 \mathbf{x}_2 &+& \cdots &+& ~~a_p \mathbf{x}_p \\ \pause
&=& a_1 \left( \begin{array}{c} x_{11} \\ x_{21} \\ \vdots \\ x_{n1} \end{array}\right)
&+& a_2 \left( \begin{array}{c} x_{12} \\ x_{22} \\ \vdots \\ x_{n2} \end{array}\right)
&+& \cdots
&+& a_p \left( \begin{array}{c} x_{1p} \\ x_{2p} \\ \vdots \\ x_{np} \end{array}\right)
\end{array}
\end{displaymath}
\renewcommand{\arraystretch}{1.0}
\end{frame}

\begin{frame}
\frametitle{Linear independence}
\pause
%\framesubtitle{}
A set of vectors $\mathbf{x}_1, \ldots, \mathbf{x}_p$ is said to be \emph{linearly
dependent} \pause if there is a set of scalars $a_1, \ldots, a_p$, not all zero, with \pause
\begin{displaymath}
a_1 \left( \begin{array}{c} x_{11} \\ x_{21} \\ \vdots \\ x_{n1} \end{array}\right) +
a_2 \left( \begin{array}{c} x_{12} \\ x_{22} \\ \vdots \\ x_{n2} \end{array}\right) +
\cdots +
a_p \left( \begin{array}{c} x_{1p} \\ x_{2p} \\ \vdots \\ x_{np} \end{array}\right) =
\left( \begin{array}{c} 0 \\ 0 \\ \vdots \\ 0 \end{array}\right)
\end{displaymath}
\pause
\vspace{2mm}
If no such constants $a_1, \ldots, a_p$ exist, the vectors are linearly independent.
\pause That is,

\vspace{2mm}
If $a_1 \mathbf{x}_1 + a_2 \mathbf{x}_2 + \cdots + a_p \mathbf{x}_p = \mathbf{0}$ implies
$a_1 = a_2 = \cdots = a_p = 0$, then the vectors are said to be \emph{linearly independent}.
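\pause
\vspace{2mm}
A quick example: with $a_1 = 2$ and $a_2 = -1$,
$2 \left(\begin{smallmatrix} 1 \\ 2 \end{smallmatrix}\right)
- \left(\begin{smallmatrix} 2 \\ 4 \end{smallmatrix}\right)
= \left(\begin{smallmatrix} 0 \\ 0 \end{smallmatrix}\right)$,
so the vectors $(1,2)^\prime$ and $(2,4)^\prime$ are linearly dependent.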
\end{frame}

\begin{frame}
\frametitle{Bind the vectors $\mathbf{x}_1, \ldots, \mathbf{x}_p$ into a matrix}
\pause
% \framesubtitle{Yields a more convenient definition of linear independence}
{\footnotesize
%\renewcommand{\arraystretch}{2.0}
\begin{displaymath}
\begin{array}{ccccccccc} % 9 columns
& & a_1 \mathbf{x}_1~~ &+& a_2 \mathbf{x}_2~~ &+& \cdots &+& a_p \mathbf{x}_p~~ \\
&&&&&& \\ \pause
&=& \left( \begin{array}{c} x_{11} \\ x_{21} \\ \vdots \\ x_{n1} \end{array}\right)a_1
&+& \left( \begin{array}{c} x_{12} \\ x_{22} \\ \vdots \\ x_{n2} \end{array}\right)a_2
&+& \cdots
&+& \left( \begin{array}{c} x_{1p} \\ x_{2p} \\ \vdots \\ x_{np} \end{array}\right)a_p \\
\end{array}
\end{displaymath}
\pause
%\renewcommand{\arraystretch}{1.0}
\begin{eqnarray*}
& = & \left( \begin{array}{cccc}
x_{11} & x_{12} & \cdots & x_{1p} \\
x_{21} & x_{22} & \cdots & x_{2p} \\
\vdots & \vdots & \vdots & \vdots \\
x_{n1} & x_{n2} & \cdots & x_{np} \\
\end{array}\right)
\left( \begin{array}{c} a_{1} \\ a_{2} \\ \vdots \\ a_{p} \end{array}\right) \\
&& \\ \pause
& = & X\mathbf{a} \hspace{82mm} % Such an ugly way to get nice alignment!
\end{eqnarray*}
} % End size
\end{frame}

\begin{frame}
\frametitle{A more convenient definition of linear independence}
\framesubtitle{$a_1 \mathbf{x}_1 + a_2 \mathbf{x}_2 + \cdots + a_p \mathbf{x}_p = X\mathbf{a}$}
\pause
Let $X$ be an $n \times p$ matrix of constants. The columns of $X$ are said to be
\emph{linearly dependent} if there exists $\mathbf{a} \neq \mathbf{0}$ with
$X\mathbf{a} = \mathbf{0}$. \pause We will say that the columns of $X$ are linearly
\emph{independent} if $X\mathbf{a} = \mathbf{0}$ implies $\mathbf{a} = \mathbf{0}$.
\pause

\vspace{10mm}
For example, show that if $B^{-1}$ exists, then the columns of $B$ are linearly independent.
\pause
\begin{displaymath}
B\mathbf{a} = \mathbf{0} \pause \Rightarrow B^{-1}B\mathbf{a} = B^{-1}\mathbf{0}
\pause \Rightarrow \mathbf{a} = \mathbf{0}.
\end{displaymath}
\end{frame}

\section{Trace}

\begin{frame}
\frametitle{Trace of a square matrix}
%\framesubtitle{}
\begin{itemize}
\item Sum of diagonal elements \pause
\item Obvious: $tr(A+B) = tr(A)+tr(B)$ \pause
\item Not obvious: $tr(AB) = tr(BA)$ \pause
\item Even though in general $AB \neq BA$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{$tr(AB) = tr(BA)$}
\framesubtitle{Let $A$ be $p \times q$ and $B$ be $q \times p$, so that $AB$ is $p \times p$ and $BA$ is $q \times q$.}
\pause
{\small
First, agree that $\sum_{i=1}^n x_i = \sum_{j=1}^n x_j$. \pause
% \vspace{2mm}
\begin{eqnarray*}
tr(AB) & = & tr(\left[ \sum_{k=1}^q a_{ik}b_{kj} \right]) \\ \pause
& = & {\color{green} \sum_{i=1}^p} \sum_{k=1}^q {\color{red} a_{ik}} {\color{blue} b_{ki}} \\ \pause
& = & \sum_{k=1}^q {\color{green} \sum_{i=1}^p} {\color{blue} b_{ki}} {\color{red} a_{ik}} \\ \pause
& = & \sum_{i=1}^q \sum_{k=1}^p b_{ik} a_{ki} \\ \pause
& = & tr(\left[ \sum_{k=1}^p b_{ik} a_{kj} \right])\\ \pause
& = & tr(BA)
\end{eqnarray*}
} % End size
\end{frame}

\section{Spectral decomposition}

\begin{frame}
\frametitle{Eigenvalues and eigenvectors}
\pause
%\framesubtitle{}
Let $A = [a_{i,j}]$ be an $n \times n$ matrix, so the following applies only to square
matrices. \pause $A$ is said to have an \emph{eigenvalue} $\lambda$ and (non-zero)
\emph{eigenvector} $\mathbf{x} \neq \mathbf{0}$ corresponding to $\lambda$ if \pause
\begin{displaymath}
A\mathbf{x} = \lambda\mathbf{x}.
\end{displaymath}
\pause
Eigenvectors can be scaled to have length one, so that $\mathbf{x}^\prime\mathbf{x}=1$.
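\pause
(This scaling is possible because if $A\mathbf{x} = \lambda\mathbf{x}$ with
$\mathbf{x} \neq \mathbf{0}$, then $A(c\,\mathbf{x}) = \lambda(c\,\mathbf{x})$ for any
scalar $c \neq 0$, so one may take $c = 1/\sqrt{\mathbf{x}^\prime\mathbf{x}}$.)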
\pause
\begin{itemize}
\item Eigenvalues are the $\lambda$ values that solve the determinantal equation
$|A-\lambda I| = 0$. \pause
\item The determinant is the product of the eigenvalues: $|A| = \prod_{i=1}^n \lambda_i$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Spectral decomposition of symmetric matrices}
\pause
%\framesubtitle{}
The \emph{Spectral decomposition theorem} says that every real symmetric matrix
$A = [a_{i,j}]$ may be written
\begin{equation*}
A = CDC^\prime,
\end{equation*}
\pause
where the columns of $C$ (which may also be denoted $\mathbf{x}_1, \ldots, \mathbf{x}_n$)
are the eigenvectors of $A$, \pause and the diagonal matrix $D$ contains the corresponding
eigenvalues. \pause
\begin{displaymath}
D = \left( \begin{array}{c c c c }
\lambda_1 & 0 & \cdots & 0 \\
0 & \lambda_2 & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \lambda_n \\
\end{array} \right)
\end{displaymath}
\pause
The eigenvectors may be chosen to be orthonormal, so that $C$ is an orthogonal matrix.
\pause That is, $CC^\prime = C^\prime C = I$.
\end{frame}

\section{Positive definite}

\begin{frame}
\frametitle{Positive definite matrices}
\pause
%\framesubtitle{}
The $n \times n$ matrix $A$ is said to be \emph{positive definite} if
\begin{displaymath}
\mathbf{y}^\prime A \mathbf{y} > 0
\end{displaymath}
for \emph{all} $n \times 1$ vectors $\mathbf{y} \neq \mathbf{0}$. \pause It is called
\emph{non-negative definite} (or sometimes positive semi-definite) if \pause
$\mathbf{y}^\prime A \mathbf{y} \geq 0$ for all $n \times 1$ vectors $\mathbf{y}$.
\end{frame}

\begin{frame}
\frametitle{Example: Show $X^\prime X$ non-negative definite}
\pause
Let $X$ be an $n \times p$ matrix of real constants and let $\mathbf{y}$ be $p \times 1$.
\pause Then $\mathbf{z} = X\mathbf{y}$ is $n \times 1$, and \pause
\begin{eqnarray*}
& & \mathbf{y}^\prime \, (X^\prime X) \, \mathbf{y} \\ \pause
& = & (X\mathbf{y})^\prime (X\mathbf{y}) \\ \pause
& = & \mathbf{z}^\prime \mathbf{z} \\ \pause
& = & \sum_{i=1}^n z_i^2 \pause \geq 0 ~~ \blacksquare
\end{eqnarray*}
\end{frame}

\begin{frame}
\frametitle{Some properties of symmetric positive definite matrices}
\framesubtitle{Variance-covariance matrices are often assumed positive definite.}
\pause
For a symmetric matrix, \pause
\vspace{5mm}
\begin{itemize}
\item[] Positive definite
\item[]~~~~~~~~$\Downarrow$
\item[] All eigenvalues positive \pause
\item[]~~~~~~~~$\Downarrow$
\item[] Inverse exists \pause $\Leftrightarrow$ Columns (rows) linearly independent.
\end{itemize}
\pause
\vspace{5mm}
If a real symmetric matrix is also non-negative definite, as a variance-covariance matrix
\emph{must} be, then the implication runs the other way as well:
Inverse exists $\Rightarrow$ Positive definite.
\end{frame}

\begin{frame}
\frametitle{Showing Positive definite $\Rightarrow$ Eigenvalues positive}
\pause
% \framesubtitle{For example}
Let the $p \times p$ matrix $A$ be positive definite, so that
$\mathbf{y}^\prime A \mathbf{y} > 0$ for all $\mathbf{y} \neq \mathbf{0}$. \pause
\begin{itemize}
\item[] $\lambda$ an eigenvalue means $A\mathbf{x} = \lambda\mathbf{x}$ \pause with
$\mathbf{x}^\prime\mathbf{x}=1$. \pause
\item[] $\Rightarrow \mathbf{x}^\prime A \mathbf{x} = \mathbf{x}^\prime \lambda\mathbf{x}
\pause = \lambda \mathbf{x}^\prime\mathbf{x} \pause = \lambda \pause >0$. ~ $\blacksquare$
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Inverse of a diagonal matrix}
\framesubtitle{To set things up}
\pause
Suppose $D = [d_{i,j}]$ is a diagonal matrix with non-zero diagonal elements.
\pause
It is easy to verify that
\begin{displaymath}
\left( \begin{array}{c c c c }
1/d_{1,1} & 0 & \cdots & 0 \\
0 & 1/d_{2,2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & 1/d_{n,n} \\
\end{array} \right)
\left( \begin{array}{c c c c }
d_{1,1} & 0 & \cdots & 0 \\
0 & d_{2,2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & d_{n,n} \\
\end{array} \right) = I
\end{displaymath}
\pause
So $D^{-1}$ exists.
\end{frame}

\begin{frame}
\frametitle{Showing Eigenvalues positive $\Rightarrow$ Inverse exists}
\framesubtitle{For a symmetric, positive definite matrix}
\pause
Let $A$ be symmetric and positive definite. \pause Then $A = CDC^\prime$, \pause and its
eigenvalues are positive. \pause

\vspace{2mm}
Let $B = CD^{-1}C^\prime$. \pause Show $B = A^{-1}$. \pause
% \vspace{2mm}
\begin{eqnarray*}
AB & = & \pause CDC^\prime \, CD^{-1}C^\prime
\pause = CD \, (C^\prime C) \, D^{-1}C^\prime
\pause = CDD^{-1}C^\prime
\pause = CC^\prime
\pause = I \\
\end{eqnarray*}
So
{\LARGE
\begin{displaymath}
A^{-1} = CD^{-1}C^\prime
\end{displaymath}
} % End size
\end{frame}

\section{Square root matrices}

\begin{frame}
\frametitle{Square root matrices}
\framesubtitle{For symmetric, non-negative definite matrices}
\pause % Real if non-neg def
{\small
To set things up, define
\begin{displaymath}
D^{1/2} = \left( \begin{array}{c c c c }
\sqrt{\lambda_1} & 0 & \cdots & 0 \\
0 & \sqrt{\lambda_2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \sqrt{\lambda_n} \\
\end{array} \right)
\end{displaymath}
\pause
So that
\begin{eqnarray*}
D^{1/2} D^{1/2} \pause & = &
\left( \begin{array}{c c c c }
\sqrt{\lambda_1} & 0 & \cdots & 0 \\
0 & \sqrt{\lambda_2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \sqrt{\lambda_n} \\
\end{array} \right)
\left( \begin{array}{c c c c }
\sqrt{\lambda_1} & 0 & \cdots & 0 \\
0 & \sqrt{\lambda_2} & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \sqrt{\lambda_n} \\
\end{array} \right) \\ \pause
& = & \left( \begin{array}{c c c c }
\lambda_1 & 0 & \cdots & 0 \\
0 & \lambda_2 & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & \lambda_n \\
\end{array} \right) \pause = D
\end{eqnarray*}
} % End size
\end{frame}

\begin{frame}
\frametitle{For a non-negative definite, symmetric matrix $A$}
\pause
Define
{\LARGE
\begin{displaymath}
A^{1/2} = C D^{1/2} C^\prime
\end{displaymath}
\pause
} % End size
So that
\begin{eqnarray*}
A^{1/2}A^{1/2} \pause & = & C D^{1/2} C^\prime C D^{1/2} C^\prime \\ \pause
& = & C D^{1/2} \, I \, D^{1/2} C^\prime \\ \pause
& = & C D^{1/2} D^{1/2} C^\prime \\ \pause
& = & C D C^\prime \\ \pause
& = & A
\end{eqnarray*}
\end{frame}

\begin{frame}
\frametitle{The square root of the inverse is the inverse of the square root}
\pause
Let $A$ be symmetric and positive definite, with $A = CDC^\prime$. \pause

\vspace{2mm}
Let $B = CD^{-1/2} C^\prime $. \pause What is $D^{-1/2}$? \pause

\vspace{2mm}
Show $B = \left( A^{-1} \right)^{1/2}$.
\pause
\begin{eqnarray*}
BB & = & \pause CD^{-1/2} C^\prime CD^{-1/2} C^\prime \\ \pause
& = & CD^{-1} C^\prime \pause = A^{-1}
\end{eqnarray*}
\pause

\vspace{2mm}
Show $B = \left( A^{1/2} \right)^{-1}$. \pause
\begin{eqnarray*}
A^{1/2}B \pause & = & CD^{1/2} C^\prime CD^{-1/2} C^\prime \pause = I \\
\end{eqnarray*}
\pause
Just write ~~~~~~~ {\LARGE $A^{-1/2} = CD^{-1/2} C^\prime$}
\end{frame}

\section{Extras}

\begin{frame}
\frametitle{Extras}
\framesubtitle{You may not know about these, but we may use them occasionally}
\begin{itemize}
\item Rank
\item Partitioned matrices
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Rank}
\pause
%\framesubtitle{}
\begin{itemize}
\item Row rank is the number of linearly independent rows. \pause
\item Column rank is the number of linearly independent columns. \pause
\item Row rank and column rank are always equal; this common value is the rank of the
matrix. \pause
\item $rank(AB) \leq \min\left(rank(A),rank(B)\right)$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Partitioned matrix}
\pause
%\framesubtitle{}
\begin{itemize}
\item A matrix of matrices \pause
{\LARGE
\begin{displaymath}
\left[ \begin{array}{c|c} A & B \\ \hline C & D \end{array} \right]
\end{displaymath}
\pause
} % End size
\item Row by column (matrix) multiplication works, provided the matrices are the right
sizes.
\end{itemize}
\end{frame}

\section{R}

\begin{frame}[fragile]
\frametitle{Matrix calculation with R}
\pause
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> is.matrix(3) # Is the number 3 a 1x1 matrix?
\end{verbatim}
\pause
} % End color
\begin{verbatim}
[1] FALSE
\end{verbatim}
\pause
{\color{blue}
\begin{verbatim}
> treecorr = cor(trees); treecorr
\end{verbatim}
\pause
} % End color
\begin{verbatim}
           Girth    Height    Volume
Girth  1.0000000 0.5192801 0.9671194
Height 0.5192801 1.0000000 0.5982497
Volume 0.9671194 0.5982497 1.0000000
\end{verbatim}
\pause
{\color{blue}
\begin{verbatim}
> is.matrix(treecorr)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
[1] TRUE
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Creating matrices}
\framesubtitle{Bind rows into a matrix}
\pause
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> # Bind rows of a matrix together
> A = rbind( c(3, 2, 6,8),
+            c(2,10,-7,4),
+            c(6, 6, 9,1) ); A
\end{verbatim}
\pause
} % End color
\begin{verbatim}
     [,1] [,2] [,3] [,4]
[1,]    3    2    6    8
[2,]    2   10   -7    4
[3,]    6    6    9    1
\end{verbatim}
\pause
{\color{blue}
\begin{verbatim}
> # Transpose
> t(A)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
     [,1] [,2] [,3]
[1,]    3    2    6
[2,]    2   10    6
[3,]    6   -7    9
[4,]    8    4    1
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Matrix multiplication}
\framesubtitle{Remember, $A$ is $3\times 4$}
\pause
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> # U = AA' (3x3), V = A'A (4x4)
> U = A %*% t(A)
> V = t(A) %*% A; V
\end{verbatim}
\pause
} % End color
\begin{verbatim}
     [,1] [,2] [,3] [,4]
[1,]   49   62   58   38
[2,]   62  140   -4   62
[3,]   58   -4  166   29
[4,]   38   62   29   81
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Determinants}
\pause
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> # U = AA' (3x3), V = A'A (4x4)
> # So rank(V) cannot exceed 3 and det(V)=0
> det(U); det(V)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
[1] 1490273
[1] -3.622862e-09
\end{verbatim}
\pause
} % End size
Inverse of $U$ exists, but inverse of $V$ does not.
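\pause

\vspace{2mm}
{\footnotesize
The determinant of $V$ prints as roughly $-3.6 \times 10^{-9}$ rather than exactly zero
only because of floating-point rounding. A sketch of a numerical rank check (not run on
these slides; it should give 3 here):
{\color{blue}
\begin{verbatim}
> # Numerical rank check (sketch, output not shown): expect 3
> qr(V)$rank
\end{verbatim}
} % End color
} % End size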
\end{frame}

\begin{frame}[fragile]
\frametitle{Inverses}
\pause
%\framesubtitle{}
\begin{itemize}
\item The \texttt{solve} function is for solving systems of linear equations like
$M\mathbf{x}=\mathbf{b}$. \pause
\item Just typing \texttt{solve(M)} gives $M^{-1}$. \pause
\end{itemize}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> # Recall U = AA' (3x3), V = A'A (4x4)
> solve(U)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
              [,1]          [,2]          [,3]
[1,]  0.0173505123 -8.508508e-04 -1.029342e-02
[2,] -0.0008508508  5.997559e-03  2.013054e-06
[3,] -0.0102934160  2.013054e-06  1.264265e-02
\end{verbatim}
\pause
{\color{blue}
\begin{verbatim}
> solve(V)
\end{verbatim}
\pause
} % End color
{\color{red}
\begin{verbatim}
Error in solve.default(V) :
  system is computationally singular: reciprocal condition number = 6.64193e-18
\end{verbatim}
} % End color
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Eigenvalues and eigenvectors}
\pause
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> # Recall U = AA' (3x3), V = A'A (4x4)
> eigen(U)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
$values
[1] 234.01162 162.89294  39.09544

$vectors
           [,1]       [,2]        [,3]
[1,] -0.6025375  0.1592598  0.78203893
[2,] -0.2964610 -0.9544379 -0.03404605
[3,] -0.7409854  0.2523581 -0.62229894
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{$V$ should have at least one zero eigenvalue}
\framesubtitle{Because $A$ is $3 \times 4$, $V = A^\prime A$, and the rank of a product cannot exceed the rank of either factor.}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> eigen(V)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
$values
[1]  2.340116e+02  1.628929e+02  3.909544e+01 -1.012719e-14

$vectors
           [,1]         [,2]       [,3]         [,4]
[1,] -0.4475551  0.006507269 -0.2328249  0.863391352
[2,] -0.5632053 -0.604226296 -0.4014589 -0.395652773
[3,] -0.5366171  0.776297432 -0.1071763 -0.312917928
[4,] -0.4410627 -0.179528649  0.8792818  0.009829883
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Spectral decomposition $V= CDC^\prime$}
\pause
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> eigenV = eigen(V)
> C = eigenV$vectors; D = diag(eigenV$values); D
\end{verbatim}
\pause
} % End color
\begin{verbatim}
         [,1]     [,2]     [,3]          [,4]
[1,] 234.0116   0.0000  0.00000  0.000000e+00
[2,]   0.0000 162.8929  0.00000  0.000000e+00
[3,]   0.0000   0.0000 39.09544  0.000000e+00
[4,]   0.0000   0.0000  0.00000 -1.012719e-14
\end{verbatim}
\pause
{\color{blue}
\begin{verbatim}
> # C is an orthogonal matrix
> C %*% t(C)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
              [,1]         [,2]         [,3]          [,4]
[1,]  1.000000e+00 5.551115e-17 0.000000e+00 -3.989864e-17
[2,]  5.551115e-17 1.000000e+00 2.636780e-16  3.556183e-17
[3,]  0.000000e+00 2.636780e-16 1.000000e+00  2.558717e-16
[4,] -3.989864e-17 3.556183e-17 2.558717e-16  1.000000e+00
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Verify $V= CDC^\prime$}
\pause
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim}
> V; C %*% D %*% t(C)
\end{verbatim}
\pause
} % End color
\begin{verbatim}
     [,1] [,2] [,3] [,4]
[1,]   49   62   58   38
[2,]   62  140   -4   62
[3,]   58   -4  166   29
[4,]   38   62   29   81
     [,1] [,2] [,3] [,4]
[1,]   49   62   58   38
[2,]   62  140   -4   62
[3,]   58   -4  166   29
[4,]   38   62   29   81
\end{verbatim}
} % End size
\end{frame}

\begin{frame}[fragile]
\frametitle{Square root matrix $V^{1/2}= CD^{1/2}C^\prime$}
\pause
%\framesubtitle{}
{\footnotesize % or scriptsize
{\color{blue}
\begin{verbatim} > sqrtV = C %*% sqrt(D) %*% t(C) \end{verbatim} \pause } % End color {\color{red} \begin{verbatim} Warning message: In sqrt(D) : NaNs produced \end{verbatim} \pause } % End color {\color{blue} \begin{verbatim} > # Multiply to get V > sqrtV %*% sqrtV; V \end{verbatim} \pause } % End color \begin{verbatim} [,1] [,2] [,3] [,4] [1,] NaN NaN NaN NaN [2,] NaN NaN NaN NaN [3,] NaN NaN NaN NaN [4,] NaN NaN NaN NaN [,1] [,2] [,3] [,4] [1,] 49 62 58 38 [2,] 62 140 -4 62 [3,] 58 -4 166 29 [4,] 38 62 29 81 \end{verbatim} } % End size \end{frame} \begin{frame}[fragile] \frametitle{What happened?} \pause %\framesubtitle{} {\footnotesize % or scriptsize {\color{blue} \begin{verbatim} > D; sqrt(D) \end{verbatim} \pause } % End color \begin{verbatim} [,1] [,2] [,3] [,4] [1,] 234.0116 0.0000 0.00000 0.000000e+00 [2,] 0.0000 162.8929 0.00000 0.000000e+00 [3,] 0.0000 0.0000 39.09544 0.000000e+00 [4,] 0.0000 0.0000 0.00000 -1.012719e-14 [,1] [,2] [,3] [,4] [1,] 15.29744 0.00000 0.000000 0 [2,] 0.00000 12.76295 0.000000 0 [3,] 0.00000 0.00000 6.252635 0 [4,] 0.00000 0.00000 0.000000 NaN \end{verbatim} {\color{red} \begin{verbatim} Warning message: In sqrt(D) : NaNs produced \end{verbatim} } % End color } % End size \end{frame} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{frame} \frametitle{Copyright Information} This slide show was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a \href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US} {Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website: \href{http://www.utstat.toronto.edu/~brunner/oldclass/302f16} {\small\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/302f16}} \end{frame} \end{document} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%