% \documentclass[serif]{beamer} % Serif for Computer Modern math font.
\documentclass[serif, handout]{beamer} % Handout mode to ignore pause statements
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
% To create handout using article mode: Comment above and uncomment below (2 places)
%\documentclass[12pt]{article}
%\usepackage{beamerarticle}
%\usepackage[colorlinks=true, pdfstartview=FitV, linkcolor=blue, citecolor=blue, urlcolor=red]{hyperref} % For live Web links with href in article mode
%\usepackage{amsmath} % For \binom{n}{y}
%\usepackage{graphicx} % To include pdf files!
%\usepackage{fullpage}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
% \usetheme{Berlin} % Displays sections on top
\usetheme{Frankfurt} % Displays section titles on top: Fairly thin but still swallows some material at bottom of crowded slides
%\usetheme{Berkeley}
\usepackage[english]{babel}
\usepackage{amsmath} % for binom
% \usepackage{graphicx} % To include pdf files!
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\setbeamertemplate{footline}[frame number]
\mode<presentation> % NOTE(review): the source had a bare \mode; the <presentation> specifier was presumably lost in extraction -- confirm against the original file

\title{Random Vectors\footnote{See last slide for copyright information.}}
\subtitle{STA442/2101 Fall 2014}
\date{} % To suppress date

\begin{document}

\begin{frame}
\titlepage
\end{frame}

\begin{frame}
\frametitle{Background Reading: Rencher and Schaalje's \emph{Linear models in statistics}}
\begin{itemize}
\item Chapter 3 on Random Vectors and Matrices
% \item Chapter 4 on the Multivariate Normal Distribution
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Random Vectors and Matrices}
%\framesubtitle{}
A \emph{random matrix} is just a matrix of random variables. Their joint probability distribution is the distribution of the random matrix. Random matrices with just one column (say, $p \times 1$) may be called \emph{random vectors}.
\end{frame}

\begin{frame}
\frametitle{Expected Value}
%\framesubtitle{}
The expected value of a matrix is defined as the matrix of expected values. Denoting the $p \times c$ random matrix $\mathbf{X}$ by $[X_{i,j}]$,
\begin{displaymath}
E(\mathbf{X}) = [E(X_{i,j})].
\end{displaymath}
\end{frame}

\begin{frame} % Fixed up -- better for 2014
\frametitle{Immediately we have natural properties like}
%\framesubtitle{}
\begin{eqnarray}
E(\mathbf{X}+\mathbf{Y}) &=& E([X_{i,j}+Y_{i,j}]) \nonumber \\
&=& [E(X_{i,j}+Y_{i,j})] \nonumber \\
&=& [E(X_{i,j})+E(Y_{i,j})] \nonumber \\
&=& [E(X_{i,j})]+[E(Y_{i,j})] \nonumber \\
&=& E(\mathbf{X})+E(\mathbf{Y}). \nonumber
\end{eqnarray}
\end{frame}

\begin{frame}
\frametitle{Moving a constant through the expected value sign}
Let $\mathbf{A} = [a_{i,j}]$ be an $r \times p$ matrix of constants, while $\mathbf{X}$ is still a $p \times c$ random matrix. Then
\begin{eqnarray}
E(\mathbf{AX}) &=& E\left(\left[\sum_{k=1}^p a_{i,k}X_{k,j}\right]\right) \nonumber \\
&=& \left[E\left(\sum_{k=1}^p a_{i,k}X_{k,j}\right)\right] \nonumber \\
&=& \left[\sum_{k=1}^p a_{i,k}E(X_{k,j})\right] \nonumber \\
&=& \mathbf{A}E(\mathbf{X}). \nonumber
\end{eqnarray}
Similar calculations yield $E(\mathbf{AXB}) = \mathbf{A}E(\mathbf{X})\mathbf{B}$.
\end{frame}

\begin{frame}
\frametitle{Variance-Covariance Matrices}
Let $\mathbf{X}$ be a $p \times 1$ random vector with $E(\mathbf{X}) = \boldsymbol{\mu}$. The \emph{variance-covariance matrix} of $\mathbf{X}$ (sometimes just called the \emph{covariance matrix}), denoted by $cov(\mathbf{X})$, is defined as
\begin{displaymath}
cov(\mathbf{X}) = E\left\{ (\mathbf{X}-\boldsymbol{\mu}) (\mathbf{X}-\boldsymbol{\mu})^\top\right\}.
\end{displaymath}
\end{frame}

\begin{frame}
\frametitle{$cov(\mathbf{X}) = E\left\{ (\mathbf{X}-\boldsymbol{\mu}) (\mathbf{X}-\boldsymbol{\mu})^\top\right\}$}
{\scriptsize
\begin{eqnarray}
cov(\mathbf{X}) &=& E\left\{ \left( \begin{array}{c}
X_1-\mu_1 \\ X_2-\mu_2 \\ X_3-\mu_3
\end{array} \right)
\left( \begin{array}{c c c}
X_1-\mu_1 & X_2-\mu_2 & X_3-\mu_3
\end{array} \right) \right\} \nonumber \\
&=& E\left\{ \left( \begin{array}{l l l}
(X_1-\mu_1)^2 & (X_1-\mu_1)(X_2-\mu_2) & (X_1-\mu_1)(X_3-\mu_3) \\
(X_2-\mu_2)(X_1-\mu_1) & (X_2-\mu_2)^2 & (X_2-\mu_2)(X_3-\mu_3) \\
(X_3-\mu_3)(X_1-\mu_1) & (X_3-\mu_3)(X_2-\mu_2) & (X_3-\mu_3)^2 \\
\end{array} \right) \right\} \nonumber \\ \nonumber \\
&=& \left( \begin{array}{l l l}
E\{(X_1-\mu_1)^2\} & E\{(X_1-\mu_1)(X_2-\mu_2)\} & E\{(X_1-\mu_1)(X_3-\mu_3)\} \\
E\{(X_2-\mu_2)(X_1-\mu_1)\} & E\{(X_2-\mu_2)^2\} & E\{(X_2-\mu_2)(X_3-\mu_3)\} \\
E\{(X_3-\mu_3)(X_1-\mu_1)\} & E\{(X_3-\mu_3)(X_2-\mu_2)\} & E\{(X_3-\mu_3)^2\} \\
\end{array} \right) \nonumber \\ \nonumber \\
&=& \left( \begin{array}{l l l}
Var(X_1) & Cov(X_1,X_2) & Cov(X_1,X_3) \\
Cov(X_1,X_2) & Var(X_2) & Cov(X_2,X_3) \\
Cov(X_1,X_3) & Cov(X_2,X_3) & Var(X_3) \\
\end{array} \right) . \nonumber \\
\nonumber
\end{eqnarray}
So, the covariance matrix $cov(\mathbf{X})$ is a $p \times p$ symmetric matrix with variances on the main diagonal and covariances on the off-diagonals.
}
\end{frame}

\begin{frame}
\frametitle{Matrix of covariances between two random vectors}
Let $\mathbf{X}$ be a $p \times 1$ random vector with $E(\mathbf{X}) = \boldsymbol{\mu}_x$ and let $\mathbf{Y}$ be a $q \times 1$ random vector with $E(\mathbf{Y}) = \boldsymbol{\mu}_y$. The $p \times q$ matrix of covariances between the elements of $\mathbf{X}$ and the elements of $\mathbf{Y}$ is
\begin{displaymath}
C(\mathbf{X,Y}) = E\left\{ (\mathbf{X}-\boldsymbol{\mu}_x) (\mathbf{Y}-\boldsymbol{\mu}_y)^\top\right\}.
\end{displaymath}
\end{frame}

\begin{frame}
\frametitle{Adding a constant has no effect}
\framesubtitle{On variances and covariances}
\begin{itemize}
\item $ cov(\mathbf{X} + \mathbf{a}) = cov(\mathbf{X})$
\item $C(\mathbf{X} + \mathbf{a},\mathbf{Y} + \mathbf{b}) = C(\mathbf{X},\mathbf{Y})$
\end{itemize}
\vspace{10mm}
It's clear from the definitions:
\begin{itemize}
\item $cov(\mathbf{X}) = E\left\{ (\mathbf{X}-\boldsymbol{\mu}) (\mathbf{X}-\boldsymbol{\mu})^\top\right\}$
\item $C(\mathbf{X,Y}) = E\left\{ (\mathbf{X}-\boldsymbol{\mu}_x) (\mathbf{Y}-\boldsymbol{\mu}_y)^\top\right\}$
\end{itemize}
\vspace{10mm}
So sometimes it is useful to let $\mathbf{a} = -\boldsymbol{\mu}_x$ and $\mathbf{b} = -\boldsymbol{\mu}_y$.
\end{frame}

\begin{frame}
\frametitle{Analogous to $Var(a\,X) = a^2\,Var(X)$}
Let $\mathbf{X}$ be a $p \times 1$ random vector with $E(\mathbf{X}) = \boldsymbol{\mu}$ and $cov(\mathbf{X}) = \boldsymbol{\Sigma}$, while $\mathbf{A} = [a_{i,j}]$ is an $r \times p$ matrix of constants. Then
\begin{eqnarray*} \label{vax}
cov(\mathbf{AX}) &=& E\left\{ (\mathbf{AX}-\mathbf{A}\boldsymbol{\mu}) (\mathbf{AX}-\mathbf{A}\boldsymbol{\mu})^\top \right\} \\
&=& E\left\{ \mathbf{A}(\mathbf{X}-\boldsymbol{\mu}) \left(\mathbf{A}(\mathbf{X}-\boldsymbol{\mu})\right)^\top \right\} \\
&=& E\left\{ \mathbf{A}(\mathbf{X}-\boldsymbol{\mu}) (\mathbf{X}-\boldsymbol{\mu})^\top \mathbf{A}^\top \right\} \nonumber \\
&=& \mathbf{A}E\{(\mathbf{X}-\boldsymbol{\mu}) (\mathbf{X}-\boldsymbol{\mu})^\top\} \mathbf{A}^\top \\
&=& \mathbf{A}cov(\mathbf{X}) \mathbf{A}^\top \nonumber \\
&=& \mathbf{A}\boldsymbol{\Sigma}\mathbf{A}^\top
\end{eqnarray*}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Copyright Information}

This slide show was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistics, University of Toronto.
It is licensed under a
\href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}
{Creative Commons Attribution - ShareAlike 3.0 Unported License}.
Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website:
\href{http://www.utstat.toronto.edu/~brunner/oldclass/appliedf14}
{\small\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/appliedf14}}
\end{frame}

\end{document}

% Scratch material below is after \end{document} and is never typeset.

L(\boldsymbol{\mu,\Sigma}) = |\boldsymbol{\Sigma}|^{-n/2} (2\pi)^{-np/2} \exp -\frac{n}{2}\left\{ tr(\boldsymbol{\widehat{\Sigma}\Sigma}^{-1}) + (\overline{\mathbf{x}}-\boldsymbol{\mu})^\top \boldsymbol{\Sigma}^{-1} (\overline{\mathbf{x}}-\boldsymbol{\mu}) \right\}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{}
%\framesubtitle{}
\begin{itemize}
\item
\item
\item
\end{itemize}
\end{frame}

{\LARGE
\begin{displaymath}

\end{displaymath}
}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%