% 431s23Assignment2.tex Mostly linear algebra and random vectors
\documentclass[12pt]{article}
%\usepackage{amsbsy} % for \boldsymbol and \pmb
\usepackage{graphicx} % To include pdf files!
\usepackage{amsmath}
\usepackage{amsbsy}
\usepackage{amsfonts}
\usepackage{comment}
\usepackage[colorlinks=true, pdfstartview=FitV, linkcolor=blue, citecolor=blue, urlcolor=blue]{hyperref} % For links
\usepackage{fullpage}
%\pagestyle{empty} % No page numbers

\begin{document}
%\enlargethispage*{1000 pt}

\begin{center}
{\Large \textbf{STA 431s23 Assignment Two}}\footnote{This assignment was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a \href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US} {Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website: \href{http://www.utstat.toronto.edu/brunner/oldclass/431s23} {\small\texttt{http://www.utstat.toronto.edu/brunner/oldclass/431s23}}}
\vspace{1 mm}
\end{center}

\noindent \emph{These problems are not to be handed in. They are practice for the Quiz on Friday January 27.}

\vspace{2mm}
\hrule

\begin{enumerate}

\item Two latent explanatory variables $X_1$ and $X_2$ (say motivation and ability) potentially have non-zero covariance.
Four observable job performance measures $D_1$, $D_2$, $D_3$ and $D_4$ are potentially related to $X_1$ and $X_2$ as follows:
\begin{eqnarray*}
D_1 & = & \alpha_1 + \beta_{11}X_1 + \beta_{12}X_2 + \epsilon_1 \\
D_2 & = & \alpha_2 + \beta_{21}X_1 + \beta_{22}X_2 + \epsilon_2 \\
D_3 & = & \alpha_3 + \beta_{31}X_1 + \beta_{32}X_2 + \epsilon_3 \\
D_4 & = & \alpha_4 + \beta_{41}X_1 + \beta_{42}X_2 + \epsilon_4,
\end{eqnarray*}
where the $\alpha$ and $\beta$ quantities are unknown parameters, $\epsilon_1$ through $\epsilon_4$ have zero expected value, are independent of one another, and are independent of $X_1$ and $X_2$. Everything is normally distributed.
\begin{enumerate}
\item Make a path diagram of this model. Write $\beta_{ij}$ parameters on the appropriate arrows.
\item What are the unknown parameters of this model? I count 21. You will have to make up some notation for the expected values, variances and covariances.
\end{enumerate}

\item \label{ABneqBA} % A2.11
Let $\mathbf{A}$ and $\mathbf{B}$ be $2 \times 2$ matrices. Either
\begin{itemize}
\item Prove $\mathbf{AB} = \mathbf{BA}$, or
\item Give a numerical example in which $\mathbf{AB} \neq \mathbf{BA}$
\end{itemize}

\item % A2.14
The formal definition of a matrix inverse is that an inverse of the matrix $\mathbf{A}$ (denoted $\mathbf{A}^{-1}$) is defined by two properties: $\mathbf{A}^{-1}\mathbf{A=I}$ and $\mathbf{AA}^{-1}=\mathbf{I}$. If you want to prove that one matrix is the inverse of another using the definition, you'd have two things to show. This homework problem establishes that you only need to do it in one direction. Let $\mathbf{A}$ and $\mathbf{B}$ be square matrices with $\mathbf{AB} = \mathbf{I}$. Show that $\mathbf{A} = \mathbf{B}^{-1}$ and $\mathbf{B} = \mathbf{A}^{-1}$. Start by establishing that the inverses exist. To make it easy, use well-known properties of determinants.

\item % A2.15
Prove that inverses are unique, as follows.
Let $\mathbf{B}$ and $\mathbf{C}$ both be inverses of $\mathbf{A}$. Show that $\mathbf{B=C}$.

\item % A2.16
Let $\mathbf{X}$ be an $n$ by $p$ matrix with $n \neq p$. Why is it incorrect to say that $(\mathbf{X^\top X})^{-1}= \mathbf{X}^{-1}\mathbf{X}^{\top -1}$?

\item % A2.17
Suppose that the matrices $\mathbf{A}$ and $\mathbf{B}$ both have inverses. Prove that $\mathbf{(AB)}^{-1} = \mathbf{B}^{-1}\mathbf{A}^{-1}$.

\item % A2.18
\label{ivt} Let $\mathbf{A}$ be a non-singular matrix. Prove $(\mathbf{A}^{-1})^\top=(\mathbf{A}^\top)^{-1}$. The proof is easier with a different notation. Let $\mathbf{A}$ and $\mathbf{B}$ be inverses. Show that $\mathbf{B}^\top$ is the inverse of $\mathbf{A}^\top$. Using $(\mathbf{A}^{-1})^\top=(\mathbf{A}^\top)^{-1}$, show that the inverse of a symmetric matrix is also symmetric.

\item % A2.21
Let $\mathbf{a}$ be an $n \times 1$ matrix of real constants. How do you know $\mathbf{a}^\top\mathbf{a}\geq 0$?

\item Let $\mathbf{A}$ be a real, symmetric, positive definite matrix, so that $\mathbf{A}= \mathbf{CDC}^\top$.
\begin{enumerate}
\item Show that the eigenvalues of $\mathbf{A}$ are all strictly positive. Start with the definition $\mathbf{Ax}=\lambda\mathbf{x}$.
\item Show that $\mathbf{A}^{-1} = \mathbf{C}\mathbf{D}^{-1}\mathbf{C}^\top$.
\item Show that $\mathbf{A}^{1/2} = \mathbf{C} \mathbf{D}^{1/2} \mathbf{C}^\top$.
\item The notation $\mathbf{A}^{-1/2} = \mathbf{C} \mathbf{D}^{-1/2} \mathbf{C}^\top$ means two things.
\begin{enumerate}
\item Show that $\mathbf{A}^{-1/2}$ is the square root of $\mathbf{A}^{-1}$.
\item Show that $\mathbf{A}^{-1/2}$ is the inverse of $\mathbf{A}^{1/2}$.
\end{enumerate}
\end{enumerate}

\item Using the Spectral Decomposition Theorem and $tr(\mathbf{AB})=tr(\mathbf{BA})$, prove that the trace is the sum of the eigenvalues for a symmetric matrix $\boldsymbol{\Sigma}$.
\item Using the Spectral Decomposition Theorem and $|\mathbf{AB}| = |\mathbf{BA}|$, prove that the determinant of a symmetric matrix $\boldsymbol{\Sigma}$ is the product of its eigenvalues.

\item Prove that the diagonal elements of a positive definite matrix must be positive. Hint: Can you describe a vector $\mathbf{v}$ such that $\mathbf{v}^\top \mathbf{Av}$ picks out the $j$th diagonal element?

% Random vectors and matrices
\vspace{3mm}\hrule\vspace{3mm} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
In the questions below, you are sometimes asked to prove things that are false. If a statement is false, please say so and provide a counter-example.
\vspace{3mm}\hrule\vspace{3mm} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\item Let the $p \times 1$ random vector $\mathbf{x}$ have expected value $\boldsymbol{\mu}$ and variance-covariance matrix $\boldsymbol{\Sigma}$, and let $\mathbf{A}$ be an $m \times p$ matrix of constants. Prove that the variance-covariance matrix of $\mathbf{Ax}$ is either
\begin{itemize}
\item $\mathbf{A} \boldsymbol{\Sigma} \mathbf{A}^\top$, or
\item $\mathbf{A}^2 \boldsymbol{\Sigma}$.
\end{itemize}
Pick one and prove it. Start with the definition of a variance-covariance matrix on the formula sheet. If the two expressions above are equal, say so.

\item Let the $p \times 1$ random vector $\mathbf{y}$ have expected value $\boldsymbol{\mu}$ and variance-covariance matrix $\boldsymbol{\Sigma}$. Starting with the definition, calculate $cov(\mathbf{Ay},\mathbf{By})$, where $\mathbf{A}$ and $\mathbf{B}$ are matrices of constants. Show your work.

\item Let $\mathbf{x}$ be a $p \times 1$ random vector. Starting with the definition on the formula sheet, prove $cov(\mathbf{x})=\mathbf{0}$. % FALSE

\item Let the $p \times 1$ random vector $\mathbf{x}$ have mean $\boldsymbol{\mu}$ and variance-covariance matrix $\boldsymbol{\Sigma}$, and let $\mathbf{c}$ be an $r \times 1$ vector of constants. Find $cov(\mathbf{x}+\mathbf{c})$. Show your work.
\item Comparing the definitions, one can see that viewing a scalar random variable as a $1 \times 1$ random vector, the variance-covariance matrix is just the ordinary variance. Accordingly, let the scalar random variable $y = \mathbf{v}^\top \mathbf{x}$, where $\mathbf{x}$ is a $p \times 1$ random vector with covariance matrix $\boldsymbol{\Sigma}$, and $\mathbf{v}$ is a $p \times 1$ vector of constants. What is $Var(y)$? Why does this tell you that \emph{any} variance-covariance matrix must be positive semi-definite?

\item Using definitions on the formula sheet and other material from this assignment,
\begin{enumerate}
\item Show that the eigenvalues of a variance-covariance matrix cannot be negative.
\item How do you know that the determinant of a variance-covariance matrix must be greater than or equal to zero? The answer is one short sentence.
\item Let $x$ and $y$ be scalar random variables. Recall $Corr(x,y) = \frac{Cov(x,y)}{\sqrt{Var(x)Var(y)}}$. Using what you have shown about the determinant, show $-1 \leq Corr(x,y) \leq 1$. You have just proved the Cauchy-Schwarz inequality.
\end{enumerate}

% \href{https://en.wikipedia.org} {Cauchy-Schwarz inequality}
% \href{https://en.wikipedia.org/wiki/Cauchy–Schwarz_inequality} {Cauchy-Schwarz inequality}
% \href{https://en.wikipedia.org/wiki} {Cauchy-Schwarz inequality}
% \href{https://en.wikipedia.org/wiki/Cauchy–Schwarz_inequality} {Cauchy-Schwarz inequality}
%\href{http://www.utstat.toronto.edu/brunner/oldclass/431s23}
%{\small\texttt{http://www.utstat.toronto.edu/brunner/oldclass/431s23}}
%

\item Let $\mathbf{x}$ be a $p \times 1$ random vector with mean $\boldsymbol{\mu}_x$ and variance-covariance matrix $\boldsymbol{\Sigma}_x$, and let $\mathbf{y}$ be a $q \times 1$ random vector with mean $\boldsymbol{\mu}_y$ and variance-covariance matrix $\boldsymbol{\Sigma}_y$.
\begin{enumerate}
\item What is the $(i,j)$ element of $\boldsymbol{\Sigma}_{xy} = cov(\mathbf{x},\mathbf{y})$?
\item Assuming $p=q$, find an expression for $cov(\mathbf{x}+\mathbf{y})$ in terms of $\boldsymbol{\Sigma}_x$, $\boldsymbol{\Sigma}_y$ and $\boldsymbol{\Sigma}_{xy}$. Show your work, using anything on the formula sheet you wish.
\item Simplify further for the special case where $Cov(x_i,y_j)=0$ for all $i$ and $j$.
\item Let $\mathbf{c}$ be a $p \times 1$ vector of constants and $\mathbf{d}$ be a $q \times 1$ vector of constants. Find $cov(\mathbf{x}+\mathbf{c}, \mathbf{y}+\mathbf{d})$. Show your work, using the \emph{definition} on the formula sheet.
\end{enumerate}

\item Let the random vectors $\mathbf{x}_1$ and $\mathbf{x}_2$ be $p \times 1$, and the random vectors $\mathbf{y}_1$ and $\mathbf{y}_2$ be $q \times 1$. Using anything on the formula sheet you wish, calculate $cov(\mathbf{x}_1+\mathbf{x}_2, \mathbf{y}_1+\mathbf{y}_2)$.

\item Starting with the definition on the formula sheet, show $cov(\mathbf{x,y})=cov(\mathbf{y,x})$. % FALSE

\item Starting with the definition on the formula sheet, show $cov(\mathbf{x,y})=\mathbf{0}$. % FALSE

% Still need to do the multivariate normal.

\end{enumerate} % End of all the questions

\end{document}