\documentclass[10pt]{article}
%\usepackage{amsbsy}   % for \boldsymbol and \pmb
%\usepackage{graphicx} % To include pdf files!
\usepackage{amsmath}
\usepackage{amsbsy}
\usepackage{amsfonts}
\usepackage{euscript} % for \EuScript
\usepackage[colorlinks=true, pdfstartview=FitV, linkcolor=blue,
            citecolor=blue, urlcolor=blue]{hyperref} % For links
\oddsidemargin=0in   % Good for US Letter paper
\evensidemargin=0in
\textwidth=6.3in
\topmargin=-0.5in
\headheight=0.1in
\headsep=0.1in
\textheight=9.4in
\pagestyle{empty}    % No page numbers

\begin{document}
% \enlargethispage*{1000 pt}
\begin{center}
{\Large \textbf{STA 302 Formulas}}\\
\vspace{1 mm}
\end{center}

\noindent
\renewcommand{\arraystretch}{2.0}
\begin{tabular}{lll}
$M_y(t) = E(e^{yt})$ & ~~~~~ & $M_{ay}(t) = M_y(at)$ \\
$M_{y+a}(t) = e^{at}M_y(t)$ & ~~~~~ &
    $M_{\sum_{i=1}^n y_i}(t) = \prod_{i=1}^n M_{y_i}(t)$ \\
$y \sim N(\mu,\sigma^2)$ means $M_y(t) = e^{\mu t + \frac{1}{2}\sigma^2t^2}$ & ~~~~~ &
    $W \sim \chi^2(\nu)$ means $M_W(t) = (1-2t)^{-\nu/2}$ \\
If $W_1, \ldots, W_n \stackrel{ind}{\sim} \chi^2(\nu_i)$, then
    $\sum_{i=1}^n W_i \sim \chi^2(\sum_{i=1}^n \nu_i)$ & ~~~~~ &
    If $Z \sim N(0,1)$ then $Z^2 \sim \chi^2(1)$ \\
\multicolumn{3}{l}{If $W=W_1+W_2$ with $W_1$ and $W_2$ independent,
    $W\sim\chi^2(\nu_1+\nu_2)$ and $W_2\sim\chi^2(\nu_2)$,
    then $W_1\sim\chi^2(\nu_1)$.} \\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\parbox{7cm}{Columns of $\mathbf{A}$ \emph{linearly dependent} means there is
    a vector $\mathbf{v} \neq \mathbf{0}$ with $\mathbf{Av} = \mathbf{0}$.} & ~~~~~ &
\parbox{7cm}{Columns of $\mathbf{A}$ \emph{linearly independent} means that
    $\mathbf{Av} = \mathbf{0}$ implies $\mathbf{v} = \mathbf{0}$.} \\
\multicolumn{3}{l}{$\mathbf{A}$ \emph{positive definite} means
    $\mathbf{v}^\prime \mathbf{Av} > 0$ for all vectors $\mathbf{v} \neq \mathbf{0}$.} \\
$\boldsymbol{\Sigma} = \mathbf{CD}\mathbf{C}^\prime$
    with $\mathbf{C}$ orthogonal and $\mathbf{D}$ diagonal & ~~~~~ &
    $\boldsymbol{\Sigma}^{-1} = \mathbf{C}\mathbf{D}^{-1}\mathbf{C}^\prime$ \\
$\boldsymbol{\Sigma}^{1/2} = \mathbf{CD}^{1/2}\mathbf{C}^\prime$ & ~~~~~ &
    $\boldsymbol{\Sigma}^{-1/2} = \mathbf{CD}^{-1/2}\mathbf{C}^\prime$ \\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
$cov(\mathbf{y}) = E\left\{(\mathbf{y}-\boldsymbol{\mu}_y)
    (\mathbf{y}-\boldsymbol{\mu}_y)^\prime\right\}$ & ~~~~~ &
    $C(\mathbf{y},\mathbf{t}) = E\left\{(\mathbf{y}-\boldsymbol{\mu}_y)
    (\mathbf{t}-\boldsymbol{\mu}_t)^\prime\right\}$ \\
$cov(\mathbf{y}) = E\{\mathbf{yy}^\prime\} - \boldsymbol{\mu}_y\boldsymbol{\mu}_y^\prime$
    & ~~~~~ & $cov(\mathbf{Ay}) = \mathbf{A}cov(\mathbf{y})\mathbf{A}^\prime$ \\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
$M_{\mathbf{y}}(\mathbf{t}) = E(e^{\mathbf{t}^\prime\mathbf{y}})$ & ~~~~~ &
    $M_{\mathbf{Ay}}(\mathbf{t}) = M_{\mathbf{y}}(\mathbf{A}^\prime\mathbf{t})$ \\
$M_{\mathbf{y}+\mathbf{c}}(\mathbf{t}) = e^{\mathbf{t}^\prime\mathbf{c}}
    M_{\mathbf{y}}(\mathbf{t})$ & ~~~~~ &
    $\mathbf{y} \sim N_p(\boldsymbol{\mu}, \boldsymbol{\Sigma})$ means
    $M_{\mathbf{y}}(\mathbf{t}) = e^{\mathbf{t}^\prime\boldsymbol{\mu}
    + \frac{1}{2}\mathbf{t}^\prime\boldsymbol{\Sigma}\mathbf{t}}$ \\
\multicolumn{3}{l}{$\mathbf{y}_1$ and $\mathbf{y}_2$ are independent if and only if
    $M_{(\mathbf{y}_1,\mathbf{y}_2)}\left(\mathbf{t}_1,\mathbf{t}_2\right)
    = M_{\mathbf{y}_1}(\mathbf{t}_1)M_{\mathbf{y}_2}(\mathbf{t}_2)$} \\
If $\mathbf{y} \sim N_p(\boldsymbol{\mu}, \boldsymbol{\Sigma})$, then
    $\mathbf{Ay} \sim N_q(\mathbf{A}\boldsymbol{\mu},
    \mathbf{A}\boldsymbol{\Sigma}\mathbf{A}^\prime)$ & ~~~~~ &
    and $W = (\mathbf{y}-\boldsymbol{\mu})^\prime\boldsymbol{\Sigma}^{-1}
    (\mathbf{y}-\boldsymbol{\mu}) \sim \chi^2(p)$ \\
\end{tabular}
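\vspace{2mm}
\noindent {\small \emph{Illustration} (a sketch of how the multivariate MGF rules
above combine; $\mathbf{A}$ is any $q \times p$ constant matrix): if
$\mathbf{y} \sim N_p(\boldsymbol{\mu}, \boldsymbol{\Sigma})$, then
\begin{displaymath}
M_{\mathbf{Ay}}(\mathbf{t}) = M_{\mathbf{y}}(\mathbf{A}^\prime\mathbf{t})
= e^{(\mathbf{A}^\prime\mathbf{t})^\prime\boldsymbol{\mu}
+ \frac{1}{2}(\mathbf{A}^\prime\mathbf{t})^\prime\boldsymbol{\Sigma}
(\mathbf{A}^\prime\mathbf{t})}
= e^{\mathbf{t}^\prime(\mathbf{A}\boldsymbol{\mu})
+ \frac{1}{2}\mathbf{t}^\prime(\mathbf{A}\boldsymbol{\Sigma}\mathbf{A}^\prime)\mathbf{t}},
\end{displaymath}
which is the MGF of $N_q(\mathbf{A}\boldsymbol{\mu},
\mathbf{A}\boldsymbol{\Sigma}\mathbf{A}^\prime)$, giving the
linear-transformation rule in the last row above.}
\vspace{1mm}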
\noindent
\begin{tabular}{lll}
$y_i = \beta_0 + \beta_1 x_{i1} + \cdots + \beta_k x_{ik} + \epsilon_i$ & ~~~~~ &
    $\epsilon_1, \ldots, \epsilon_n$ independent $N(0,\sigma^2)$ \\
$\mathbf{y} = \mathbf{X}\boldsymbol{\beta} + \boldsymbol{\epsilon}$ with
    $\boldsymbol{\epsilon} \sim N_n(\mathbf{0},\sigma^2\mathbf{I}_n)$ & ~~~~~ &
    $\widehat{\boldsymbol{\beta}} = (\mathbf{X}^\prime\mathbf{X})^{-1}
    \mathbf{X}^\prime\mathbf{y}
    \sim N_{k+1}(\boldsymbol{\beta}, \sigma^2(\mathbf{X}^\prime\mathbf{X})^{-1})$ \\
$\widehat{\mathbf{y}} = \mathbf{X}\widehat{\boldsymbol{\beta}} = \mathbf{Hy}$, where
    $\mathbf{H} = \mathbf{X}(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{X}^\prime$
    & ~~~~~ & $\widehat{\boldsymbol{\epsilon}} = \mathbf{y} - \widehat{\mathbf{y}}
    = (\mathbf{I}-\mathbf{H})\mathbf{y}$ \\
$\widehat{\boldsymbol{\beta}}$ and $\widehat{\boldsymbol{\epsilon}}$ are independent
    under normality. & ~~~~~ &
    $\frac{SSE}{\sigma^2} = \frac{\widehat{\boldsymbol{\epsilon}}^\prime
    \widehat{\boldsymbol{\epsilon}}}{\sigma^2} \sim \chi^2(n-k-1)$ \\
$\sum_{i=1}^n(y_i-\overline{y})^2 = \sum_{i=1}^n(y_i-\widehat{y}_i)^2
    + \sum_{i=1}^n(\widehat{y}_i-\overline{y})^2$ & ~~~~~ &
    $SST=SSE+SSR$ and $R^2 = \frac{SSR}{SST}$ \\
$T = \frac{Z}{\sqrt{W/\nu}} \sim t(\nu)$ for $Z \sim N(0,1)$ and
    $W \sim \chi^2(\nu)$ independent & ~~~~~ &
    $F = \frac{W_1/\nu_1}{W_2/\nu_2} \sim F(\nu_1,\nu_2)$ for independent
    $W_1 \sim \chi^2(\nu_1)$, $W_2 \sim \chi^2(\nu_2)$ \\
$T = \frac{\mathbf{a}^\prime\widehat{\boldsymbol{\beta}}
    - \mathbf{a}^\prime\boldsymbol{\beta}}
    {\sqrt{MSE\,\mathbf{a}^\prime(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{a}}}
    \sim t(n-k-1)$ & ~~~~~ &
    $T = \frac{y_0 - \mathbf{x}_0^\prime\widehat{\boldsymbol{\beta}}}
    {\sqrt{MSE\,(1+\mathbf{x}_0^\prime(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{x}_0)}}
    \sim t(n-k-1)$ \\
\multicolumn{3}{l}{$F = \frac{(\mathbf{C}\widehat{\boldsymbol{\beta}}-\mathbf{t})^\prime
    (\mathbf{C}(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{C}^\prime)^{-1}
    (\mathbf{C}\widehat{\boldsymbol{\beta}}-\mathbf{t})}{q\,MSE}
    = \frac{SSR(full)-SSR(reduced)}{q\,MSE} \sim F(q,n-k-1)$,
    where $MSE = \frac{SSE}{n-k-1}$} \\
\parbox{7cm}{$F = \left(\frac{p}{1-p}\right)\left(\frac{n-k-1}{q}\right)
    ~\Leftrightarrow~ p = \frac{qF}{qF+n-k-1}$, where
    $p = \frac{R^2(full)-R^2(reduced)}{1-R^2(reduced)}$} & ~~~~~ &
    $r_{xy} = \frac{\sum_{i=1}^n (x_i-\overline{x})(y_i-\overline{y})}
    {\sqrt{\sum_{i=1}^n (x_i-\overline{x})^2}
    \sqrt{\sum_{i=1}^n (y_i-\overline{y})^2}}$ \\
\end{tabular}
\renewcommand{\arraystretch}{1.0}
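\vspace{2mm}
\noindent {\small \emph{Illustration} (a sketch applying the linear-transformation
rule to the regression model; only results already on this sheet are used): since
$\mathbf{y} = \mathbf{X}\boldsymbol{\beta} + \boldsymbol{\epsilon}$ with
$\boldsymbol{\epsilon} \sim N_n(\mathbf{0},\sigma^2\mathbf{I}_n)$ gives
$\mathbf{y} \sim N_n(\mathbf{X}\boldsymbol{\beta}, \sigma^2\mathbf{I}_n)$, taking
$\mathbf{A} = (\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{X}^\prime$ yields
\begin{displaymath}
\widehat{\boldsymbol{\beta}} = \mathbf{Ay} \sim N_{k+1}\left(
(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{X}^\prime\mathbf{X}\boldsymbol{\beta},~
\sigma^2(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{X}^\prime
\mathbf{X}(\mathbf{X}^\prime\mathbf{X})^{-1}\right)
= N_{k+1}\left(\boldsymbol{\beta},~\sigma^2(\mathbf{X}^\prime\mathbf{X})^{-1}\right),
\end{displaymath}
the distribution of $\widehat{\boldsymbol{\beta}}$ stated in the table.}
\vspace{1mm}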
\vspace{1mm}
\hrule
\vspace{2mm}

\noindent {\small
This formula sheet was prepared by
\href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of
Statistics, University of Toronto. It is licensed under a
\href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}
{Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part
of it as you like and share the result freely. The \LaTeX~source code is
available from the course website:
\begin{center}
\href{http://www.utstat.toronto.edu/~brunner/oldclass/302f15}
{\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/302f15}}
\end{center}
} % End size

\end{document}