\documentclass[serif]{beamer} % Serif for Computer Modern math font.
% \documentclass[serif, handout]{beamer} % Handout mode to ignore pause statements
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
\usetheme{Berlin} % Displays sections on top
\usepackage[english]{babel}
\usepackage{graphpap}
\usepackage{comment}
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\setbeamertemplate{footline}[frame number]
% \mode<handout>{\setbeamercolor{background canvas}{bg=black!5}} % Light grey background for handout mode

\title{Ignoring Measurement Error: Convergence\footnote{See last slide for copyright information.}}
\subtitle{STA2053 Fall 2022}
\date{} % To suppress date

\begin{document}

\begin{frame}
\titlepage
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Overview}
\tableofcontents
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Reliability}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Additive measurement error}
\framesubtitle{A very simple model}
\begin{center}
\includegraphics[width=1.5in]{Additive}
\end{center}
\pause
{\LARGE
\begin{displaymath}
W = X + e,
\end{displaymath}
} % End size
where $E(X)=\mu_x$, $E(e)=0$, $Var(X)=\sigma^2_x$, $Var(e)=\sigma^2_e$, and $Cov(X,e)=0$.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Variance and Covariance}
\framesubtitle{$W = X + e$}
\pause
\begin{eqnarray*}
Var(W) &=& Var(X) + Var(e) \\
&=& \sigma^2_x + \sigma^2_e \\
&&\\
Cov(X,W) &=& Cov\left(X,X+e\right) \\
&=& Cov(X,X) + Cov(X,e) \\
&=& \sigma^2_x + 0 \\
&=& \sigma^2_x
\end{eqnarray*}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Definition of Reliability}
\framesubtitle{Psychometric}
Reliability is the squared correlation between the observed variable and the latent variable (true score).
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Calculation of Reliability}
\framesubtitle{Squared correlation between observed and true score}
{\small
\begin{eqnarray*}
\rho^2 \pause &=& \left(\frac{Cov(X,W)}{SD(X) SD(W)}\right)^2 \pause \\
&=& \left(\frac{\sigma^2_x}{\sqrt{\sigma^2_x} \sqrt{\sigma^2_x+\sigma^2_e}}\right)^2 \pause \\
&=& \frac{\sigma^4_x}{\sigma^2_x (\sigma^2_x+\sigma^2_e)} \\
&=& \frac{\sigma^2_x}{\sigma^2_x+\sigma^2_e} \\ \pause
&=& \frac{Var(X)}{Var(W)}. \pause
\end{eqnarray*}
% Have a phi-omega version in OpenSEM work.
Reliability is the proportion of the variance in the observed variable that comes from the latent variable of interest, and not from random error.
} % End size
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
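% A brief numeric illustration of the reliability formula. The variance
% values below are hypothetical, chosen only to make the arithmetic clean.
\begin{frame}
\frametitle{A numeric illustration}
\framesubtitle{Hypothetical values}
Suppose $\sigma^2_x = 9$ and $\sigma^2_e = 1$. \pause Then
\begin{displaymath}
\rho^2 = \frac{\sigma^2_x}{\sigma^2_x+\sigma^2_e} = \frac{9}{9+1} = 0.9,
\end{displaymath}
\pause
so $90\%$ of the variance in $W$ comes from the latent variable $X$, and the remaining $10\%$ is error variance.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%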
\begin{frame}
\frametitle{How to estimate reliability from data}
\pause
%\framesubtitle{}
\begin{itemize}
\item Correlate the usual measurement with a ``gold standard''?
\item Not very realistic, except maybe for some biomarkers.
\pause
\item One answer: Measure twice.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Measure twice}
\framesubtitle{Called ``equivalent measurements'' because the error variance is the same}
\begin{center}
% Path diagram: Had to fiddle with this!
\begin{picture}(100,100)(150,0) % Size of picture (does not matter), origin
\put(197,000){$X$}
\put(202,4){\circle{20}}
\put(157,50){\framebox{$W_1$}}
\put(232,50){\framebox{$W_2$}}
\put(197,15){\vector(-1,1){25}} % X -> W1
\put(209,15){\vector(1,1){25}}  % X -> W2
\put(161,95){$e_1$}             % x = V2+4
\put(165,90){\vector(0,-1){25}} % e1 -> W1
\put(236,95){$e_2$}             % x = V3+4
\put(240,90){\vector(0,-1){25}} % e2 -> W2
\end{picture}
\end{center}
\pause
\begin{eqnarray}
W_1 & = & X + e_1 \nonumber \\
W_2 & = & X + e_2, \nonumber
\end{eqnarray}
where $E(X)=\mu_x$, $Var(X)=\sigma^2_x$, $E(e_1)=E(e_2)=0$, $Var(e_1)=Var(e_2)=\sigma^2_e$, and $X$, $e_1$ and $e_2$ are all independent.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Reliability equals the correlation between two equivalent measurements}
\framesubtitle{This is a population correlation}
{\small
\begin{eqnarray*}
Corr(W_1,W_2) & = & \frac{Cov(W_1,W_2)}{SD(W_1)SD(W_2)} \\ \pause
& & \\
& = & \frac{Cov(X+e_1,X+e_2)}{\sqrt{\sigma^2_x+\sigma^2_e}\sqrt{\sigma^2_x+\sigma^2_e}} \\ \pause
& & \\
& = & \frac{Cov(X,X)+Cov(X,e_2)+Cov(e_1,X)+Cov(e_1,e_2)}{\sigma^2_x+\sigma^2_e} \pause \\
& & \\
& = & \frac{\sigma^2_x}{\sigma^2_x+\sigma^2_e},
\end{eqnarray*}
which is the reliability.
} % End size
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
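% A quick numeric check, using the same hypothetical variances as the earlier
% illustration. It previews the estimation strategy on the next slide.
\begin{frame}
\frametitle{Numeric check}
\framesubtitle{Hypothetical values again}
With $\sigma^2_x = 9$ and $\sigma^2_e = 1$,
\begin{displaymath}
Corr(W_1,W_2) = \frac{\sigma^2_x}{\sigma^2_x+\sigma^2_e} = \frac{9}{10} = 0.9,
\end{displaymath}
which is exactly the reliability of $W$. \pause So a sample correlation between two equivalent measurements estimates the reliability.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%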
\begin{frame}
\frametitle{Estimate the reliability: Measure twice for a sample of size $n$}
\framesubtitle{With a well-chosen time gap}
Calculate $r = \frac{\sum_{i=1}^n (W_{i1}-\overline{W}_1)(W_{i2}-\overline{W}_2)}
{\sqrt{\sum_{i=1}^n (W_{i1}-\overline{W}_1)^2} \sqrt{\sum_{i=1}^n (W_{i2}-\overline{W}_2)^2}}$.
\pause
\vspace{5mm}
\begin{itemize}
\item Test-retest reliability \pause
\item Alternate forms reliability \pause
\item Split-half reliability
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Measurement Error and Consistency}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Measurement error in the response variable only}
%\framesubtitle{}
\begin{center}
\begin{picture}(100,100)(150,0) % Size of picture (does not matter), origin
\put(197,000){$Y$}
\put(202,4){\circle{20}}
\put(157,50){\framebox{$X$}}
% \put(168,25){{\footnotesize $\beta_1$}} % Label the arrow X -> Y
\put(182,30){{\footnotesize $\beta_1$}} % Label the arrow X -> Y
\put(235,50){\framebox{$V$}}
\put(167,42){\vector(1,-1){25}} % X -> Y
\put(212,17){\vector(1,1){25}}  % Y -> V
\put(240,95){$e$}
\put(243,90){\vector(0,-1){25}} % e -> V
\put(244,01){$\epsilon$}
\put(242,03){\vector(-1,0){25}} % epsilon -> Y
\end{picture}
\pause
\end{center}
True model:
\begin{eqnarray*}
Y_i &=& \beta_0 + \beta_1 X_i + \epsilon_i \\
V_i &=& \nu + Y_i + e_i
\end{eqnarray*}
\pause
Naive model: $V_i = \beta_0 + \beta_1 X_i + \epsilon_i$
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Is $\widehat{\beta}_1$ consistent?}
\framesubtitle{Ignoring measurement error in $Y$}
First calculate $Cov(X_i,V_i)$. \pause Under the true model
\begin{eqnarray*}
Y_i &=& \beta_0 + \beta_1 X_i + \epsilon_i \\
V_i &=& \nu + Y_i + e_i,
\end{eqnarray*}
\pause
\begin{eqnarray*}
Cov(X_i,V_i) &=& Cov(X_i, \, \nu + \beta_0 + \beta_1 X_i + \epsilon_i + e_i) \pause \\
&=& Cov(X_i,\beta_1 X_i + \epsilon_i + e_i) \pause \\
&=& \beta_1 \sigma^2_x
\end{eqnarray*}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Target of $\widehat{\beta}_1$ as $n \rightarrow \infty$}
\framesubtitle{Have $Cov(X_i,V_i) = \beta_1 \sigma^2_x$ and $Var(X_i) = \sigma^2_x$}
\pause
\begin{eqnarray*}
\widehat{\beta}_1 &=& \frac{\sum_{i=1}^n(X_i-\overline{X})(V_i-\overline{V})}
{\sum_{i=1}^n(X_i-\overline{X})^2} \\ \pause
&=& \frac{\widehat{\sigma}_{x,v}}{\widehat{\sigma}^2_x} \\ \pause
&\stackrel{a.s.}{\rightarrow}& \frac{Cov(X_i,V_i)}{Var(X_i)} \\ \pause
&=& \frac{\beta_1 \sigma^2_x}{\sigma^2_x} \\ \pause
&=& \beta_1
\end{eqnarray*}
Consistent.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Why did it work?}
\pause
%\framesubtitle{}
\begin{eqnarray*}
Y_i &=& \beta_0 + \beta_1 X_i + \epsilon_i \\
V_i &=& \nu + Y_i + e_i \\ \pause
&=& \nu + (\beta_0 + \beta_1 X_i + \epsilon_i) + e_i \\ \pause
&=& (\nu + \beta_0) + \beta_1 X_i + (\epsilon_i + e_i) \\ \pause
&=& \beta_0^\prime + \beta_1 X_i + \epsilon_i^\prime \pause
\end{eqnarray*}
\begin{itemize}
\item This is a re-parameterization.
\item Most definitely \emph{not} one-to-one. \pause
\item $(\nu,\beta_0)$ is absorbed into $\beta_0^\prime$. \pause
\item $(\epsilon_i, e_i)$ is absorbed into $\epsilon_i^\prime$. \pause
\item Can't know everything, but all we care about is $\beta_1$ anyway.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Don't Worry}
{\Large
\begin{itemize}
\item If a response variable appears to have no measurement error, assume it does have measurement error but the problem has been re-parameterized. \pause
\item Measurement error in $Y$ is part of $\epsilon$.
\end{itemize}
} % End size
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Measurement error in a single explanatory variable}
%\framesubtitle{}
\begin{center}
% Path diagram: Had to fiddle with this!
\begin{picture}(100,100)(150,0) % Size of picture (does not matter), origin
\put(197,000){$X$}
\put(202,4){\circle{20}}
\put(210,30){{\footnotesize $\beta_1$}} % Label the arrow X -> Y
\put(157,50){\framebox{$W$}}
\put(232,50){\framebox{$Y$}}
\put(197,15){\vector(-1,1){25}} % X -> W
\put(209,15){\vector(1,1){25}}  % X -> Y
\put(161,95){$e$}
\put(165,90){\vector(0,-1){25}} % e -> W
\put(236,95){$\epsilon$}
\put(240,90){\vector(0,-1){25}} % epsilon -> Y
\end{picture}
\end{center}
\pause
True model:
\begin{eqnarray*}
Y_i &=& \beta_0 + \beta_1 X_i + \epsilon_i \\
W_i &=& X_i + e_i
\end{eqnarray*}
Naive model: $Y_i = \beta_0 + \beta_1 W_i + \epsilon_i$
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
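% A worked covariance step, parallel to the Cov(X_i,V_i) slide above. It is
% pure algebra under the stated model ($X_i$, $e_i$ and $\epsilon_i$ independent),
% and supplies the quantities quoted on the next slide.
\begin{frame}
\frametitle{First calculate $Cov(W_i,Y_i)$ and $Var(W_i)$}
\framesubtitle{$Y_i = \beta_0 + \beta_1 X_i + \epsilon_i$ and $W_i = X_i + e_i$}
\pause
\begin{eqnarray*}
Cov(W_i,Y_i) &=& Cov(X_i+e_i, \; \beta_0 + \beta_1 X_i + \epsilon_i) \pause \\
&=& \beta_1 Cov(X_i,X_i) + Cov(X_i,\epsilon_i) + \beta_1 Cov(e_i,X_i) + Cov(e_i,\epsilon_i) \pause \\
&=& \beta_1 \sigma^2_x + 0 + 0 + 0 \\ \pause
&&\\
Var(W_i) &=& Var(X_i) + Var(e_i) \; = \; \sigma^2_x + \sigma^2_e
\end{eqnarray*}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%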
\begin{frame}
\frametitle{Target of $\widehat{\beta}_1$ as $n \rightarrow \infty$}
\framesubtitle{$Y_i = \beta_0 + \beta_1 X_i + \epsilon_i$ and $W_i = X_i + e_i$}
\pause
Have $Cov(W_i,Y_i) = \beta_1 \sigma^2_x$ and $Var(W_i) = \sigma^2_x+\sigma^2_e$. \pause
\begin{eqnarray*}
\widehat{\beta}_1 &=& \frac{\sum_{i=1}^n(W_i-\overline{W})(Y_i-\overline{Y})}
{\sum_{i=1}^n(W_i-\overline{W})^2} \\ \pause
&=& \frac{\widehat{\sigma}_{w,y}}{\widehat{\sigma}^2_w} \\ \pause
&\stackrel{a.s.}{\rightarrow}& \frac{Cov(W,Y)}{Var(W)} = \frac{\beta_1 \sigma^2_x}{\sigma^2_x+\sigma^2_e} \\ \pause
&=& \beta_1 \left(\frac{\sigma^2_x}{\sigma^2_x+\sigma^2_e} \right)
\end{eqnarray*}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{$\widehat{\beta}_1 \stackrel{a.s.}{\rightarrow} \beta_1 \left(\frac{\sigma^2_x}{\sigma^2_x+\sigma^2_e} \right)$}
\framesubtitle{$W_i = X_i + e_i$}
\pause
\begin{itemize}
\item $\widehat{\beta}_1$ converges to $\beta_1$ times the reliability of $W_i$. \pause
\item It's inconsistent. \pause
\item Because the reliability is less than one, $\widehat{\beta}_1$ is asymptotically biased toward zero. \pause
\item The worse the measurement of $X_i$, the greater the asymptotic bias. \pause
\item Sometimes called ``attenuation'' (weakening). \pause
\item If a good estimate of the reliability is available from another source, one can ``correct for attenuation.'' \pause
\item When $H_0:\beta_1=0$ is true, there is no problem. \pause
\item False sense of security?
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
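% A numeric illustration of attenuation and the classical correction. The
% slope and reliability values are hypothetical, chosen only for the example.
\begin{frame}
\frametitle{Attenuation: A numeric illustration}
\framesubtitle{Hypothetical values}
Suppose $\beta_1 = 2$ and the reliability of $W$ is $0.8$. \pause Then
\begin{displaymath}
\widehat{\beta}_1 \stackrel{a.s.}{\rightarrow} 2 \times 0.8 = 1.6.
\end{displaymath}
\pause
\begin{itemize}
\item No matter how large the sample, $\widehat{\beta}_1$ underestimates $\beta_1$ by about 20\%. \pause
\item If the reliability $0.8$ were known from another source, $\widehat{\beta}_1/0.8$ would be consistent for $\beta_1$: the correction for attenuation.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%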
\begin{frame}
\frametitle{Measurement error in two explanatory variables}
%\framesubtitle{}
\begin{center}
\includegraphics[width=3in]{MeReg2Path}
\end{center}
Want to assess the relationship of $X_2$ to $Y$ controlling for $X_1$ by testing $H_0:\beta_2=0$.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Statement of the model}
\framesubtitle{Independently for $i=1, \ldots,n$}
\begin{eqnarray}
Y_i &=& \beta_0 + \beta_1 X_{i,1} + \beta_2 X_{i,2} + \epsilon_i \nonumber \\
W_{i,1} & = & X_{i,1} + e_{i,1} \nonumber \\
W_{i,2} & = & X_{i,2} + e_{i,2}, \nonumber
\end{eqnarray}
{\footnotesize
where
\begin{itemize}
\item[] $E(X_{i,1})=\mu_1$, $E(X_{i,2})=\mu_2$, $E(\epsilon_i) = E(e_{i,1}) = E(e_{i,2}) = 0$,
\item[] $Var(\epsilon_i)=\psi$, $Var(e_{i,1})=\omega_1$, $Var(e_{i,2})=\omega_2$,
\item[] the errors $\epsilon_i, e_{i,1}$ and $e_{i,2}$ are all independent,
\item[] $X_{i,1}$ and $X_{i,2}$ are independent of $\epsilon_i, e_{i,1}$ and $e_{i,2}$, and
\end{itemize}
\begin{displaymath}
cov\left( \begin{array}{c} X_{i,1} \\ X_{i,2} \end{array} \right) =
\left( \begin{array}{c c} \phi_{11} & \phi_{12} \\
\phi_{12} & \phi_{22} \end{array} \right).
\end{displaymath}
} % End size
Note
\begin{itemize}
\item Reliability of $W_1$ is $\frac{\phi_{11}}{\phi_{11}+\omega_1}$.
\item Reliability of $W_2$ is $\frac{\phi_{22}}{\phi_{22}+\omega_2}$.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{True Model versus Naive Model}
%\framesubtitle{Independently for $i=1, \ldots,n$}
\pause
%\vspace{1mm}
True model:
\begin{eqnarray}
Y_i &=& \beta_0 + \beta_1 X_{i,1} + \beta_2 X_{i,2} + \epsilon_i \nonumber \\
W_{i,1} & = & X_{i,1} + e_{i,1} \nonumber \\
W_{i,2} & = & X_{i,2} + e_{i,2} \nonumber
\end{eqnarray}
\pause
Naive model: $Y_i = \beta_0 + \beta_1 W_{i,1} + \beta_2 W_{i,2} + \epsilon_i$
\pause
\vspace{3mm}
\begin{itemize}
\item Fit the naive model.
\item See what happens to $\widehat{\beta}_2$ as $n \rightarrow \infty$ when the true model holds. \pause
\item Start by calculating $cov(\mathbf{d}_i)$, the covariance matrix of the observable data vector $\mathbf{d}_i = (W_{i,1}, W_{i,2}, Y_i)^\top$.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Covariance matrix of the observable data}
% \vspace{5mm}
{\footnotesize
\begin{eqnarray*}
\boldsymbol{\Sigma} &=& cov\left(\begin{array}{c} W_{i,1} \\ W_{i,2} \\ Y_i \end{array}\right) \\ \pause
&& \\
&=& \left(\begin{array}{rrr}
\omega_{1} + \phi_{11} & \phi_{12} & \beta_{1} \phi_{11} + \beta_{2} \phi_{12} \\
\phi_{12} & \omega_{2} + \phi_{22} & \beta_{1} \phi_{12} + \beta_{2} \phi_{22} \\
\beta_{1} \phi_{11} + \beta_{2} \phi_{12} & \beta_{1} \phi_{12} + \beta_{2} \phi_{22} &
\beta_{1}^{2} \phi_{11} + 2 \, \beta_{1} \beta_{2} \phi_{12} + \beta_{2}^{2} \phi_{22} + \psi
\end{array}\right) % Last matrix pasted in from Sage!
\end{eqnarray*}
} % End size
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
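% A hand check of one entry of the Sage-generated matrix, using only the model
% equations and the independence assumptions; the other entries work the same way.
\begin{frame}
\frametitle{Checking one entry of $\boldsymbol{\Sigma}$}
\framesubtitle{For example, $\sigma_{23} = Cov(W_{i,2},Y_i)$}
\pause
Since the errors are independent of the $X$s and of each other, only two covariances survive:
\begin{eqnarray*}
Cov(W_{i,2},Y_i) &=& Cov(X_{i,2}+e_{i,2}, \; \beta_1 X_{i,1} + \beta_2 X_{i,2} + \epsilon_i) \pause \\
&=& \beta_1 Cov(X_{i,2},X_{i,1}) + \beta_2 Cov(X_{i,2},X_{i,2}) \pause \\
&=& \beta_{1} \phi_{12} + \beta_{2} \phi_{22},
\end{eqnarray*}
agreeing with the $(2,3)$ entry of $\boldsymbol{\Sigma}$.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%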
\begin{frame}
\frametitle{What happens to $\widehat{\beta}_2$ as $n \rightarrow \infty$?}
\framesubtitle{Interested in $H_0:\beta_2=0$}
\pause
{\footnotesize
\begin{eqnarray*}
\widehat{\beta}_2 &=& \frac{\widehat{\sigma}_{11}\widehat{\sigma}_{23} - \widehat{\sigma}_{12}\widehat{\sigma}_{13}}
{\widehat{\sigma}_{11}\widehat{\sigma}_{22} - \widehat{\sigma}_{12}^2} \\ \pause
&\stackrel{a.s.}{\rightarrow}& \frac{\sigma_{11}\sigma_{23} - \sigma_{12}\sigma_{13}}
{\sigma_{11}\sigma_{22} - \sigma_{12}^2} \\ \pause
& = & \frac{\beta_{1} \omega_{1} \phi_{12} + \beta_{2} \left(\omega_{1}\phi_{22} + \phi_{11} \phi_{22} - \phi_{12}^{2}\right)}
{(\phi_{11} + \omega_1)(\phi_{22} + \omega_2) - \phi_{12}^{2}} \\ \pause
& \neq & \beta_2
\end{eqnarray*}
} % End size
Inconsistent.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{When $H_0:\beta_2=0$ is true}
%\framesubtitle{}
{\LARGE
\begin{displaymath}
\widehat{\beta}_2 \stackrel{a.s.}{\rightarrow}
\frac{\beta_{1} \omega_{1} \phi_{12}}
{(\phi_{11} + \omega_1)(\phi_{22} + \omega_2) - \phi_{12}^{2}}
\end{displaymath}
\pause
} % End size
So $\widehat{\beta}_2$ goes to the wrong target unless \pause
\begin{itemize}
\item There is no relationship between $X_1$ and $Y$ ($\beta_1 = 0$), or \pause
\item There is no measurement error in $W_1$ ($\omega_1 = 0$), or \pause
\item There is no correlation between $X_1$ and $X_2$ ($\phi_{12} = 0$). \pause
\end{itemize}
Also, the $t$ statistic goes to plus or minus $\infty$, and the $p$-value $\stackrel{a.s.}{\rightarrow} 0$. \pause Remember, $H_0$ is true.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Copyright Information}
This slide show was prepared by \href{http://www.utstat.toronto.edu/brunner}{Jerry Brunner}, Department of Statistics, University of Toronto. It is licensed under a
\href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}{Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website:
\href{http://www.utstat.toronto.edu/brunner/oldclass/2053f22}{\footnotesize \texttt{http://www.utstat.toronto.edu/brunner/oldclass/2053f22}}
\end{frame}

\end{document}