% \documentclass[serif]{beamer} % Serif for Computer Modern math font.
\documentclass[serif, handout]{beamer} % Handout to ignore pause statements.
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
% \usetheme{Berlin} % Displays sections on top
\usetheme{Frankfurt} % Displays section titles on top: Fairly thin but still swallows some material at bottom of crowded slides
% \usetheme{Berkeley}
\usepackage[english]{babel}
\usepackage{amsmath} % for binom
\usepackage{amsfonts} % for \mathbb{R} The set of reals
% \usepackage{graphicx} % To include pdf files!
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\setbeamertemplate{footline}[frame number]
\mode<presentation>

\title{Limit Theorems\footnote{
This slide show is an open-source document. See last slide for copyright information.}}
\subtitle{STA 256: Fall 2018}
\date{} % To suppress date

\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\titlepage
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Overview}
\tableofcontents
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Law of Large Numbers}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Infinite Sequence of Random Variables}
%\framesubtitle{}
{\Large
$T_1, T_2, \ldots$
} \pause
\vspace{5mm}
\begin{itemize}
\item We are interested in what happens to $T_n$ as $n \rightarrow \infty$. \pause
\item Why even think about this? \pause
\item For fun. \pause
\item And because $T_n$ could be a sequence of \emph{statistics}\pause, numbers computed from sample data. \pause
\item For example, $T_n = \overline{X}_n \pause = \frac{1}{n}\sum_{i=1}^n X_i$. \pause
\item $n$ is the sample size. \pause
\item $n \rightarrow \infty$ is an approximation of what happens for large samples. \pause
\item Good things should happen when estimates are based on more information.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Convergence}
%\framesubtitle{}
\begin{itemize}
\item Convergence of $T_n$ as $n \rightarrow \infty$ is not an ordinary limit, because probability is involved. \pause
\item There are several different types of convergence. \pause
\item In this class, we will work with \emph{convergence in probability} and \emph{convergence in distribution}.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Convergence in Probability}
%\framesubtitle{}
Definition: The sequence of random variables $T_1, T_2, \ldots$ is said to converge in probability to the constant $c$ if for all $\epsilon > 0$,
{\LARGE
\begin{displaymath}
\lim_{n \rightarrow \infty}P\{|T_n-c|\geq\epsilon\} = 0
\end{displaymath} \pause
} % End size
% Or equivalently, \pause
% \begin{displaymath}
% \lim_{n \rightarrow \infty}P\{|T_n-c|\leq\epsilon\} = 1
% \end{displaymath}
\pause
Observe
\begin{eqnarray*}
|T_n-c| < \epsilon \pause & \Leftrightarrow & -\epsilon < T_n-c < \epsilon \\ \pause
& \Leftrightarrow & c-\epsilon < T_n < c+\epsilon \pause
\end{eqnarray*}
\begin{picture}(10,10)
% Line, direction (1,0), horizontal extent 200, starting point (50,0)
\put(50,0){\line(1,0){200} }
\put(150,5){\line(0,-1){10} }
\put(148,-15){$c$}
\put(100,-2){(} % Left parenthesis
\put(200,-2){)} % Right parenthesis
\put(90,-15){$c-\epsilon$}
\put(190,-15){$c+\epsilon$}
\end{picture}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Example: $T_n \sim U(-\frac{1}{n}, \frac{1}{n})$}
\framesubtitle{Convergence in probability means $\lim_{n \rightarrow \infty}P\{|T_n-c|\geq\epsilon\} = 0$}
\begin{picture}(10,10)(25,-25)
% Line, direction (1,0), horizontal extent 200, starting point (50,0)
\put(50,0){\line(1,0){200} }
\put(150,5){\line(0,-1){10} }
\put(148,-15){$c$}
\put(100,-2){(} % Left parenthesis
\put(200,-2){)} % Right parenthesis
\put(90,-15){$c-\epsilon$}
\put(190,-15){$c+\epsilon$}
\end{picture} \pause
\begin{itemize}
\item $T_1$ is uniform on $(-1,1)$. \pause Height of the density is $\frac{1}{2}$. \pause
\item $T_2$ is uniform on $(-\frac{1}{2},\frac{1}{2})$. \pause Height of the density is 1. \pause
\item $T_3$ is uniform on $(-\frac{1}{3},\frac{1}{3})$. \pause Height of the density is $\frac{3}{2}$. \pause
\item Here $c=0$. Eventually, $\frac{1}{n} < \epsilon$ \pause and $P\{|T_n-0|\geq\epsilon\} = 0$\pause, forever. \pause
\item Eventually means for all $n>\frac{1}{\epsilon}$.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Example: $X_1, \ldots, X_n$ are independent $U(0,\theta)$}
\framesubtitle{Convergence in probability means $\lim_{n \rightarrow \infty}P\{|T_n-c|\geq\epsilon\} = 0$}
\pause
For $0 < x < \theta$, \pause
\begin{itemize}
\item[] $F_{X_i}(x) = \int_0^x \frac{1}{\theta} \, dt \pause = \frac{x}{\theta}$. \pause
\item[] $Y_n = \max_i (X_i)$. \pause
\item[] By independence, $F_{Y_n}(y) = P(X_1 \leq y, \ldots, X_n \leq y) = \left(\frac{y}{\theta}\right)^n$ for $0 < y < \theta$. \pause
\end{itemize}
\vspace{2mm}
\begin{picture}(10,10) % (25,-25)
% Line, direction (1,0), horizontal extent 200, starting point (50,0)
\put(50,0){\line(1,0){200} }
\put(150,5){\line(0,-1){10} }
\put(148,-15){$\theta$}
\put(100,-2){(} % Left parenthesis
\put(200,-2){)} % Right parenthesis
\put(90,-15){$\theta-\epsilon$}
\put(190,-15){$\theta+\epsilon$}
\end{picture} \pause
\vspace{5mm}
\begin{eqnarray*}
P\{|Y_n-\theta|\geq\epsilon\} & = & F_{Y_n}(\theta-\epsilon) \\ \pause
& = & \left(\frac{\theta-\epsilon}{\theta}\right)^n \\ \pause
& \rightarrow & 0 \pause \mbox{ ~~~because } \frac{\theta-\epsilon}{\theta}<1. \pause
\end{eqnarray*}
So the observed maximum data value goes in probability to $\theta$, the theoretical maximum data value.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
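\begin{frame}[fragile]
\frametitle{Checking the example by simulation}
\framesubtitle{A small R sketch; the choices $\theta=1$, $\epsilon=0.05$ and the sample sizes are arbitrary}
One way to see $P\{|Y_n-\theta|\geq\epsilon\} \rightarrow 0$ is to simulate many samples for increasing $n$ and compare the observed proportion to the exact value $\left(\frac{\theta-\epsilon}{\theta}\right)^n$.
{\footnotesize
\begin{verbatim}
# Simulate Y_n = max(X_1,...,X_n) for X_i ~ Uniform(0,theta)
set.seed(9999)               # Arbitrary seed, for reproducibility
theta <- 1; epsilon <- 0.05
for(n in c(10, 50, 100))
  {
  Yn <- replicate(10000, max(runif(n, min = 0, max = theta)))
  cat("n =", n,
      " simulated P(|Y_n - theta| >= epsilon) =",
      mean(abs(Yn - theta) >= epsilon),
      " exact =", ((theta - epsilon)/theta)^n, "\n")
  }
\end{verbatim}
} % End size
Both the simulated and the exact probabilities head to zero as $n$ grows.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%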
\begin{frame}
\frametitle{The Law of Large Numbers} \pause
%\framesubtitle{}
Theorem: Let $X_1, \ldots, X_n$ be independent random variables with expected value $\mu$ and variance $\sigma^2$. \pause Then $\overline{X}_n = \frac{1}{n}\sum_{i=1}^n X_i$ converges in probability \pause to $\mu$. \pause
\vspace{3mm}
{\small
\begin{itemize}
\item This is not surprising, because $E(\overline{X}_n) = \mu$ and \pause
\item $Var(\overline{X}_n) = \frac{\sigma^2}{n}$: \pause
\begin{eqnarray*}
Var\left( \frac{1}{n}\sum_{i=1}^n X_i \right) \pause & = & \frac{1}{n^2} Var\left(\sum_{i=1}^n X_i \right) \\ \pause
& = & \frac{1}{n^2} \sum_{i=1}^n Var(X_i) \\ \pause
& = & \frac{1}{n^2} \sum_{i=1}^n \sigma^2 \pause
    = \frac{1}{n^2} \, n\sigma^2 \pause
    = \frac{\sigma^2}{n} \pause \downarrow 0.
\end{eqnarray*}
\item And the implications are huge.
\end{itemize}
} % End size
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Probability is long-run relative frequency} \pause
%\framesubtitle{Implied by the Law of Large Numbers}
This follows from the Law of Large Numbers. \pause Repeat some process over and over a lot of times, and count how many times the event $A$ occurs. \pause Independently for $i=1, \ldots, n$, \pause
\begin{itemize}
\item Let $X_i(\omega)=1$ if $\omega \in A$, \pause and $X_i(\omega)=0$ if $\omega \notin A$. \pause
\item So $X_i$ is an \emph{indicator} for the event $A$. \pause
\item $X_i$ is Bernoulli, with $P(X_i=1) = p \pause = P(A)$. \pause
\item $E(X_i) = \sum_{x=0}^1 x \, p(x) \pause = 0\cdot(1-p) + 1\cdot p \pause = p$. \pause
\item $\overline{X}_n$ is the proportion of times the event occurs in $n$ independent trials. \pause
\item The proportion of successes converges in probability to $P(A)$.
% \pause
% \item So while $\overline{X}_n$ is a random quantity with its own probability distribution, \pause
% \item That distribution shrinks to fit in a tiny interval around $P(A)$, no matter how small the interval.
\end{itemize}
\vspace{3mm}
\begin{picture}(10,10)
% Line, direction (1,0), horizontal extent 200, starting point (50,0)
\put(50,0){\line(1,0){200} }
\put(150,5){\line(0,-1){10} }
\put(148,-15){$p$}
\put(100,-2){(} % Left parenthesis
\put(200,-2){)} % Right parenthesis
\put(90,-15){$p-\epsilon$}
\put(190,-15){$p+\epsilon$}
\end{picture}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Proof of the Law of Large Numbers}
\framesubtitle{Using $E(\overline{X}_n)= \mu$ and $Var(\overline{X}_n) = \frac{\sigma^2}{n}$} \pause
\begin{itemize}
\item Chebyshev's inequality says $P(|X-\mu| \geq k\sigma) \leq \frac{1}{k^2}$. \pause
\item Here, $X$ is replaced by $\overline{X}_n$ and $\sigma$ is replaced by $\frac{\sigma}{\sqrt{n}}$. \pause
\item So Chebyshev's inequality becomes $P(|\overline{X}_n-\mu| \geq k\frac{\sigma}{\sqrt{n}}) \leq \frac{1}{k^2}$. \pause
\item $k>0$ is arbitrary, so set $\frac{k\sigma}{\sqrt{n}}=\epsilon$. \pause
\item Then $k=\frac{\epsilon\sqrt{n}}{\sigma}$ \pause and $\frac{1}{k^2}= \frac{\sigma^2}{\epsilon^2 n}$. \pause
\item Thus, \pause
\end{itemize}
{\LARGE
\begin{displaymath}
0 \leq P\{|\overline{X}_n-\mu|\geq\epsilon\} \pause \leq \frac{\sigma^2}{\epsilon^2 n} \pause \downarrow 0
\end{displaymath}
} % End size
Squeeze. $\blacksquare$
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
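\begin{frame}[fragile]
\frametitle{The Law of Large Numbers by simulation}
\framesubtitle{A small R sketch; the probability $p=0.37$ and the number of trials are arbitrary choices}
The sample proportion $\overline{X}_n$ of independent Bernoulli trials should settle down near $p = P(A)$ as $n$ grows.
{\footnotesize
\begin{verbatim}
# Running proportion of successes in Bernoulli(p) trials
set.seed(32448)                          # Arbitrary seed
p <- 0.37                                # P(A), an arbitrary choice
x <- rbinom(100000, size = 1, prob = p)  # 100,000 independent trials
xbar <- cumsum(x) / (1:100000)           # Proportion of successes so far
xbar[c(10, 100, 1000, 10000, 100000)]    # Should creep toward 0.37
\end{verbatim}
} % End size
The displayed proportions wander at first and then hug $p$, which is what convergence in probability leads us to expect.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%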
\begin{frame}
\frametitle{Theorem}
\framesubtitle{Proof omitted in 2018} \pause
Let $g(x)$ be a function that is continuous at $x=c$. If $T_n$ converges in probability to $c$, then $g(T_n)$ converges in probability to $g(c)$. \pause
\vspace{5mm}
% Examples:
\begin{itemize}
\item A Geometric distribution has expected value $1/p$. \pause $1/\overline{X}_n$ converges in probability to $1/E(X_i) \pause = p$. \pause
\item A Uniform($0,\theta$) distribution has expected value $\theta/2$. \pause $2\overline{X}_n$ converges in probability to $2E(X_i) \pause = 2\frac{\theta}{2}=\theta$.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Central Limit Theorem}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Convergence in distribution}
\framesubtitle{Another mode of convergence} \pause
Definition: Let the random variables $X_1, X_2, \ldots$ have cumulative distribution functions $F_1(x), F_2(x), \ldots$\pause, and let the random variable $X$ have cumulative distribution function $F(x)$. \pause The sequence of random variables $X_n$ is said to \emph{converge in distribution} to $X$ if \pause
{\LARGE
\begin{displaymath}
\lim_{n \rightarrow \infty}F_n(x) = F(x)
\end{displaymath} \pause
\vspace{4mm}
} % End size
at every point where $F(x)$ is continuous.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Example: Convergence to a Bernoulli with $p=\frac{1}{2}$}
\framesubtitle{$\lim_{n \rightarrow \infty}F_n(x) = F(x)$ at all continuity points of $F(x)$} \pause
\begin{displaymath}
p_n(x) = \left\{ \begin{array}{cl} % cl means centred, left
1/2 & \mbox{for } x=\frac{1}{n} \\
1/2 & \mbox{for } x=1+\frac{1}{n} \\
0 & \mbox{Otherwise}
\end{array} \right.
\end{displaymath}
\vspace{3mm} \pause
\begin{picture}(10,10)(0,-10)
\put(15,-2){$n=1$}
\put(50,0){\line(1,0){200} }
\put(150,5){\line(0,-1){10} }
\put(100,5){\line(0,-1){10} }
\put(200,5){\line(0,-1){10} }
\put(98,-15){0}
\put(148,-15){1}
\put(198,-15){2}
\put(197.5,-2){$\bullet$}
\put(147.5,-2){$\bullet$}
\end{picture} \pause
\begin{picture}(10,10)(0,10)
\put(15,-2){$n=2$}
\put(50,0){\line(1,0){200} }
\put(150,5){\line(0,-1){10} }
\put(100,5){\line(0,-1){10} }
\put(200,5){\line(0,-1){10} }
\put(98,-15){0}
\put(148,-15){1}
\put(198,-15){2}
\put(172.5,-2){$\bullet$}
\put(122.5,-2){$\bullet$}
\end{picture} \pause
\begin{picture}(10,10)(0,30)
\put(15,-2){$n=3$}
\put(50,0){\line(1,0){200} }
\put(150,5){\line(0,-1){10} }
\put(100,5){\line(0,-1){10} }
\put(200,5){\line(0,-1){10} }
\put(98,-15){0}
\put(148,-15){1}
\put(198,-15){2}
\put(164.7,-2){$\bullet$}
\put(114.7,-2){$\bullet$}
\end{picture} \pause
\vspace{15mm}
\begin{itemize}
\item For $x<0$, $\lim_{n \rightarrow \infty}F_n(x)=$ \pause $0$ \pause
\item For $0<x<1$, $\lim_{n \rightarrow \infty}F_n(x)=$ \pause $\frac{1}{2}$ \pause
\item For $x>1$, $\lim_{n \rightarrow \infty}F_n(x)=$ \pause $1$ \pause
\item What happens at $x=0$ and $x=1$ does not matter.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Convergence to a constant} \pause
%\framesubtitle{}
{\small
Consider a ``degenerate'' random variable $X$ with $P(X=c)=1$.
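Its cumulative distribution function is $F(x)=0$ for $x<c$ and $F(x)=1$ for $x \geq c$, so the only point where $F(x)$ fails to be continuous is $x=c$.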
\pause
\vspace{3mm}
\begin{picture}(10,10) % (25,-25)
% Line, direction (1,0), horizontal extent 200, starting point (50,0)
\put(50,0){\line(1,0){200} }
\put(150,5){\line(0,-1){10} }
\put(148,-15){$c$}
\put(100,-2){(} % Left parenthesis
\put(200,-2){)} % Right parenthesis
\put(90,-15){$c-\epsilon$}
\put(190,-15){$c+\epsilon$}
\end{picture} \pause
\vspace{5mm}
Suppose $X_n$ converges in probability to $c$. \pause
\begin{itemize}
\item Then for any $x>c$, $F_n(x) \rightarrow 1$ for $\epsilon$ small enough. \pause
\item And for any $x<c$, $F_n(x) \rightarrow 0$. \pause
\item So $F_n(x) \rightarrow F(x)$ at every point where $F(x)$ is continuous\pause, and $X_n$ converges in distribution to the constant $c$. \pause
\end{itemize}
Now suppose $X_n$ converges in distribution to the constant $c$\pause, so that $F_n(x) \rightarrow 1$ for $x>c$ and $F_n(x) \rightarrow 0$ for $x<c$. \pause Let $\epsilon>0$ be given. \pause
\begin{eqnarray*}
P\{|X_n-c|<\epsilon\} & = & F_n(c+\epsilon)-F_n(c-\epsilon) \pause \mbox{ so} \\ \pause
\lim_{n \rightarrow \infty}P\{|X_n-c|<\epsilon\} & = & \lim_{n \rightarrow \infty}F_n(c+\epsilon) - \lim_{n \rightarrow \infty}F_n(c-\epsilon) \\ \pause
& = & 1-0 \pause = 1
\end{eqnarray*} \pause
And $X_n$ converges in probability to $c$.
} % End size of whole slide.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Comment}
%\framesubtitle{}
\begin{itemize}
\item Convergence in probability might seem redundant, because it's just convergence in distribution to a constant. \pause
\item But that's only true when the convergence is to a constant. \pause
\item Convergence in probability to a non-degenerate random variable \pause implies convergence in distribution. \pause
\item But convergence in distribution does not imply convergence in probability \pause when the convergence is to a non-degenerate variable.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Big Theorem about convergence in distribution}
\framesubtitle{Book calls it the ``Continuity Theorem''} \pause
Let the random variables $X_1, X_2, \ldots$ have cumulative distribution functions $F_1(x), F_2(x), \ldots$ and moment-generating functions $M_1(t), M_2(t), \ldots$. \pause Let the random variable $X$ have cumulative distribution function $F(x)$ and moment-generating function $M(t)$. \pause If
\begin{displaymath}
\lim_{n \rightarrow \infty} M_n(t) = M(t)
\end{displaymath} \pause
for all $t$ in an open interval containing $t=0$, \pause then $X_n$ converges in distribution to $X$. \pause
\vspace{5mm}
The idea is that convergence of moment-generating functions implies convergence of distribution functions.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Example: Poisson approximation to the binomial}
\framesubtitle{We did this before with probability mass functions and it was a challenge.} \pause
Let $X_n$ be a binomial ($n,p_n$) random variable with $p_n=\frac{\lambda}{n}$, so that $n \rightarrow \infty$ and $p_n \rightarrow 0$ in such a way that the value of $n \, p_n=\lambda$ remains fixed. Find the limiting distribution of $X_n$. \pause
\vspace{1mm}
Recalling that the MGF of a Poisson is $e^{\lambda(e^t-1)}$ and $\left(1 + \frac{x}{n}\right)^n \rightarrow e^x$, \pause
\begin{eqnarray*}
M_n(t) & = & (p_n e^t+1-p_n)^n \\ \pause
& = & \left(\frac{\lambda}{n}e^t+1-\frac{\lambda}{n} \right)^n \\ \pause
& = & \left(1+\frac{\lambda(e^t-1)}{n} \right)^n \\ \pause
& \rightarrow & e^{\lambda(e^t-1)}, \pause
\end{eqnarray*}
the MGF of a Poisson($\lambda$). \pause So by the Continuity Theorem, $X_n$ converges in distribution to a Poisson($\lambda$) random variable.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
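\begin{frame}[fragile]
\frametitle{Checking the Poisson approximation numerically}
\framesubtitle{A small R sketch; $\lambda=4$ and $n=10{,}000$ are arbitrary choices}
Convergence in distribution says the binomial probabilities should be close to the Poisson probabilities when $n$ is large and $p_n = \lambda/n$ is small.
{\footnotesize
\begin{verbatim}
# Compare Binomial(n, lambda/n) with Poisson(lambda)
lambda <- 4; n <- 10000
k <- 0:10                               # A few values of x
cbind(binomial = dbinom(k, size = n, prob = lambda/n),
      poisson  = dpois(k, lambda))      # Probabilities side by side
\end{verbatim}
} % End size
The two columns agree closely, and the agreement improves as $n$ increases.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%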
\begin{frame}
\frametitle{The Central Limit Theorem} \pause
%\framesubtitle{}
Let $X_1, \ldots, X_n$ be independent random variables from a distribution with expected value $\mu$ and variance $\sigma^2$. \pause Then
\begin{displaymath}
Z_n = \frac{\sqrt{n}(\overline{X}_n-\mu)}{\sigma}
\end{displaymath} \pause
converges in distribution to $Z \sim$ Normal(0,1). \pause
\vspace{5mm}
In practice, $Z_n$ is often treated as standard normal for $n>25$\pause, although the $n$ required for an accurate approximation really depends on the distribution.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Sometimes we say the distribution of the sample mean is approximately normal, or ``asymptotically'' normal.} \pause
%\framesubtitle{}
\begin{itemize}
\item This is justified by the Central Limit Theorem. \pause
\item But it does \emph{not} mean that $\overline{X}_n$ converges in distribution to a normal random variable. \pause
\item The Law of Large Numbers says that $\overline{X}_n$ converges in probability to a constant, $\mu$. \pause
\item So $\overline{X}_n$ converges to $\mu$ in distribution as well. \pause
\item That is, $\overline{X}_n$ converges in distribution to a degenerate random variable with all its probability at $\mu$.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Why would we say that for large $n$, the sample mean is approximately $N(\mu,\frac{\sigma^2}{n})$?} \pause
\vspace{5mm}
Have $Z_n = \frac{\sqrt{n}(\overline{X}_n-\mu)}{\sigma}$ \pause converging to $Z \sim N(0,1)$. \pause
{\footnotesize
\begin{eqnarray*}
Pr\{\overline{X}_n \leq x\} \pause & = & Pr\left\{ \frac{\sqrt{n}(\overline{X}_n-\mu)}{\sigma} \leq \frac{\sqrt{n}(x-\mu)}{\sigma}\right\} \\ \pause
& = & Pr\left\{ Z_n \leq \frac{\sqrt{n}(x-\mu)}{\sigma}\right\} \pause
\approx \Phi\left( \frac{\sqrt{n}(x-\mu)}{\sigma} \right)
\end{eqnarray*}
} \pause
Suppose $Y$ is \emph{exactly} $N(\mu,\frac{\sigma^2}{n})$: \pause
{\footnotesize
\begin{eqnarray*}
Pr\{Y \leq x\} \pause & = & Pr\left\{ \frac{\sqrt{n}(Y-\mu)}{\sigma} \leq \frac{x-\mu}{\sigma/\sqrt{n}}\right\} \\ \pause
& = & Pr\left\{ Z \leq \frac{\sqrt{n}(x-\mu)}{\sigma}\right\} \pause
= \Phi\left( \frac{\sqrt{n}(x-\mu)}{\sigma} \right),
\end{eqnarray*}
} % End size
because $\frac{\sqrt{n}(Y-\mu)}{\sigma}$ is \emph{exactly} standard normal. \pause The two answers agree.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
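\begin{frame}[fragile]
\frametitle{The Central Limit Theorem by simulation}
\framesubtitle{A small R sketch; the exponential distribution, $n=30$ and the number of replications are arbitrary choices}
Even though the $X_i$ are skewed, the standardized sample mean $Z_n$ should behave roughly like a standard normal for largish $n$.
{\footnotesize
\begin{verbatim}
# Z_n = sqrt(n)(Xbar - mu)/sigma for exponential data with mu = sigma = 1
set.seed(4444)                   # Arbitrary seed
n <- 30
Zn <- replicate(10000, sqrt(n) * (mean(rexp(n, rate = 1)) - 1) / 1)
c(mean = mean(Zn), var = var(Zn))      # Should be near 0 and 1
mean(Zn <= 1.645)                      # Compare to pnorm(1.645) = 0.95
hist(Zn, breaks = 50, freq = FALSE)    # Roughly bell shaped
\end{verbatim}
} % End size
The histogram is roughly bell shaped and the empirical quantities are reasonably close to their standard normal counterparts; the agreement improves as $n$ grows.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%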
\begin{frame}
\frametitle{Copyright Information}
This slide show was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a
\href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}{Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely.
The \LaTeX~source code is available from the course website:
\vspace{5mm}
\href{http://www.utstat.toronto.edu/~brunner/oldclass/256f18}{\small\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/256f18}}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{document}

# R code for plots of normal MGFs
tt = seq(from=-1,to=1,by=0.05)
mu = 0; sigsq = 1
zero = exp(mu*tt + 0.5*sigsq*tt^2)
mu = 1; one = exp(mu*tt + 0.5*sigsq*tt^2)
mu = -1; minusone = exp(mu*tt + 0.5*sigsq*tt^2)
x = c(tt,tt,tt); y = c(zero,one,minusone)
plot(x,y,pch=' ',xlab='t',ylab = 'M(t)')
lines(tt,zero,lty=1)
lines(tt,one,lty=2)
lines(tt,minusone,lty=3)
title("Fingerprints of the normal distribution")
# Legend
x1 <- c(-0.4,0) ; y1 <- c(4,4) ; lines(x1,y1,lty=1)
text(0.25,4,expression(paste(mu," = 0, ",sigma^2," = 1")))
x2 <- c(-0.4,0) ; y2 <- c(3.75,3.75) ; lines(x2,y2,lty=2)
text(0.25,3.75,expression(paste(mu," = 1, ",sigma^2," = 1")))
x3 <- c(-0.4,0) ; y3 <- c(3.5,3.5) ; lines(x3,y3,lty=3)
text(0.25,3.5,expression(paste(mu," = -1, ",sigma^2," = 1")))

# R code for plots of chi-squared MGFs
tt = seq(from=-0.25,to=0.25,by=0.005)
nu = 1; one = (1-2*tt)^(-nu/2)
nu = 2; two = (1-2*tt)^(-nu/2)
nu = 3; three = (1-2*tt)^(-nu/2)
x = c(tt,tt,tt); y = c(one,two,three)
plot(x,y,pch=' ',xlab='t',ylab = 'M(t)')
lines(tt,one,lty=1)
lines(tt,two,lty=2)
lines(tt,three,lty=3)
title("Fingerprints of the chi-squared distribution")
# Legend
x1 <- c(-0.2,-0.1) ; y1 <- c(2.5,2.5) ; lines(x1,y1,lty=1)
text(-0.05,2.5,expression(paste(nu," = 1")))
x2 <- c(-0.2,-0.1) ; y2 <- c(2.3,2.3) ; lines(x2,y2,lty=2)
text(-0.05,2.3,expression(paste(nu," = 2")))
x3 <- c(-0.2,-0.1) ; y3 <- c(2.1,2.1) ; lines(x3,y3,lty=3)
text(-0.05,2.1,expression(paste(nu," = 3")))