% STA2112f99 Convergence Handout
\documentclass[12pt]{article}
\pagestyle{empty} % No page numbers
\usepackage{amsbsy}
\usepackage{amsfonts}
\begin{document}

\begin{center}
{\Large Convergence of Sequences of Random Variables}\\
\end{center}
%\vspace{3 mm}
\enlargethispage*{1000 pt}

\begin{itemize}

\item Definitions
  \begin{itemize}
  \item[$\star$] $X_n \stackrel{a.s.}{\rightarrow} X$ means
    $P\{s:\, \lim_{n \rightarrow \infty} X_n(s) = X(s)\}=1$.
  \item[$\star$] $X_n \stackrel{P}{\rightarrow} X$ means
    $\forall \epsilon>0,\, \lim_{n \rightarrow \infty} P\{|X_n-X|<\epsilon\}=1$.
  \item[$\star$] $X_n \stackrel{d}{\rightarrow} X$ means that for every continuity point
    $x$ of $F_X$, $\lim_{n \rightarrow \infty} F_{X_n}(x) = F_X(x)$.
  \end{itemize}

\item $X_n \stackrel{a.s.}{\rightarrow} X$ if and only if $\forall \epsilon>0$,
  $\lim_{n \rightarrow \infty} P\left(\cap_{k=n}^\infty \{|X_k-X|<\epsilon\}\right)=1$.

\item $X_n \stackrel{a.s.}{\rightarrow} X \Rightarrow X_n \stackrel{P}{\rightarrow} X
  \Rightarrow X_n \stackrel{d}{\rightarrow} X$.

\item If $a$ is a constant, $X_n \stackrel{d}{\rightarrow} a \Rightarrow
  X_n \stackrel{P}{\rightarrow} a$.

\item If $X_n$ and $X$ have densities and
  $\lim_{n \rightarrow \infty} f_{X_n}(x) = f_X(x)$ for each $x$, then
  $X_n \stackrel{d}{\rightarrow} X$.

\item Let $\mathbf{X}$ and $\mathbf{X}_n$ be random vectors in $\mathbb{R}^k$.
  Then $\mathbf{X}_n \stackrel{d}{\rightarrow} \mathbf{X}$ if and only if
  $\lim_{n \rightarrow \infty} E[g(\mathbf{X}_n)] = E[g(\mathbf{X})]$ for every
  bounded continuous function $g:\, \mathbb{R}^k \rightarrow \mathbb{R}$.

\item Slutsky Theorems for Convergence in Distribution (a brief illustration
  follows the list):
  \begin{enumerate}
  \item If $\mathbf{X}_n \in \mathbb{R}^m$,
    $\mathbf{X}_n \stackrel{d}{\rightarrow} \mathbf{X}$, and
    $f:\, \mathbb{R}^m \rightarrow \mathbb{R}^q$ (where $q \leq m$) is continuous
    except possibly on a set $C$ with $P(\mathbf{X} \in C)=0$, then
    $f(\mathbf{X}_n) \stackrel{d}{\rightarrow} f(\mathbf{X})$.
  \item If $\mathbf{X}_n \stackrel{d}{\rightarrow} \mathbf{X}$ and
    $(\mathbf{X}_n - \mathbf{Y}_n) \stackrel{P}{\rightarrow} 0$, then
    $\mathbf{Y}_n \stackrel{d}{\rightarrow} \mathbf{X}$.
  \item If $\mathbf{X}_n \in \mathbb{R}^d$, $\mathbf{Y}_n \in \mathbb{R}^k$,
    $\mathbf{X}_n \stackrel{d}{\rightarrow} \mathbf{X}$ and
    $\mathbf{Y}_n \stackrel{d}{\rightarrow} \mathbf{c}$, then
    \begin{displaymath}
      \left( \begin{array}{c} \mathbf{X}_n \\ \mathbf{Y}_n \end{array} \right)
      \stackrel{d}{\rightarrow}
      \left( \begin{array}{c} \mathbf{X} \\ \mathbf{c} \end{array} \right)
    \end{displaymath}
  \end{enumerate}
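A brief illustration of how these three results combine (a sketch only; the
normal limit and the constant $\sigma > 0$ are assumptions made for this
example, not part of the list above): suppose
$T_n \stackrel{d}{\rightarrow} T \sim N(0,\sigma^2)$ and
$\hat{\sigma}_n \stackrel{P}{\rightarrow} \sigma$ (hence also
$\hat{\sigma}_n \stackrel{d}{\rightarrow} \sigma$). By item~3,
\begin{displaymath}
  \left( \begin{array}{c} T_n \\ \hat{\sigma}_n \end{array} \right)
  \stackrel{d}{\rightarrow}
  \left( \begin{array}{c} T \\ \sigma \end{array} \right),
\end{displaymath}
and applying item~1 with $f(x,y)=x/y$, which is continuous except on the set
$\{(x,y):\, y=0\}$ of probability zero under the limit, yields
$T_n/\hat{\sigma}_n \stackrel{d}{\rightarrow} T/\sigma \sim N(0,1)$.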
\item Slutsky Theorems for Convergence in Probability:
  \begin{enumerate}
  \item If $\mathbf{X}_n \in \mathbb{R}^m$,
    $\mathbf{X}_n \stackrel{P}{\rightarrow} \mathbf{X}$, and
    $f:\, \mathbb{R}^m \rightarrow \mathbb{R}^q$ (where $q \leq m$) is continuous
    except possibly on a set $C$ with $P(\mathbf{X} \in C)=0$, then
    $f(\mathbf{X}_n) \stackrel{P}{\rightarrow} f(\mathbf{X})$.
  \item If $\mathbf{X}_n \stackrel{P}{\rightarrow} \mathbf{X}$ and
    $(\mathbf{X}_n - \mathbf{Y}_n) \stackrel{P}{\rightarrow} 0$, then
    $\mathbf{Y}_n \stackrel{P}{\rightarrow} \mathbf{X}$.
  \item If $\mathbf{X}_n \in \mathbb{R}^d$, $\mathbf{Y}_n \in \mathbb{R}^k$,
    $\mathbf{X}_n \stackrel{P}{\rightarrow} \mathbf{X}$ and
    $\mathbf{Y}_n \stackrel{P}{\rightarrow} \mathbf{Y}$, then
    \begin{displaymath}
      \left( \begin{array}{c} \mathbf{X}_n \\ \mathbf{Y}_n \end{array} \right)
      \stackrel{P}{\rightarrow}
      \left( \begin{array}{c} \mathbf{X} \\ \mathbf{Y} \end{array} \right)
    \end{displaymath}
  \end{enumerate}

\pagebreak

\item Delta Method: Let $g(x)$ have a second derivative that is continuous at
  $x=\theta$, and let $\sqrt{n}(T_n-\theta) \stackrel{d}{\rightarrow} T$. Then
  $\sqrt{n}\left(g(T_n)-g(\theta)\right) \stackrel{d}{\rightarrow}
  g^\prime(\theta)\,T$.

\item Strong Law of Large Numbers (SLLN): Let $X_1, \ldots, X_n$ be i.i.d.\
  random variables with finite first moment. Then
  $\overline{X}_n \stackrel{a.s.}{\rightarrow} E(X_1)$.

\item Central Limit Theorem: Let $\mathbf{X}_1, \ldots, \mathbf{X}_n$ be i.i.d.\
  random vectors in $\mathbb{R}^k$ with expected value vector $\boldsymbol{\mu}$
  and covariance matrix $\boldsymbol{\Sigma}$. Then
  $\sqrt{n}(\overline{\mathbf{X}}_n-\boldsymbol{\mu})$ converges in distribution
  to a multivariate normal with mean $\mathbf{0}$ and covariance matrix
  $\boldsymbol{\Sigma}$.

\item Lindeberg Central Limit Theorem: Consider the triangular array of random
  variables
  \begin{displaymath}
    \begin{array}{lll}
      X_{11} \\
      X_{21}, & X_{22} \\
      X_{31}, & X_{32}, & X_{33} \\
      \vdots
    \end{array}
  \end{displaymath}
  where the random variables in each row are assumed independent with
  $E(X_{ij})=0$ and $Var(X_{ij})=\sigma^2_{ij}$. Let
  $S_n = \sum_{j=1}^n X_{nj}$, and let
  $v^2_n = Var(S_n) = \sum_{j=1}^n \sigma^2_{nj}$. Then $\frac{S_n}{v_n}$
  converges in distribution to a standard normal provided that, for all
  $\epsilon>0$,
  \begin{displaymath}
    \lim_{n \rightarrow \infty} \frac{1}{v^2_n}
    \sum_{j=1}^n E\left[X^2_{nj}\, I(|X_{nj}| \geq \epsilon v_n)\right] = 0.
  \end{displaymath}

\item $U_n = O_p(V_n)$ means $\forall \epsilon>0$, $\exists\, M=M(\epsilon)$ and
  $N=N(\epsilon)$ such that if $n>N$,
  $P\{\frac{U_n}{V_n} \leq M\} - P\{\frac{U_n}{V_n} \leq -M\} > 1-\epsilon$.
  Usually, $V_n = n^{-t}$ for some $t>0$.
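  For example (a sketch, assuming an i.i.d.\ sample $X_1, \ldots, X_n$ with mean
  $\mu$ and variance $\sigma^2 \in (0,\infty)$, so that the Central Limit
  Theorem above applies with $k=1$): since
  $\sqrt{n}(\overline{X}_n - \mu) \stackrel{d}{\rightarrow} T \sim N(0,\sigma^2)$
  and $\pm M$ are continuity points of the normal distribution function,
  \begin{displaymath}
    P\left\{\sqrt{n}(\overline{X}_n - \mu) \leq M\right\}
    - P\left\{\sqrt{n}(\overline{X}_n - \mu) \leq -M\right\}
    \rightarrow P\{-M < T \leq M\},
  \end{displaymath}
  and the right side exceeds $1-\epsilon$ once $M$ is chosen large enough; the
  left side then exceeds $1-\epsilon$ for all $n$ beyond some $N$. Taking
  $U_n = \overline{X}_n - \mu$ and $V_n = n^{-1/2}$ gives
  $\overline{X}_n - \mu = O_p(n^{-1/2})$.

\end{itemize}

\end{document}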