% \documentclass[serif]{beamer} % Serif for Computer Modern math font.
\documentclass[serif, handout]{beamer} % Handout to ignore pause statements
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
\usetheme{AnnArbor} % CambridgeUS
% \usetheme{Frankfurt} % Displays section titles on top: Fairly thin but still swallows some material at bottom of crowded slides
% \usetheme{Berlin} % Displays sections on top
% \usetheme{Berkeley}
\usepackage[english]{babel}
\usepackage{amsmath} % for binom
% \usepackage{graphicx} % To include pdf files!
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\setbeamertemplate{footline}[frame number]
\mode<presentation>

\title{Introduction to Time Series\footnote{
This slide show is an open-source document. See last slide for copyright information.}}
\date{} % To suppress date

\begin{document}

\begin{frame}
\titlepage
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Time series}
% \framesubtitle{}
A sequence of measurements (random variables) $X_1, X_2, \ldots\pause, X_n$
\pause
\begin{itemize}
\item Not a random sample.
\item Not necessarily independent.
\item Sequentially dependent.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{}
%\framesubtitle{}
\begin{center}
\includegraphics[width=3.4in]{1}
\end{center}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{}
%\framesubtitle{}
\begin{center}
\includegraphics[width=3in]{2}
\end{center}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{}
%\framesubtitle{}
\begin{center}
\includegraphics[width=3in]{3}
\end{center}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{}
%\framesubtitle{}
\begin{center}
\includegraphics[width=3in]{4}
\end{center}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{}
%\framesubtitle{}
\begin{center}
\includegraphics[width=3in]{5}
\end{center}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Correlations: 50 pairs of independent random walks, $n=1000$ steps}
\framesubtitle{Need around $|r| = 0.13$ for significance}
{\footnotesize % or scriptsize
\begin{verbatim}
-0.28175 -0.22242 -0.32170 -0.45053  0.07866
 0.59167 -0.27414 -0.82570 -0.62175  0.43537
 0.84147  0.04103 -0.17502 -0.89710 -0.19116
-0.53865 -0.50889  0.42855 -0.91074  0.90577
 0.22818  0.84834 -0.52501  0.82583 -0.06838
-0.00234  0.16084  0.81393 -0.07063 -0.09908
-0.38405 -0.28510  0.24850  0.12445  0.33509
 0.33586  0.41241 -0.33482 -0.32021 -0.73808
 0.14045 -0.03618 -0.67757  0.81121 -0.39379
-0.58832 -0.26866  0.16687  0.38541  0.12433
\end{verbatim}
} % End size
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Random walk}
\framesubtitle{Sometimes called Drunkard's walk}
\begin{itemize}
\item Take a step left or right at random.
\item Steps could be of variable length.
\item Location at time $t$ depends on location at time $t-1$.
\end{itemize}
{\LARGE
\begin{displaymath}
X_t = X_{t-1} + \epsilon_t
\end{displaymath}
\pause
} % End size
$\epsilon_1, \epsilon_2, \ldots$ all independent and identically distributed.
\end{frame}
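%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Simulating a random walk in R}
\framesubtitle{A minimal sketch; the seed and the $\pm 1$ step size are arbitrary choices}
The location is just the cumulative sum of the steps.
{\footnotesize
\begin{verbatim}
# Random walk: X_t = X_{t-1} + epsilon_t
set.seed(9999)                       # Arbitrary seed
n <- 1000
epsilon <- 2*rbinom(n, 1, 0.5) - 1   # Each step is +1 or -1
x <- cumsum(epsilon)                 # X_t = X_{t-1} + epsilon_t
plot(1:n, x, type = 'l')
\end{verbatim}
} % End size
\end{frame}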
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Autoregressive Time Series}
\framesubtitle{A generalization of the random walk}
\pause
\begin{tabular}{ll}
$X_t = X_{t-1} + \epsilon_t$ & Random walk \\ \pause
$X_t = \beta_0 + \beta_1 X_{t-1} + \epsilon_t$ & First-order autoregressive \\ \pause
$X_t = \beta_0 + \beta_1 X_{t-1} + \beta_2 X_{t-2} + \epsilon_t$ & Second-order autoregressive \\
\end{tabular}
\begin{center}
etc.
\end{center}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Stationary Time Series}
%\framesubtitle{}
\begin{itemize}
\item In a stationary time series, the distribution of $X_t$ does not depend on $t$.
\item In particular, all the $X_t$ have the same mean and variance.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Expected value does not change}
%\framesubtitle{}
{\LARGE
\begin{eqnarray*}
E(X_t) &=& E(\beta_0 + \beta_1 X_{t-1} + \epsilon_t) \\ \pause
&=& \beta_0 + \beta_1 E(X_{t-1}) + 0 \\ \pause
\Rightarrow \mu &=& \beta_0 + \beta_1\mu \\ \pause
\Rightarrow \beta_0 &=& \mu(1-\beta_1) \\ \pause
\Rightarrow \mu &=& \frac{\beta_0}{1-\beta_1}
\end{eqnarray*}
} % End size
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Variance does not change}
%\framesubtitle{}
{\LARGE
\begin{eqnarray*}
Var(X_t) &=& Var(\beta_0 + \beta_1 X_{t-1} + \epsilon_t) \\ \pause
&=& \beta_1^2 Var(X_{t-1}) + Var(\epsilon_t) \\ \pause
\Rightarrow \sigma^2 &=& \beta_1^2 \sigma^2 + Var(\epsilon_t) \\ \pause
\Rightarrow Var(\epsilon_t) &=& \sigma^2(1-\beta_1^2)
\end{eqnarray*}
} % End size
\pause
{\small The second line uses the independence of $X_{t-1}$ and $\epsilon_t$.}
\end{frame}
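%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Checking the variance calculation by simulation}
\framesubtitle{A minimal R sketch; $\beta_0 = 0$, $\rho = 0.75$ and $\sigma^2 = 1$ are arbitrary choices}
Give the errors variance $\sigma^2(1-\beta_1^2)$ and the sample variance of a long series should stay near $\sigma^2$.
{\footnotesize
\begin{verbatim}
# Stationary AR(1) with Var(epsilon_t) = (1-rho^2)*sigma^2
set.seed(4444)                        # Arbitrary seed
n <- 100000 ; rho <- 0.75             # sigma^2 = 1
e <- rnorm(n, mean = 0, sd = sqrt(1-rho^2))
x <- numeric(n)
x[1] <- rnorm(1)                      # Start in the stationary distribution
for(t in 2:n) x[t] <- rho*x[t-1] + e[t]
var(x)                                # Should be close to sigma^2 = 1
\end{verbatim}
} % End size
\end{frame}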
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Covariance}
%\framesubtitle{}
{\large
\begin{eqnarray*}
Cov(X_{t-1},X_t) & = & Cov(X_{t-1},\beta_0 + \beta_1 X_{t-1} + \epsilon_t) \\ \pause
& = & \beta_1 Cov(X_{t-1},X_{t-1}) + Cov(X_{t-1},\epsilon_t) \\ \pause
& = & \beta_1 Var(X_{t-1}) + 0 \\ \pause
& = & \beta_1 \sigma^2 \pause
\end{eqnarray*}
} % End size
So
{\large
\begin{eqnarray*}
Corr(X_{t-1},X_t) & = & \frac{\beta_1 \sigma^2}{\sqrt{\sigma^2}\sqrt{\sigma^2}} \\ \pause
& = & \beta_1
\end{eqnarray*}
} % End size
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{$Corr(X_t,X_{t-1}) = \beta_1$}
\framesubtitle{Where $X_t = \beta_0 + \beta_1 X_{t-1} + \epsilon_t$}
\pause
\begin{itemize}
\item The regression coefficient $\beta_1$ is usually denoted by $\rho$. \pause
\item It is called the \textbf{first-order autocorrelation}. \pause
\item Continuing the calculations, we get $Corr(X_t,X_{t-2}) = \rho^2$, \ldots \pause
\item In general, $Corr(X_t,X_{t-m}) = \rho^m$. \pause
\item So the covariance matrix looks like this:
\end{itemize}
\begin{displaymath}
\sigma^2 \left( \begin{array}{c c c c c}
1      & \rho   & \rho^2 & \rho^3 & \cdots \\
\rho   & 1      & \rho   & \rho^2 & \cdots \\
\rho^2 & \rho   & 1      & \rho   & \cdots \\
\rho^3 & \rho^2 & \rho   & 1      & \cdots \\
\vdots & \vdots & \vdots & \vdots & \ddots \\
\end{array} \right)
\end{displaymath}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Signatures} \pause
\framesubtitle{Identifying the time series model}
\begin{itemize}
\item Because $-1< \rho < 1$, the sequence $\rho, \rho^2, \rho^3, \ldots$ displays \emph{exponential decay}: Graph it. \pause
\item Other time series structures have known signatures too. \pause
\item Higher-order autoregressive. \pause
\item Moving average. \pause
\item ARMA: Autoregressive Moving Average. \pause
\item Seasonal: Blips at seasonal lags. \pause
\item Non-stationary. \pause
\item Differencing is a big trick. \pause
\item ARIMA: Autoregressive Integrated Moving Average. \pause
\item Theorem: Any stationary process can be approximated arbitrarily well by an autoregressive process with enough lags.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Time series structures for the \emph{error terms} (epsilons) in a regression}
%\framesubtitle{}
\begin{itemize}
\item What is the error term $\epsilon$ in a regression?
\item Everything that affects $y$ other than the $x$ variables.
\item Maybe those omitted variables are sequentially dependent. \pause
\item For example, temperature influences pop sales. \pause
\item Is sequential dependence likely? It depends on the logic of the data collection. \pause
\item Diagnose it with the Durbin-Watson test and time series diagnostics on the residuals.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Durbin-Watson Test for Autocorrelation} \pause
%\framesubtitle{}
\begin{itemize}
\item Usually, autocorrelation is positive.
\item $H_0: \rho=0$ vs.~$H_1: \rho>0$
\pause
{\LARGE
\begin{displaymath}
D = \frac{\sum_{i=2}^n(e_i-e_{i-1})^2}{\sum_{i=1}^n e_i^2}
\end{displaymath}
} % End size
\item Reject when $D$ is small. How small? \pause
\item Critical values and $p$-values are brutally hard to compute. \pause
\item Durbin and Watson published tables with upper and lower bounds for the critical values. \pause
\item Now SAS can compute exact $p$-values, but it's an option.
\end{itemize}
\end{frame}
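%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]
\frametitle{Computing $D$ in R}
\framesubtitle{A sketch with made-up data; only the formula for $D$ comes from the preceding slide}
{\footnotesize
\begin{verbatim}
# Regression with AR(1) errors, then the Durbin-Watson statistic
set.seed(7777)                   # Made-up illustrative data
x <- 1:50
y <- 10 + 0.5*x + arima.sim(list(ar = 0.6), n = 50)
e <- residuals(lm(y ~ x))        # Regression residuals
D <- sum(diff(e)^2) / sum(e^2)   # sum (e_i - e_{i-1})^2 / sum e_i^2
D                                # Near 2 if rho = 0, small if rho > 0
\end{verbatim}
} % End size
The \texttt{dwtest} function in the R package \texttt{lmtest} computes a $p$-value for the same statistic.
\end{frame}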
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{What to do about autocorrelated residuals} \pause
%\framesubtitle{}
\begin{itemize}
\item Try adding more explanatory variables, perhaps including time. \pause
\item Consider differencing. \pause
\item Directly model autocorrelated errors.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{\texttt{proc autoreg}}
%\framesubtitle{}
\begin{itemize}
\item Regression model with autoregressive errors: covers a lot of important cases. \pause
\item Especially in combination with \emph{lagged} explanatory variables. \pause
\item Estimate the $\beta_j$ and $\rho_k$ all at once by maximum likelihood. \pause
\item \texttt{proc autoreg} has many capabilities. As usual, we will explore just a few. \pause
\item Can you say GARCH?
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Copyright Information}

This slide show was prepared by \href{http://www.utstat.toronto.edu/brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a \href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}{Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website:

\vspace{5mm}

\href{http://www.utstat.toronto.edu/brunner/oldclass/441s24}
{\small\texttt{http://www.utstat.toronto.edu/brunner/oldclass/441s24}}

\end{frame}

\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
===========================================================================
# Simulate a stationary AR(1) series with rho = 0.75 and unit variance,
# then look at its autocorrelation function.
n <- 2000 ; rho <- 0.75 ; set.seed(32448)
e <- rnorm(n) ; epsilon <- numeric(n) ; epsilon[1] <- e[1]
for(i in 2:n) epsilon[i] <- rho*epsilon[i-1] + sqrt(1-rho^2)*e[i]
acf(epsilon)
josh <- acf(epsilon) ; josh   # Store and print the sample autocorrelations
===========================================================================
# One random walk of 1000 unit steps starting at 200: the
# "Trend, or Drift?" picture.
set.seed(32448)
f <- 200 ; trials <- 1000
x <- NULL ; y <- NULL
for(i in 1:trials)
    {
    x <- c(x,i)
    f <- f + 2*rbinom(1,1,.5) - 1   # Step up or down with probability 1/2
    y <- c(y,f)
    }
plot(x,y,type='l')
title("Trend, or Drift?")
===========================================================================
# Two independent random walks on the same axes: the "Related?" picture.
set.seed(52448)
f1 <- f2 <- 200 ; trials <- 1000
x <- NULL ; y1 <- y2 <- NULL
for(i in 1:trials)
    {
    x <- c(x,i)
    f1 <- f1 + 2*rbinom(1,1,.5) - 1
    f2 <- f2 + 2*rbinom(1,1,.5) - 1
    y1 <- c(y1,f1) ; y2 <- c(y2,f2)
    }
X <- c(x,x) ; Y <- c(y1,y2)
plot(X,Y,pch=' ')      # Empty plot region with the right axis limits
lines(x,y1,type='l')
lines(x,y2,lty=2)
title('Related?')
===========================================================================
# Correlations between 50 pairs of independent random walks: the numbers
# shown on the correlations slide.
set.seed(32448)
kor <- NULL
for(sim in 1:50)
    {
    f1 <- f2 <- 200 ; trials <- 1000
    x <- NULL ; y1 <- y2 <- NULL
    for(i in 1:trials)
        {
        x <- c(x,i)
        f1 <- f1 + 2*rbinom(1,1,.5) - 1
        f2 <- f2 + 2*rbinom(1,1,.5) - 1
        y1 <- c(y1,f1) ; y2 <- c(y2,f2)
        }
    kor <- c(kor,cor(y1,y2))
    }
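===========================================================================
# Quick summary of the 50 random-walk correlations in kor; the 0.13
# cutoff is the rough significance bound quoted on the slide.
summary(kor)
hist(kor)
mean(abs(kor) > 0.13)   # Proportion of "significant" correlations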