% \documentclass[serif]{beamer} % Serif for Computer Modern math font.
\documentclass[serif, handout]{beamer} % Handout to ignore pause statements.
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
% \usetheme{Berlin} % Displays sections on top
\usetheme{Frankfurt} % Displays section titles on top: Fairly thin but still swallows some material at bottom of crowded slides
%\usetheme{Berkeley}
\usepackage[english]{babel}
\usepackage{amsmath} % for binom
\usepackage{amsfonts} % for \mathbb{R} The set of reals
% \usepackage{graphicx} % To include pdf files!
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\setbeamertemplate{footline}[frame number]
\mode<presentation>

\title{Joint Distributions: Part Two\footnote{
This slide show is an open-source document. See last slide for copyright information.}}
\subtitle{STA 256: Fall 2018}
\date{} % To suppress date

\begin{document}

\begin{frame}
\titlepage
\end{frame}

\begin{frame}
\frametitle{Overview}
\tableofcontents
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Independence}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{frame}
\frametitle{Independent Random Variables} \pause
\framesubtitle{Discrete or Continuous}

The random variables $X$ and $Y$ are said to be \emph{independent} if \pause
{\LARGE
\begin{displaymath}
F_{xy}(x,y) = F_x(x)F_y(y)
\end{displaymath}
\pause
} % End size
\vspace{6mm}
for all real $x$ and $y$.

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Theorem (for discrete random variables)}
\framesubtitle{Recalling independence means $F_{xy}(x,y) = F_x(x)F_y(y)$} \pause

The discrete random variables $X$ and $Y$ are independent if and only if \pause
{\LARGE
\begin{displaymath}
p_{xy}(x,y) = p_x(x) \, p_y(y)
\end{displaymath}
} % End size
for all real $x$ and $y$.

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Theorem (for continuous random variables)}
\framesubtitle{Recalling independence means $F_{xy}(x,y) = F_x(x)F_y(y)$} \pause

The continuous random variables $X$ and $Y$ are independent if and only if \pause
{\LARGE
\begin{displaymath}
f_{xy}(x,y) = f_x(x) \, f_y(y)
\end{displaymath}
} % End size
for all real $x$ and $y$.

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conditional Distributions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{frame}
\frametitle{Conditional Distributions}
\framesubtitle{Of discrete random variables} \pause

If $X$ and $Y$ are discrete random variables, the conditional probability mass function of $X$ given $Y=y$ is \pause just a conditional probability. \pause It is given by \pause
\begin{displaymath}
P(X=x|Y=y) = \frac{P(X=x,Y=y)}{P(Y=y)}
\end{displaymath}
\pause
These are just probabilities of events. For example, \pause
\begin{displaymath}
P(X=x,Y=y) = P\{\omega \in \Omega: X(\omega)=x \mbox{ and } Y(\omega)=y \}
\end{displaymath}
\pause
We write
\begin{displaymath}
p_{x|y}(x|y) = \frac{p_{x,y}(x,y)}{p_y(y)}
\end{displaymath}
\pause
Note that $p_{x|y}(x|y)$ is defined only for $y$ values such that $p_y(y)>0$.

\end{frame}
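%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% An added illustrative example: the joint pmf table below is hypothetical
% (made-up numbers, not from the original deck), chosen to show the conditional
% pmf formula in action on a small table.
\begin{frame}
\frametitle{A small example}
\framesubtitle{Conditional probability mass functions from a joint table} \pause

Suppose $X$ and $Y$ each take values in $\{0,1\}$, with joint pmf \pause
\begin{center}
\begin{tabular}{c|cc|c}
 & $y=0$ & $y=1$ & $p_x(x)$ \\ \hline
$x=0$ & $0.1$ & $0.2$ & $0.3$ \\
$x=1$ & $0.3$ & $0.4$ & $0.7$ \\ \hline
$p_y(y)$ & $0.4$ & $0.6$ & $1$
\end{tabular}
\end{center}
\pause
Then
\begin{displaymath}
p_{x|y}(1|1) = \frac{p_{x,y}(1,1)}{p_y(1)} = \frac{0.4}{0.6} = \frac{2}{3},
\end{displaymath}
\pause
which differs from $p_x(1) = 0.7$, so $X$ and $Y$ are not independent.
\end{frame}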
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Conditional Probability Mass Functions}
\framesubtitle{Both ways} \pause

{\LARGE
\begin{displaymath}
p_{y|x}(y|x) = \frac{p_{x,y}(x,y)}{p_x(x)}
\end{displaymath}
} % End size
\vspace{3mm}
{\LARGE
\begin{displaymath}
p_{x|y}(x|y) = \frac{p_{x,y}(x,y)}{p_y(y)}
\end{displaymath}
\vspace{3mm}
} % End size
Defined where the denominators are non-zero.
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Independence makes sense}
\framesubtitle{In terms of conditional probability mass functions} \pause

Suppose $X$ and $Y$ are independent. \pause Then $p_{xy}(x,y) = p_x(x)p_y(y)$\pause, and \pause
\begin{eqnarray*}
p_{x|y}(x|y) & = & \frac{p_{x,y}(x,y)}{p_y(y)} \\ \pause
& = & \frac{p_x(x)p_y(y)}{p_y(y)} \\ \pause
& = & p_x(x)
\end{eqnarray*}
\pause
So we see that the conditional distribution of $X$ given $Y=y$ is identical for every value of $y$. \pause It does not depend on the value of $y$.
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{The other way} \pause

Suppose the conditional distribution of $X$ given $Y=y$ does not depend on the value of $y$. \pause Then \pause
\begin{eqnarray*}
&& p_{x|y}(x|y) = p_x(x) \\ \pause
& \Leftrightarrow & p_x(x) = \frac{p_{x,y}(x,y)}{p_y(y)} \\ \pause
& \Leftrightarrow & p_{x,y}(x,y) = p_x(x) \, p_y(y)
\end{eqnarray*}
\pause
So that $X$ and $Y$ are independent.
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Conditional distributions of continuous random variables} \pause

If $X$ and $Y$ are continuous random variables, the conditional probability density of $X$ given $Y=y$ is \pause
{\LARGE
\begin{displaymath}
f_{x|y}(x|y) = \frac{f_{x,y}(x,y)}{f_y(y)}
\end{displaymath}
\pause
} % End size
\begin{itemize}
\item Note that $f_{x|y}(x|y)$ is defined only for $y$ values such that $f_y(y)>0$. \pause
\item It looks like we are conditioning on an event of probability zero\pause, but the conditional density is a limit of a conditional probability\pause, as the radius of a tiny region surrounding $(x,y)$ goes to zero.
\end{itemize}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Conditional Probability Density Functions}
\framesubtitle{Both ways} \pause

{\LARGE
\begin{displaymath}
f_{y|x}(y|x) = \frac{f_{x,y}(x,y)}{f_x(x)}
\end{displaymath}
} % End size
\vspace{3mm}
{\LARGE
\begin{displaymath}
f_{x|y}(x|y) = \frac{f_{x,y}(x,y)}{f_y(y)}
\end{displaymath}
\vspace{3mm}
} % End size
Defined where the denominators are non-zero.
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Independence makes sense}
\framesubtitle{In terms of conditional densities} \pause

Suppose $X$ and $Y$ are independent. \pause Then $f_{xy}(x,y) = f_x(x)f_y(y)$\pause, and \pause
\begin{eqnarray*}
f_{x|y}(x|y) & = & \frac{f_{x,y}(x,y)}{f_y(y)} \\ \pause
& = & \frac{f_x(x)f_y(y)}{f_y(y)} \\ \pause
& = & f_x(x)
\end{eqnarray*}
\pause
And we see that the conditional density of $X$ given $Y=y$ is identical for every value of $y$. \pause It does not depend on the value of $y$.
\end{frame}
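%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% An added illustrative example: the density below is hypothetical (not from
% the original deck), chosen because it does not factor, so the conditional
% density visibly depends on $y$.
\begin{frame}
\frametitle{A small example}
\framesubtitle{A conditional density that depends on $y$} \pause

Suppose $f_{x,y}(x,y) = x+y$ for $0 \leq x \leq 1$ and $0 \leq y \leq 1$, and zero elsewhere. \pause Then
\begin{displaymath}
f_y(y) = \int_0^1 (x+y) \, dx = \frac{1}{2} + y,
\end{displaymath}
\pause
so for $0 \leq x \leq 1$,
\begin{displaymath}
f_{x|y}(x|y) = \frac{f_{x,y}(x,y)}{f_y(y)} = \frac{x+y}{\frac{1}{2}+y}.
\end{displaymath}
\pause
This depends on $y$, so $X$ and $Y$ are not independent.
\end{frame}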
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{The other way} \pause

Suppose the conditional density of $X$ given $Y=y$ does not depend on the value of $y$. \pause Then \pause
\begin{eqnarray*}
&& f_{x|y}(x|y) = f_x(x) \\ \pause
& \Leftrightarrow & f_x(x) = \frac{f_{x,y}(x,y)}{f_y(y)} \\ \pause
& \Leftrightarrow & f_{x,y}(x,y) = f_x(x) \, f_y(y)
\end{eqnarray*}
\pause
So that $X$ and $Y$ are independent.
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Transformations}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{frame}
\frametitle{Transformations of Jointly Distributed Random Variables} \pause

Let $Y = g(X_1, \ldots, X_n)$. \pause What is the probability distribution of~$Y$? \pause For example,
\begin{itemize}
\item $X_1$ is the number of jobs completed by employee 1.
\item $X_2$ is the number of jobs completed by employee 2. \pause
\item You know the probability distributions of $X_1$ and $X_2$. \pause
\item You would like to know the probability distribution of $Y = X_1 + X_2$.
\end{itemize}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Convolutions of discrete random variables} \pause

\begin{itemize}
\item Let $X$ and $Y$ be discrete random variables. \pause
\item The standard case is where they are independent. \pause
\item Want probability mass function of $Z = X + Y$.
\end{itemize} \pause
\begin{eqnarray*}
p_z(z) & = & P(Z=z) \\ \pause
& = & P(X+Y=z) \\ \pause
& = & \sum_x P(X+Y=z|X=x)P(X=x) \\ \pause
& = & \sum_x P(x+Y=z|X=x)P(X=x) \\ \pause
& = & \sum_x P(Y=z-x|X=x)P(X=x) \\ \pause
& = & \sum_x P(Y=z-x)P(X=x) \mbox{ by independence}\\ \pause
& = & \sum_x p_x(x) p_y(z-x)
\end{eqnarray*}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Summarizing}
\framesubtitle{Convolutions of discrete random variables}

Let $X$ and $Y$ be \emph{independent} discrete random variables, and $Z = X + Y$. \pause
{\LARGE
\begin{displaymath}
p_z(z) = \sum_x p_x(x) p_y(z-x)
\end{displaymath}
} % End size
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Two Important Results}
\framesubtitle{Proved using the convolution formula} \pause

\begin{itemize}
\item Let $X \sim$ Poisson($\lambda_1$) and $Y \sim$ Poisson($\lambda_2$) be independent. \pause Then $Z=X+Y \sim$ Poisson($\lambda_1+\lambda_2$). \pause
% Sum using binomial theorem
\item Let $X \sim$ Binomial($n_1,p$) and $Y \sim$ Binomial($n_2,p$) be independent. \pause Then $Z=X+Y \sim$ Binomial($n_1+n_2,p$).
% Sum over a hypergeometric.
\end{itemize}
\end{frame}
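%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% An added illustrative example: the two-dice setup is standard material but
% this particular slide is not from the original deck. It applies the discrete
% convolution formula to one value of $z$.
\begin{frame}
\frametitle{Example: the sum of two fair dice}
\framesubtitle{Using the convolution formula} \pause

Let $X$ and $Y$ be independent, each uniform on $\{1, \ldots, 6\}$, and let $Z = X+Y$. \pause For $z=4$,
\begin{eqnarray*}
p_z(4) & = & \sum_x p_x(x) p_y(4-x) \\ \pause
& = & p_x(1)p_y(3) + p_x(2)p_y(2) + p_x(3)p_y(1) \\ \pause
& = & 3 \cdot \frac{1}{6} \cdot \frac{1}{6} ~ = ~ \frac{3}{36}
\end{eqnarray*}
\pause
Only $x$ values with $p_x(x) > 0$ and $p_y(4-x) > 0$ contribute to the sum.
\end{frame}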
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Convolutions of \emph{continuous} random variables} \pause

\begin{itemize}
\item Let $X$ and $Y$ be continuous random variables. \pause
\item The standard case is where they are independent. \pause
\item Want probability density function of $Z = X + Y$.
\end{itemize} \pause
\begin{columns}
\column{0.5\textwidth}
\begin{eqnarray*}
f_z(z) & = & \frac{d}{dz} P(Z \leq z) \\ \pause
& = & \frac{d}{dz} P(X+Y \leq z) \\ \pause
& = & \frac{d}{dz} \int_{-\infty}^\infty \int_{-\infty}^{z-x} f_{x,y}(x,y) \, dy \, dx
\end{eqnarray*}
\column{0.5\textwidth}
\includegraphics[width=2in]{x+y} % Picture of the region x+y <= z
\end{columns}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Continuing the calculation}
\framesubtitle{Using independence: $f_{x,y}(x,y) = f_x(x)f_y(y)$} \pause

\begin{eqnarray*}
f_z(z) & = & \frac{d}{dz} \int_{-\infty}^\infty \int_{-\infty}^{z-x} f_x(x) f_y(y) \, dy \, dx \\ \pause
& = & \frac{d}{dz} \int_{-\infty}^\infty f_x(x) F_y(z-x) \, dx \\ \pause
& = & \int_{-\infty}^\infty f_x(x) \frac{d}{dz} F_y(z-x) \, dx \\ \pause
& = & \int_{-\infty}^\infty f_x(x) f_y(z-x) \, dx
\end{eqnarray*}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Summarizing}
\framesubtitle{Convolutions of continuous random variables}

Let $X$ and $Y$ be \emph{independent} continuous random variables, and $Z = X + Y$. \pause
{\LARGE
\begin{displaymath}
f_z(z) = \int_{-\infty}^\infty f_x(x) f_y(z-x) \, dx
\end{displaymath}
} % End size
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Two Important Results}
\framesubtitle{Proved using the convolution formula} \pause

\begin{itemize}
\item Let $X \sim$ Exponential($\lambda$) and $Y \sim$ Exponential($\lambda$) be independent, with $\lambda>0$. \pause Then \\
$Z=X+Y \sim$ Gamma($\alpha=2,\lambda$). \pause
% Integrate cdf of exponential.
\item Let $X \sim$ Normal($\mu_1,\sigma_1$) and $Y \sim$ Normal($\mu_2,\sigma_2$) be independent. \pause Then \\
$Z=X+Y \sim$ Normal$\left(\mu_1+\mu_2,\sqrt{\sigma^2_1+\sigma^2_2}\right)$.
% Complete the square, ugh!
\end{itemize}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{The Jacobian Method} \pause

\begin{itemize}
\item $X_1$ and $X_2$ are continuous random variables. \pause
\item $Y_1 = g_1(X_1,X_2)$ and $Y_2 = g_2(X_1,X_2)$. \pause
\item Want $f_{y_1y_2}(y_1,y_2)$. \pause
\end{itemize}
Solve for $x_1$ and $x_2$, obtaining $x_1(y_1,y_2)$ and $x_2(y_1,y_2)$. \pause Then
\begin{displaymath}
f_{y_1y_2}(y_1,y_2) = f_{x_1x_2}(\, x_1(y_1,y_2),x_2(y_1,y_2) \,) \pause
\cdot \mbox{abs}
\renewcommand{\arraystretch}{1.5}
\left| \begin{array}{cc}
\frac{\partial x_1}{\partial y_1} & \frac{\partial x_1}{\partial y_2} \\
\frac{\partial x_2}{\partial y_1} & \frac{\partial x_2}{\partial y_2}
\end{array}\right|
\renewcommand{\arraystretch}{1.0}
\end{displaymath}
\pause
The determinant $\left| \begin{array}{cc} a & b \\ c & d \end{array}\right| = ad-bc$.
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{More about the Jacobian method}
\framesubtitle{$Y_1 = g_1(X_1,X_2)$ and $Y_2 = g_2(X_1,X_2)$} \pause

\begin{itemize}
\item It follows directly from a change of variables formula in multi-variable integration. The proof is omitted. \pause
\item It must be possible to solve $y_1 = g_1(x_1,x_2)$ and $y_2 = g_2(x_1,x_2)$ for $x_1$ and $x_2$. \pause
\item That is, the function $g: \mathbb{R}^2 \rightarrow \mathbb{R}^2$ must be one to one (injective). \pause
\item Frequently you are only interested in $Y_1$, and $Y_2 = g_2(X_1,X_2)$ is chosen to make solving for $x_1$ and $x_2$ easy. \pause
\item The partial derivatives must all be continuous, except possibly on a set of probability zero (they almost always are). \pause
\item It extends naturally to higher dimensions.
\end{itemize}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Change from rectangular to polar co-ordinates}
\framesubtitle{By the Jacobian method} \pause

A point on the plane may be represented as $(x,y)$, or \pause
\begin{center}
\includegraphics[width=2.5in]{circle}
\end{center}
\pause
as an angle $\theta$ and a radius $r$.
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Change of variables}

\begin{columns}
\column{0.6\textwidth}
\begin{center}
\includegraphics[width=2.5in]{circle}
\end{center}
\pause
\column{0.4\textwidth}
\begin{itemize}
\item[] $x = r \cos(\theta)$ \pause
\item[] $y = r \sin(\theta)$ \pause
\item[] $x^2 + y^2 = r^2$ \pause
\end{itemize}
\begin{itemize}
\item As $x$ and $y$ range from $-\infty$ to $\infty$, \pause
\item $r$ goes from $0$ to $\infty$ \pause
\item and $\theta$ goes from $0$ to $2\pi$.
\end{itemize}
\end{columns}
\end{frame}
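%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% An added illustrative example (a sketch, not from the original deck): the
% auxiliary variable $Y_2 = X_2$ is a standard choice that makes the reverse
% solution easy; the Jacobian method then recovers the convolution formula.
\begin{frame}
\frametitle{Example: the convolution formula by the Jacobian method}
\framesubtitle{With $Y_1 = X_1+X_2$ and $Y_2 = X_2$} \pause

Solving, $x_1 = y_1 - y_2$ and $x_2 = y_2$, so \pause
\begin{displaymath}
f_{y_1y_2}(y_1,y_2) = f_{x_1x_2}(y_1-y_2, \, y_2) \cdot \mbox{abs}
\left| \begin{array}{cc} 1 & -1 \\ 0 & 1 \end{array}\right|
= f_{x_1x_2}(y_1-y_2, \, y_2)
\end{displaymath}
\pause
If $X_1$ and $X_2$ are independent, the marginal density of $Y_1 = X_1+X_2$ is
\begin{displaymath}
f_{y_1}(y_1) = \int_{-\infty}^\infty f_{x_1}(y_1-y_2) \, f_{x_2}(y_2) \, dy_2,
\end{displaymath}
\pause
which is the convolution formula again.
\end{frame}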
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Integral $\int_0^\infty \int_0^\infty f_{x,y}(x,y) \, dx \, dy$} \pause

Change of variables: \pause
\begin{columns}
\column{0.5\textwidth}
\begin{itemize}
\item[] $x = r \cos(\theta)$ \pause
\item[] $y = r \sin(\theta)$ \pause
\end{itemize}
\column{0.5\textwidth}
\begin{center}
\includegraphics[width=1.75in]{circle}
\end{center}
\pause
\end{columns}
\begin{eqnarray*}
& & \int_0^\infty \int_0^\infty f_{x,y}(x,y) \, dx \, dy \\
&=& \pause \int_0^{\pi/2} \int_0^\infty f_{x,y}(r\cos\theta,r\sin\theta) \, \mbox{abs}
\renewcommand{\arraystretch}{1.5}
\left| \begin{array}{cc}
\frac{\partial x}{\partial r} & \frac{\partial x}{\partial \theta} \\
\frac{\partial y}{\partial r} & \frac{\partial y}{\partial \theta}
\end{array}\right|
\renewcommand{\arraystretch}{1.0}
\, dr \, d\theta
\end{eqnarray*}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Evaluate the determinant}
\framesubtitle{(with $x = r\cos(\theta)$ and $y = r\sin(\theta)$)}

\begin{eqnarray*}
\renewcommand{\arraystretch}{1.5}
\left| \begin{array}{cc}
\frac{\partial x}{\partial r} & \frac{\partial x}{\partial \theta} \\
\frac{\partial y}{\partial r} & \frac{\partial y}{\partial \theta}
\end{array}\right|
\renewcommand{\arraystretch}{1.0} \pause
& = &
\renewcommand{\arraystretch}{1.5}
\left| \begin{array}{cc}
\frac{\partial \, r\cos(\theta)}{\partial r} & \frac{\partial \, r\cos(\theta)}{\partial \theta} \\
\frac{\partial \, r\sin(\theta)}{\partial r} & \frac{\partial \, r\sin(\theta)}{\partial \theta}
\end{array}\right|
\renewcommand{\arraystretch}{1.0} \pause \\
&& \\
& = & \left| \begin{array}{cc}
\cos(\theta) & -r\sin(\theta) \\
\sin(\theta) & r\cos(\theta)
\end{array}\right| \pause \\
&& \\
& = & r\cos^2\theta - (-r\sin^2\theta) \pause \\
& = & r (\sin^2\theta + \cos^2\theta) \pause \\
& = & r
\end{eqnarray*}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{So the integral is} \pause

{\large
\begin{displaymath}
\int_0^\infty \int_0^\infty f_{x,y}(x,y) \, dx \, dy = \int_0^{\pi/2} \int_0^\infty f_{x,y}(r\cos\theta,r\sin\theta) \, r \, dr \, d\theta
\end{displaymath}
\pause
} % End size
\begin{itemize}
\item The standard formula for change from rectangular to polar co-ordinates is $dx \, dy = r \, dr \, d\theta$. \pause
\item It comes from a Jacobian. \pause
\item Other limits of integration are possible.
\end{itemize}
\end{frame}
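%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% An added illustrative example (not from the original deck): it applies the
% polar change of variables with the limits used above to a concrete density,
% the bivariate standard normal.
\begin{frame}
\frametitle{Example: a concrete integral in polar co-ordinates} \pause

Let $X$ and $Y$ be independent standard normals, so that
$f_{x,y}(x,y) = \frac{1}{2\pi} e^{-(x^2+y^2)/2}$. \pause Then
\begin{eqnarray*}
P(X>0, Y>0) & = & \int_0^\infty \int_0^\infty \frac{1}{2\pi} e^{-(x^2+y^2)/2} \, dx \, dy \\ \pause
& = & \int_0^{\pi/2} \int_0^\infty \frac{1}{2\pi} e^{-r^2/2} \, r \, dr \, d\theta \\ \pause
& = & \frac{\pi}{2} \cdot \frac{1}{2\pi} \left[ -e^{-r^2/2} \right]_0^\infty
~ = ~ \frac{1}{4}
\end{eqnarray*}
\pause
The answer makes sense by symmetry: each quadrant has probability $\frac{1}{4}$.
\end{frame}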
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Copyright Information}

This slide show was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a \href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}{Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website:
\vspace{5mm}

\href{http://www.utstat.toronto.edu/~brunner/oldclass/256f18}
{\small\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/256f18}}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\end{document}