\documentclass[12pt]{article}
\textwidth 7.0in \oddsidemargin -0.3in \evensidemargin 0.0in \textheight 9.0in \topmargin -0.3in
\usepackage{amsthm,amssymb,amsmath,mathrsfs}
\newtheorem*{thm}{Theorem}
\newtheorem{lem}{Lemma}
\newtheorem{prop}{Proposition}

\begin{document}
\title{\textbf{Report in Probability}}
\author{Wang Chung-Chen}
\date{2013.06}
\maketitle

Recall that the \textbf{characteristic function} of a random variable $X$ is defined by
\begin{align}
\varphi(t) := Ee^{itX} = E\cos tX + iE\sin tX,\notag
\end{align}
and that it has several basic properties which we use freely below. An important result, the \textbf{inversion formula}, says the following.

\begin{prop}[Theorem 3.3.4]
Let $\varphi(t) = \int e^{itx}\,\mu(dx)$ where $\mu$ is a probability measure. If $a<b$, then
\begin{align}
\lim_{T\to\infty}\frac{1}{2\pi}\int_{-T}^{T}\frac{e^{-ita}-e^{-itb}}{it}\,\varphi(t)\,dt
=\mu(a,b)+\tfrac{1}{2}\mu(\{a\})+\tfrac{1}{2}\mu(\{b\}).\notag
\end{align}
\end{prop}

In this report we discuss how the behavior of $\varphi(t)$ as $t\to\infty$ reflects the regularity of $\mu$, and to what extent the resulting implications can be reversed. We first show that if $\mu$ has a density, then $\varphi(t)\to 0$ as $t\to\infty$. The key tool is the following form of the Riemann--Lebesgue lemma.

\begin{lem}
If $g$ is integrable, then $\int g(x)\cos nx\,dx\to 0$ as $n\to\infty$, and the same holds with $\cos$ replaced by $\sin$.
\end{lem}

\noindent\textsc{Proof}. Given $\varepsilon>0$, there is a step function $g_N$, supported on finitely many bounded intervals, with $\int |g(x)-g_N(x)|\,dx<\varepsilon$ (Zygmund, p.~54). Then
\begin{align}
\left|\int g(x)\cos nx\, dx\right| &\leq \left| \int g_N(x)\cos nx\, dx\right| + \int |g(x)-g_N(x)|\,dx\notag\\
&\leq\frac{2K}{n} + \varepsilon,\notag
\end{align}
where $K$ is a constant depending only on $g_N$. Hence $\limsup_{n\to\infty}\left|\int g(x)\cos nx\,dx\right|\leq\varepsilon$, and since $\varepsilon>0$ is arbitrary, $\int g(x)\cos nx\, dx\to 0$. \hfill$\square$

Now suppose $\mu(dx)=g(x)\,dx$ for an integrable density $g$. Then
\begin{align}
\varphi(t) = \int\bigl(\cos (tx) + i\sin (tx)\bigr)\,g(x)\,dx,\notag
\end{align}
and by the lemma both the real and imaginary parts tend to $0$, so $\varphi(t)\to 0$ as $t\to\infty$.

The decay $\varphi(t)\to 0$ does not, however, force the distribution function $F$ to be smooth everywhere. Let $\Omega = [0,1]$ with Lebesgue measure and
\begin{align}
X(\omega)=
\begin{cases}
\omega &\text{if $0\leq\omega\leq 0.5$,}\\
2\omega - 0.5 &\text{if $0.5\leq \omega\leq 1$.}
\end{cases}
\notag
\end{align}
Then the distribution function $F(x)$ fails to be differentiable at $x=\tfrac{1}{2}$. Meanwhile,
\begin{align}
\varphi(t) = Ee^{itX} &= \int_0^{1/2} e^{it\omega}\, d\omega+\int_{1/2}^1 e^{it(2\omega - 0.5)}\,d\omega\notag\\
&=\left.\frac{e^{it\omega}}{it}\right|_0^{1/2} + \left.\frac{e^{it(2\omega -0.5)}}{2it}\right|_{1/2}^1\to 0\notag
\end{align}
as $t\to \infty$. \hfill$\square$

\noindent\textsc{Proof of the second implication}. We now show that if $\varphi(t)\to 0$ as $t\to\infty$, then $\mu$ has no point masses. We need the following properties.

\begin{lem}
\begin{itemize}
\item[(i)] $\mu(\{a\}) = \lim_{T\to\infty}\frac{1}{2T}\int_{-T}^T e^{-ita}\varphi(t)\, dt$.
\item[(ii)] If $P(X\in h\mathbb{Z}) = 1$ where $h > 0$, then its ch.f.\ satisfies $\varphi\bigl(\frac{2\pi}{h} + t\bigr) = \varphi(t)$, so for $x\in h\mathbb{Z}$,
\begin{align}
P(X=x)=\frac{h}{2\pi}\int_{-\pi/h}^{\pi/h} e^{-itx} \varphi(t)\,dt.\notag
\end{align}
\item[(iii)] If $X=Y+b$, then $Ee^{itX} = e^{itb}Ee^{itY}$. So if $P(X\in b+h\mathbb{Z})=1$, the inversion formula in (ii) remains valid for $x\in b+h\mathbb{Z}$.
\end{itemize}
\end{lem}

\begin{prop}
Suppose $X$ and $Y$ are independent and identically distributed with ch.f.\ $\varphi$ and distribution $\mu$. Then
\begin{align}
\lim_{T\to\infty}\frac{1}{2T}\int_{-T}^T |\varphi(t)|^2\, dt = P(X-Y=0)=\sum_x \mu(\{x\})^2.\notag
\end{align}
\end{prop}

To prove this proposition, we need the following.

\begin{prop}[Exercise 2.1.8]
(i) If $X$ and $Y$ are independent with distributions $\mu$ and $\nu$, then $P(X+Y=0)=\sum_y \mu(\{-y\})\nu(\{y\})$.
(ii) If, in addition, $X$ has a continuous distribution, then $P(X=Y)=0$.
\end{prop}

\noindent\textsc{Proof of the proposition}. Since $X$ and $Y$ are independent, $X-Y$ has ch.f.\ $\varphi\cdot \bar{\varphi} = |\varphi|^2$. Taking $a=0$ in part (i) of the preceding lemma, applied to the distribution of $X-Y$, gives
\begin{align}
P(X-Y = 0 ) =\lim_{T\to\infty} \frac{1}{2T}\int_{-T}^T|\varphi(t)|^2\, dt. \notag
\end{align}
By part (i) of Exercise 2.1.8 applied to $X$ and $-Y$, $P(X-Y=0) = \sum_x \mu(\{x\})^2$. \hfill$\square$
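To illustrate the proposition in the simplest nontrivial case (this example is only an illustration and is not used below), let $X$ and $Y$ be independent with $P(X=0)=P(X=1)=P(Y=0)=P(Y=1)=\tfrac{1}{2}$. Then $\varphi(t)=\tfrac{1}{2}(1+e^{it})$ and $|\varphi(t)|^2=\tfrac{1}{2}(1+\cos t)$, so
\begin{align}
\frac{1}{2T}\int_{-T}^{T}|\varphi(t)|^2\,dt
=\frac{1}{2T}\int_{-T}^{T}\frac{1+\cos t}{2}\,dt
=\frac{1}{2}+\frac{\sin T}{2T}\;\longrightarrow\;\frac{1}{2}\quad\text{as }T\to\infty,\notag
\end{align}
which agrees with $P(X-Y=0)=P(X=Y)=\tfrac{1}{2}$ and $\sum_x \mu(\{x\})^2=\tfrac{1}{4}+\tfrac{1}{4}=\tfrac{1}{2}$.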
We now return to the proof of the second implication. Suppose $\varphi(t)\to 0$ as $t\to\infty$; we first show that $\frac{1}{T}\int_0^T|\varphi(t)|^2\,dt\to 0$ as $T\to\infty$.
Given any $\varepsilon>0$, by assumption there is an $M>0$ such that $|\varphi(t)|<\sqrt\varepsilon$ for all $t>M$. For $T>M$,
\begin{align}
0\leq \frac{1}{T}\int_{0}^T|\varphi(t)|^2\,dt& =\frac{1}{T}\int_0^M|\varphi(t)|^2\,dt+\frac{1}{T}\int_M^T|\varphi(t)|^2\,dt\notag\\
&\leq \frac{1}{T}\int_0^M|\varphi(t)|^2\,dt+\frac{1}{T}\int_M^T\varepsilon\, dt\leq\frac{1}{T}\int_0^M|\varphi(t)|^2\,dt+\varepsilon. \notag
\end{align}
Since $\int_0^M|\varphi(t)|^2\,dt\leq M$ is finite, letting $T\to\infty$ gives $\limsup_{T\to\infty}\frac{1}{T}\int_0^T|\varphi(t)|^2\,dt\leq\varepsilon$. Since $\varepsilon$ is arbitrary, $\lim_{T\to\infty}\frac{1}{T}\int_0^T|\varphi(t)|^2\,dt=0$. Because $|\varphi(-t)|=|\varphi(t)|$, the same holds for $\frac{1}{2T}\int_{-T}^T|\varphi(t)|^2\,dt$. Applying the proposition to $X$ and an independent copy $Y$ of $X$, we get $\sum_x\mu(\{x\})^2=0$, so $\mu$ has no point masses.

The converse is false. Let $X_1,X_2,\ldots$ be independent with $P(X_j=0)=P(X_j=2\cdot3^{-j})=\tfrac{1}{2}$ and set $X=\sum_{j=1}^\infty X_j$; then $X$ has the Cantor distribution $\mu$, which has no point masses. We claim that $X$ has ch.f.
\begin{align}
\varphi(t) = \prod_{j=1}^\infty \frac{1+e^{it\cdot 2\cdot 3^{-j}}}{2}.\notag
\end{align}
Since $e^{i2\pi m} = 1 $ for every integer $m$, for each $k\in\mathbb{N}$
\begin{align}
\varphi(3^k \pi) = \prod_{j=1}^\infty\frac{1+e^{i2\pi\cdot 3^{k-j}}}{2} = \prod_{r=1}^\infty \frac{1+e^{i2\pi\cdot 3^{-r}}}{2} = \varphi(\pi).\notag
\end{align}
Moreover $\varphi(\pi)\ne 0$: no factor vanishes, and $\sum_r |1-e^{i2\pi\cdot 3^{-r}}|<\infty$, so the infinite product converges to a nonzero limit. Hence $\varphi(t)$ does not tend to $0$ as $t\to\infty$, even though $\mu$ has no point masses. \hfill$\square$

\noindent\textsc{Proof of Claim}. We use the following lemma: if $X_1, X_2,\ldots$ are independent, $S_n = X_1 + \cdots + X_n$, $\varphi_j$ is the ch.f.\ of $X_j$, and $S_n\to S_\infty$ a.s., then $S_\infty$ has ch.f.\ $\prod_{j=1}^\infty \varphi_j(t)$.

First, if $X_n\to X$ in probability, then $X_n\Rightarrow X$. Indeed, let $g$ be bounded and continuous. Every subsequence of $(X_n)$ has a further subsequence converging to $X$ a.s., and along it $Eg(X_{n_k})\to Eg(X)$ by the bounded convergence theorem; hence $Eg(X_n)\to Eg(X)$. Since $g$ is arbitrary, $X_n\Rightarrow X$.

By the basic properties of ch.f.'s, $S_n$ has ch.f.\ $\prod_{j=1}^n\varphi_j(t)$. Since $S_n\to S_\infty$ a.s., the last paragraph gives $S_n\Rightarrow S_\infty$, so by the continuity theorem $\prod_{j=1}^n \varphi_j(t)\to \varphi_{S_\infty}(t)$ for every $t$; that is, $S_\infty$ has ch.f.\ $\prod_{j=1}^\infty\varphi_j(t)$. In our case $\varphi_j(t)=\frac{1}{2}\bigl(1+e^{it\cdot 2\cdot 3^{-j}}\bigr)$, and $S_n\to X$ a.s.\ since $0\leq X-S_n\leq\sum_{j>n}2\cdot 3^{-j}=3^{-n}\to 0$, which proves the claim. \hfill$\square$

\begin{thebibliography}{100}
\bibitem{Durrett} Rick Durrett, \emph{Probability: Theory and Examples}, Cambridge University Press, 2010.
\end{thebibliography}
\end{document}