% This is a LaTeX file.
% Homework for the course "Stochastic Calculus",
% Fall semester, 2004, Jonathan Goodman.
\documentclass[12pt]{article}
% The page format, somewhat wider and taller page than in art12.sty.
\topmargin -0.1in \headsep 0in \textheight 8.9in \footskip 0.6in
\oddsidemargin 0in \evensidemargin 0in \textwidth 6.5in
\begin{document}
% The title and header.
\noindent
{\scriptsize Stochastic Calculus, Fall 2004
(http://www.math.nyu.edu/faculty/goodman/teaching/StochCalc2004/)} \hfill
\begin{center}
\large
Assignment 9.
\normalsize
\end{center}
\noindent
Given December 9, due December 23. Last revised, December 9.\\
\vspace{.3in}
\noindent{\bf Instructions}: Please answer these questions without
discussing them with others or looking up the answers in books.
\vspace{.5cm}
% The questions
\begin{enumerate}
\item Let $\cal S$ be a finite state space for a Markov chain.
Let $\xi(t) \in \cal S$ be the state of the chain at time $t$.
The chain is {\em nondegenerate} if there is an $n$ with
$P_{jk}^n \neq 0$ for all $j \in \cal S$ and $k \in \cal S$.
Here the $P_{jk}$ are the $j \to k$ transition probabilities and
$P_{jk}^n$ is the $(j,k)$ entry of $P^n$, which is the $n$ step
$j \to k$ transition probability.
For any nondegenerate Markov chain with a finite state space, the
{\em Perron--Frobenius theorem} gives the following information.
There is a row vector, $\pi$, with $\sum_{k \in {\cal S}} \pi(k) = 1$
and $\pi(k) > 0$ for all $k \in \cal S$ (a probability vector) so that
$\left\|P^t - {\bf 1}\pi \right\| \leq Ce^{-\alpha t}$.
Here ${\bf 1}$ is the column vector of all ones and $\alpha > 0$.
In the problems below, assume that the transition matrix $P$ represents
a nondegenerate Markov chain.
\begin{enumerate}
\item Show that if $P(\xi(t) = k) = \pi(k)$ for all $k \in \cal S$, then
$P(\xi(t+1) = k) = \pi(k)$ for all $k \in \cal S$.
In this sense, $\pi$ represents the {\em steady state} or {\em invariant}
probability distribution.
\item Show that $P$ has one eigenvalue equal to one, which is simple, and
that every other eigenvalue has $\left|\lambda\right|<1$.
\item Let $u(k,t) = P(\xi(t) = k)$.
Show that $u(k,t) \to \pi(k)$ as $t \to \infty$.
No matter what probability distribution the Markov chain starts with, the
probability distribution converges to the unique steady state distribution.
\item Suppose we have a function $f(k)$ defined for $k \in \cal S$ and
that $E_{\pi}[f(\xi)]=0$.
Let $f$ be the column vector with entries $f(k)$ and $\widehat{f}$ the
row vector with entries $\widehat{f}(k) = f(k)\pi(k)$.
Show that
$$
\mbox{cov}_{\pi}(f(\xi(0)),f(\xi(t))) = E_{\pi}[f(\xi(0))f(\xi(t))] =
\widehat{f}P^tf\; .
$$
\item Show that if $A$ is a square matrix with $\left\|A\right\| < 1$, then
$$
\sum_{t=0}^{\infty} A^t = \left(I-A\right)^{-1} \; .$$
This is a generalization of the geometric sequence formula
$\sum_{t=0}^{\infty} a^t = 1/(1-a)$ if $\left|a\right|<1$, and the
proof/derivation can be almost the same, once the series is shown to converge.
\item Show that if $E_{\pi}[f(\xi)] = 0$, then
$\sum_{t=0}^{\infty} P^t f = g$ with $g-Pg = f$ and $E_{\pi}[g(\xi)]=0$.
If the series converges, the argument above should apply.
\item Show that
$$
C= \sum_{t=0}^{\infty} \mbox{cov}_{\pi}[f(\xi(0)),f(\xi(t))] = \widehat{f}g
\; ,
$$
where $g$ is as above.
\item Let $X(T) = \sum_{t=0}^{T} f(\xi(t))$.
Show that $\mbox{var}(X(T)) \approx DT$ for large $T$,
where
$$
D = \mbox{var}_{\pi} [f(\xi)]
+ 2 \sum_{t=1}^{\infty} \mbox{cov}_{\pi}[f(\xi(0)),f(\xi(t))] \; .
$$
This is a version of the Einstein--Kubo formula.
To be precise, $\frac{1}{T} \mbox{var}(X(T)) \to D$ as $T \to \infty$.
Even more precisely, $\left| \mbox{var}(X(T)) -DT \right|$ is bounded
as $T \to \infty$.
Prove whichever of these you prefer.
\item Suppose $P$ represents a Markov chain with invariant probability
distribution $\pi$ and we want to know $\mu = E_{\pi}[f(\xi)]$.
Show that $\widehat{\mu}_T = \frac{1}{T} \sum_{t=0}^{T} f(\xi(t))$
converges to $\mu$ as $T \to \infty$ in the sense that
$E[(\widehat{\mu}_T - \mu )^2] \to 0$ as $T \to \infty$.
Show that this convergence does not depend on $u(k,0)$, the initial
probability distribution.
It is not terribly hard (though not required in this assignment) to show that
$\widehat{\mu}_T \to \mu$ as $T \to \infty$ almost surely.
This is the basis of {\em Markov chain Monte Carlo}, which uses Markov
chains to sample probability distributions, $\pi$, that cannot be sampled
in any simpler way.
\item Consider the Markov chain with state space $-L \leq k \leq L$ having
$2L+1$ states.
The one step transition probabilities are $\frac{1}{3}$ for any
$k \to k-1$, $k \to k$ or $k \to k+1$ transitions that do not take the
state out of the state space.
Transitions that would go out of $\cal S$ are {\em rejected}, so that, for
example, $P(L \to L) = \frac{2}{3}$.
Take $f(k) = k$ and calculate $\pi$ and $D$.
Hint: the general solution to the equations $(g-Pg)(k) = k$ is a cubic
polynomial in $k$.
\end{enumerate}
\item A Brownian bridge is a Brownian motion, $X(t)$, with $X(0) = X(T) = 0$.
Find an SDE satisfied by the Brownian bridge. Hint: Calculate
$E_{x,t}[\Delta X \bigm| X(T) = 0]$, which is something about a multivariate
normal.
\item Suppose stock prices $S_1(t), \ldots,S_n(t)$ satisfy the SDEs
$dS_k(t) = \mu_k S_k dt + \sigma_k S_k dW_k(t)$, where the $W_k(t)$
are {\em correlated} standard Brownian motions with correlation coefficients
$\rho_{jk} = \mbox{corr}(W_j(t),W_k(t))$.
\begin{enumerate}
\item Write a formula for $S_1(t), \ldots,S_n(t)$ in terms of {\em independent}
Brownian motions $B_1(t), \ldots,B_n(t)$.
You may use the Cholesky decomposition $LL^t = \rho$.
\item Write a formula for $u(s,t)$, the joint density function of
$S(t) \in R^n$.
This is the $n$ dimensional correlated lognormal density.
\item Write the partial differential equation one could solve to determine
$E[\max(S_1(T),S_2(T))]$ with $S_1(0) = s_1$ and $S_2(0) = s_2$
and $\rho_{12} \neq 0$.
\end{enumerate}
\item Suppose $dS(t) = a(S(t),t) dt + \sigma(S(t),t) S(t) dB(t)$.
Write a formula for $\int_0^T S(t) dS(t)$ that involves only Riemann
integrals and evaluations.
\end{enumerate}
\end{document}