Exercise 5 \[\begin{equation*} \begin{split} Z &\sim \mathcal{N}\left(0,\,1 \right), \\ Y|Z &\sim \mathcal{N}\left(1 + Z,\,1 \right), \\ X|(Y,\, Z) &\sim \mathcal{N}\left(1 - Y,\,1 \right). \end{split} \end{equation*}\]

library(mvtnorm)
set.seed(609)
N <- 1000
Z <- rnorm(N, mean = 0, sd = 1)
Y <- rnorm(N, mean = 1 + Z, sd = 1)
X <- rnorm(N, mean = 1 - Y, sd = 1)

x <- seq(min(Z)-0.5, max(Z)+0.5, by = 0.01)
hist(Z, prob = TRUE)
lines(x, dnorm(x), col = "red")
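
As a complementary check of the standard normal marginal of Z (a small addition to the histogram above), a normal QQ-plot of the simulated draws should lie close to the reference line.

## QQ-plot of the simulated Z against standard normal quantiles
qqnorm(Z)
qqline(Z, col = "red")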

The derived densities are \[\begin{equation*} \begin{split} f_Z(z) &= \frac{1}{\sqrt{2\pi}} \exp \left\{ -\frac{z^2}{2} \right\}, \\ f_{Y|Z}(y) &= \frac{1}{\sqrt{2\pi}} \exp \left\{ -\frac{(y - (1 + Z))^2}{2} \right\}, \\ f_{X\,|\,Y,\,Z}(x) &= \frac{1}{\sqrt{2\pi}}\exp \left\{-\frac{(x - (1 - Y))^2}{2} \right\} = \frac{1}{\sqrt{2\pi}}\exp \left\{-\frac{(x + (Y - 1))^2}{2} \right\}. \end{split} \end{equation*}\] The derived marginal means are \[\begin{equation*} \begin{split} \mu_Z &= \mathbb{E}Z = 0, \\ \mu_Y &= \mathbb{E}Y = \mathbb{E}\left( \mathbb{E}\left[ Y | Z \right] \right) = \mathbb{E} \left( 1 + Z \right) = 1,\\ \mu_X &= \mathbb{E}X = \mathbb{E}\left( \mathbb{E}\left[ X | Y,\,Z \right] \right) = \mathbb{E} \left( 1 - Y \right) = 0. \end{split} \end{equation*}\] Joint density of \((Y,\,Z)\): \[\begin{equation*} \begin{split} f_{(Y,\, Z)} (y, \, z) &= f_{Y|Z=z}(y\,|\,z) \cdot f_Z(z) \\ &= \frac{1}{2\pi} \exp \left\{ -\frac{1}{2}(z^2 + y^2 - 2y(1 + z) + (1 + z)^2) \right\} \\ &= \frac{1}{2\pi} \exp \left\{ -\frac{1}{2}\left( (y - 1)^2 - 2(y - 1)z + 2z^2 \right) \right\}\\ &= \frac{1}{2\pi} \exp \left\{ -\frac{1}{2} \begin{pmatrix} y - 1, & z \end{pmatrix} \begin{pmatrix} 1 & -1 \\ -1 & 2 \end{pmatrix} \begin{pmatrix} y - 1 \\ z \end{pmatrix}\right\}. \end{split} \end{equation*}\] Hence \[\begin{equation*} \Sigma_{(Y,\,Z)} = \begin{pmatrix} 1 & -1 \\ -1 & 2 \end{pmatrix}^{-1} = \begin{pmatrix} 2 & 1 \\ 1 & 1 \end{pmatrix} \end{equation*}\] and \[\begin{equation*} \begin{pmatrix} Z \\ Y \end{pmatrix} \sim \mathcal{N}_2 \left( \begin{pmatrix} 0\\ 1 \end{pmatrix} , \, \begin{pmatrix} 1 & 1 \\ 1 & 2 \end{pmatrix} \right). \end{equation*}\]
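
As a quick numerical check of the derivation (a sketch using the simulated draws, not part of the derivation itself), the sample mean vector and covariance matrix of (Z, Y) should be close to (0, 1) and the matrix above.

## empirical moments of (Z, Y); compare with c(0, 1) and matrix(c(1, 1, 1, 2), nrow = 2)
colMeans(cbind(Z, Y))
cov(cbind(Z, Y))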

Comparison of the scatterplot of (Z, Y) with the theoretical contours:

z <- seq(min(Z), max(Z), by = 0.01)
y <- seq(min(Y), max(Y), by = 0.01)
plot(Z, Y, col = "blue")
## variance-covariance matrix of (Z, Y)
Sigma <- matrix(c(1, 1, 1, 2), nrow = 2)
## derived theoretical contours for the joint distribution of (Z, Y)
contour(z, y, outer(z, y, function(z, y) {dmvnorm(cbind(z, y),
                                    mean = c(0, 1), sigma = Sigma)}),
        col = "red", add = TRUE, lwd = 1.75)

Joint density of \((X,\, Y,\, Z)\): \[\begin{equation*} \begin{split} f_{(X,\,Y,\, Z)} (x, \, y, \, z) &= f_{X\,|\, Y = y, \, Z=z}(x\,|\,y,\,z) \cdot f_{(Y,\,Z)}(y,\,z) \\ &= \left( \frac{1}{2\pi} \right)^{\frac{3}{2}} \exp \left\{ - \frac{1}{2} \left(x^2 + 2x(y - 1) + (y - 1)^2 + (y - 1)^2 - 2(y - 1)z + 2z^2 \right) \right\} \\ &= \left( \frac{1}{2\pi} \right)^{\frac{3}{2}} \exp \left\{ -\frac{1}{2} \begin{pmatrix} x, & y - 1, & z \end{pmatrix} \begin{pmatrix} 1 & 1 & 0 \\ 1 & 2 & -1 \\ 0 & -1 & 2 \end{pmatrix} \begin{pmatrix} x \\ y - 1 \\ z \end{pmatrix} \right\}. \end{split} \end{equation*}\] Hence \[\begin{equation*} \mathbf{\Sigma_{(X,\, Y,\,Z)}} = \begin{pmatrix} 1 & 1 & 0 \\ 1 & 2 & -1 \\ 0 & -1 & 2 \end{pmatrix}^{-1} = \begin{pmatrix} 3 & -2 & -1\\ -2 & 2 & 1 \\ -1 & 1 & 1 \end{pmatrix}. \end{equation*}\] Finally \[\begin{equation*} \begin{pmatrix} X \\ Y \\ Z \end{pmatrix} \sim \mathcal{N}_3 \left( \begin{pmatrix} 0 \\ 1 \\ 0 \end{pmatrix}, \, \begin{pmatrix} 3 & -2 & -1\\ -2 & 2 & 1 \\ -1 & 1 & 1 \end{pmatrix}\right). \end{equation*}\]
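
The same empirical check for the full vector (X, Y, Z): the sample mean vector and covariance matrix of the simulated draws should approximate the theoretical values just derived.

## empirical moments of (X, Y, Z); compare with c(0, 1, 0) and the covariance matrix above
colMeans(cbind(X, Y, Z))
cov(cbind(X, Y, Z))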

Pairwise scatterplots compared with the theoretical contours of the corresponding two-dimensional marginals:

x <- seq(min(X), max(X), by = 0.01)
plot(Y, X, col = "blue")
## variance-covariance matrix of (Y, X)
Sigma <- matrix(c(2, -2, -2, 3), nrow = 2)
## derived theoretical contours for the joint distribution of (Y, X)
contour(y, x, outer(y, x, function(y, x) {dmvnorm(cbind(y, x),
                                    mean = c(1, 0), sigma = Sigma)}),
        col = "red", add = TRUE, lwd = 1.75)

plot(Z, X, col = "blue")
## variance-covariance matrix of (Z, X)
Sigma <- matrix(c(1, -1, -1, 3), nrow = 2)
## derived theoretical contours for the joint distribution of (Z, X)
contour(z, x, outer(z, x, function(z, x) {dmvnorm(cbind(z, x),
                                    mean = c(0, 0), sigma = Sigma)}),
        col = "red", add = TRUE, lwd = 1.75)

Interactive 3D scatterplot of the simulated (X, Y, Z):

library("threejs")
scatterplot3js(X, Y, Z, size = 0.30)
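
A static alternative (a small addition): base R's pairs() shows the three pairwise scatterplots at once.

## pairwise scatterplots of the simulated (X, Y, Z)
pairs(cbind(X, Y, Z), col = "blue")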

Conditional distribution \(Y\,|\,(X,\,Z)\): \[\begin{equation*} \begin{pmatrix} X \\ Z \end{pmatrix} \sim \mathcal{N}_2 \left( \begin{pmatrix} 0 \\ 0 \end{pmatrix}, \, \begin{pmatrix} 3 & -1\\ -1 & 1 \end{pmatrix}\right) \end{equation*}\] \[\begin{equation*} \begin{split} f_{(X,\,Z)}(x,\,z) &= \left( \det \mathbf{\Sigma_{(X,\,Z)}}(2\pi)^2\right)^{-\frac{1}{2}}\exp \left\{ -\frac{1}{4} \begin{pmatrix} x, & z \end{pmatrix} \begin{pmatrix} 1 & 1 \\ 1 & 3 \end{pmatrix} \begin{pmatrix} x \\ z \end{pmatrix} \right\} \\ &= \left( \frac{1}{2^3\pi^2}\right)^{\frac{1}{2}}\exp \left\{ -\frac{1}{2}\frac{x^2 + 2xz + 3z^2}{2} \right\} \end{split} \end{equation*}\] \[\begin{equation*} \begin{split} f_{Y|(X = x,\, Z = z)} (y) &= \frac{f_{(X,\, Y,\, Z)} (x,\, y,\, z)}{f_{(X,\,Z)}(x,\,z)} \\ &= \frac{1}{\sqrt{\pi}} \exp\left\{- \frac{1}{2}(x^2 + 2x(y - 1) + 2(y-1)^2 - 2(y-1)z + 2z^2) + \frac{1}{2}\frac{x^2 + 2xz + 3z^2}{2} \right\} \\ &= \frac{1}{\sqrt{\pi}} \exp\left\{- \frac{1}{2}\frac{(y - (-\frac{x}{2} + \frac{z}{2} + 1))^2}{\frac{1}{2}} \right\}. \end{split} \end{equation*}\] Hence \[\begin{equation*} Y|(X,\, Z) \sim \mathcal{N} \left( -\frac{X}{2} + \frac{Z}{2} + 1, \, \frac{1}{2} \right). \end{equation*}\] Consider the linear transformation \[\begin{equation*} U = 1 + Z \end{equation*}\] and \[\begin{equation*} V = 1 - Y \end{equation*}\] and find the joint distribution of the random vector \[\begin{equation*} \begin{pmatrix} U \\ V \end{pmatrix}. \end{equation*}\] \[\begin{equation*} \begin{split} \mathbb{E}U &= 1 + \mathbb{E}Z = 1, \\ \mathbb{E}V &= 1 - \mathbb{E}Y = 0, \\ \text{Var}(U) &= \text{Var}(Z) = 1, \\ \text{Var}(V) &= \text{Var}(Y) = 2, \\ \text{cov}(U, \, V) &= \text{cov}(1 + Z, \,1 - Y) = -\text{cov}(Z, \,Y) = -1. \\ \end{split} \end{equation*}\] Hence \[\begin{equation*} \begin{pmatrix} U \\ V \end{pmatrix} \sim \mathcal{N}_2 \left( \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \, \begin{pmatrix} 1 & -1 \\ -1 & 2 \end{pmatrix}\right). \end{equation*}\]
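
A brief empirical check of the derived conditional distribution of Y given (X, Z) (a sketch using the simulated draws): since the conditional mean is linear, the least-squares coefficients of Y regressed on X and Z should be close to (1, -1/2, 1/2), and the residual variance should be close to 1/2.

## regression of Y on X and Z; coefficients should be near (1, -0.5, 0.5)
fit <- lm(Y ~ X + Z)
coef(fit)
## residual variance estimates Var(Y | X, Z), which should be near 0.5
summary(fit)$sigma^2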

U <- 1 + Z
V <- 1 - Y
u <- seq(min(U), max(U), by = 0.01)
v <- seq(min(V), max(V), by = 0.01)
## variance-covariance matrix of (U, V)
Sigma <- matrix(c(1, -1, -1, 2), nrow = 2)
plot(U, V, col = "blue")
## derived theoretical contours for the joint distribution of (U, V)
contour(u, v, outer(u, v, function(u, v) {dmvnorm(cbind(u, v), mean = c(1, 0), sigma = Sigma)}),
        col = "red", add = TRUE, lwd = 1.75)
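
A quick check that the simulated (U, V) matches the derived distribution, in particular the negative covariance.

## empirical moments of (U, V); compare with c(1, 0) and matrix(c(1, -1, -1, 2), nrow = 2)
colMeans(cbind(U, V))
cov(cbind(U, V))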

Calculate the value of \(\mathbb{E}\left(Y\,|\, U = 2\right)\). \[\begin{equation*} \mathbb{E}\left(Y\,|\, U = 2\right) = \mathbb{E}\left(Y\,|\, Z + 1 = 2\right) = \mathbb{E}\left(Y\,|\, Z = 1 \right) \overset{\ast}{=} 1 + 1 = 2, \end{equation*}\] since \[\begin{equation} Y|Z \sim \mathcal{N}\left(1 + Z,\,1 \right)\tag{$\ast$}. \end{equation}\]
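
A crude Monte Carlo check (a sketch: averaging the simulated Y over a small window around Z = 1, i.e. U = 2) should give a value close to 2.

## average Y over draws with Z close to 1 (i.e. U close to 2)
mean(Y[abs(Z - 1) < 0.1])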


Example of two one-dimensional normal distributions that are not jointly normal: \[\begin{equation*} \begin{split} X &\sim \mathcal{N}(\mu_X,\,\sigma_X^2), \\ Y\,|\,X &\sim \mathcal{N}\left(0,\,X^2\right). \end{split} \end{equation*}\]

The scatterplot pinches near \(X = 0\), where the conditional standard deviation \(|X|\) vanishes, so the point cloud looks like a stenosis.

### seed 609
library(MASS)
N <- 500
ahoj <- function(m, s){
  ## simulate X ~ N(m, s^2) and Y | X ~ N(0, X^2), then overlay a 2D kernel
  ## density estimate of the joint distribution on the scatterplot
  X <- rnorm(N, mean = m, sd = s)
  Y <- rnorm(N, mean = 0, sd = abs(X))
  dens <- kde2d(X, Y)
  plot(X, Y, col = "blue", main = paste("X mean = ", m, ", sd = ", s))
  contour(dens, add = TRUE, col = "red", nlevels = 10, lwd = 1.75)
}
ahoj(-2, 1)

ahoj(0, 1)

ahoj(2, 1)
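
A supplementary check of the failure of joint normality (a sketch, using new names X2 and Y2 so the earlier simulation is not overwritten): if (X2, Y2) were bivariate normal, the sum X2 + Y2 would itself be normal, so heavy tails in a normal QQ-plot and a small Shapiro-Wilk p-value provide evidence against joint normality.

## if (X2, Y2) were jointly normal, X2 + Y2 would be exactly normal
X2 <- rnorm(N, mean = 0, sd = 1)
Y2 <- rnorm(N, mean = 0, sd = abs(X2))
S <- X2 + Y2
qqnorm(S)
qqline(S, col = "red")
shapiro.test(S)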