\documentclass[notitlepage]{article}
\usepackage{../../Math598}
\usepackage{listings}
\usepackage{numprint}
\usepackage{enumerate}
\usepackage{multirow}
\usepackage{bbm}
\usepackage{bbold}
\usepackage{amsfonts,amsmath,amssymb}
\setlength{\parindent}{0pt}

\lstloadlanguages{R}
\definecolor{keywordcolor}{rgb}{0,0.6,0.6}
\definecolor{delimcolor}{rgb}{0.461,0.039,0.102}
\definecolor{Rcommentcolor}{rgb}{0.101,0.043,0.432}
\lstdefinestyle{Rsettings}{
  basicstyle=\ttfamily,
  breaklines=true,
  showstringspaces=false,
  keywords={if, else, function, theFunction, tmp}, % Write as many keywords
  otherkeywords={},
  commentstyle=\itshape\color{Rcommentcolor},
  keywordstyle=\color{keywordcolor},
  moredelim=[s][\color{delimcolor}]{"}{"},
}
\lstset{basicstyle=\ttfamily, numbers=none, literate={~} {$\sim$}{2}}

\begin{document}

<<setup>>=
library(knitr)
# Global chunk options
opts_chunk$set(cache=TRUE, autodep=TRUE)
options(scipen=999)
options(repos=c(CRAN="https://cloud.r-project.org/"))
inline_hook <- function(x) {
  if (is.numeric(x)) {
    # Vectorized comparison: whole numbers are printed without decimals,
    # other numeric values are printed to six decimal places
    res <- ifelse(x == round(x), sprintf("%d", x), sprintf("%.6f", x))
    paste(res, collapse = ", ")
  }
}
knit_hooks$set(inline = inline_hook)
@

\begin{center}
\textsclarge{MATH 598: TOPICS IN STATISTICS}

\vspace{0.1 in}

\textsc{MCMC for hierarchical models}
\end{center}

The following Bayesian hierarchical model is used to describe the variation in health outcomes for four
different types of surgery at six hospitals.  Let $i=1,\ldots,6$ index hospital and $j=1,\ldots,4$ index
surgery type.  Let $m_{ij}$ denote the number of surgeries of type $j$ performed at hospital $i$, and let
$y_{ij}$ denote the number of these surgeries that were successful (that is, surgeries after which the
patient was not re-hospitalized within thirty days).  The following data are to be studied.

\begin{table}[ht]
\centering
\begin{tabular}{ll|cc|cc|cc|cc|}
&& \multicolumn{8}{c|}{Surgery Type} \\[6pt]
&& \multicolumn{2}{c|}{1} & \multicolumn{2}{c|}{2} & \multicolumn{2}{c|}{3} & \multicolumn{2}{c|}{4}\\
&& $y$ & $m$ & $y$ & $m$& $y$ & $m$& $y$ & $m$\\
\hline
\multirow{6}{*}{Hospital}
& 1 & 1&49&7&30&1&29&3&26\\
& 2 & 11&35&2&17&6&22&1&32\\
& 3 & 14&26&30&43&15&29&15&31\\
& 4 & 16&26&23&25&22&34&32&37\\
& 5 & 29&34&21&25&22&28&16&30\\
& 6 & 15&30&20&33&13&34&12&39\\
\hline
\end{tabular}
\end{table}

\begin{itemize}
\item \textrm{STAGE 1:} Observed data
\[
Y_{ij} \sim {Binomial}(m_{ij},\theta_{ij}) \qquad i=1,\ldots,6, j=1,\ldots,4 ~\text{independently}
\]
\item \textrm{STAGE 2:} Log-odds model
\[
\psi_{ij} = \log \left(\frac{\theta_{ij}}{1-\theta_{ij}}\right) \sim {Normal}(\alpha_i + \mu_j,\sigma_j^2) \qquad i=1,\ldots,6, j=1,\ldots,4 ~\text{independently}
\]
\item \textrm{STAGE 3:} Prior model
\begin{align*}
\alpha_i & \sim {Normal}(0,5^2) \qquad i=1,\ldots,6~\text{independently}\\[6pt]
\mu_j & \sim {Normal}(0,5^2) \qquad j=1,\ldots,4~\text{independently}\\[6pt]
\sigma_j^2 & \sim {InvGamma}(5,2.5) \qquad j=1,\ldots,4~\text{independently}
\end{align*}
\end{itemize}

In this model, the parameter vector comprises the 38 parameters
\[
\{\psi_{ij}, i=1,\ldots,6,j=1,\ldots,4\}, \quad \{\alpha_i,i=1,\ldots,6\}, \quad \{\mu_j,j=1,\ldots,4\}, \quad \{\sigma_j^2,j=1,\ldots,4\}.
\]
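Before turning to posterior computation, the generative structure of the model can be illustrated by
simulating once through the three stages.  The following is a minimal sketch, not part of the analysis
below: the $m_{ij}$ are fixed at an arbitrary common value of 30, the seed is arbitrary, and the
\texttt{.sim} objects are introduced only for this illustration.

<<priorsim>>=
set.seed(1)
I<-6; J<-4
m.sim<-matrix(30,I,J)                        #Arbitrary numbers of surgeries, for illustration only
alpha.sim<-rnorm(I,0,5)                      #Stage 3: hospital effects
mu.sim<-rnorm(J,0,5)                         #Stage 3: surgery-type effects
sigsq.sim<-1/rgamma(J,shape=5,rate=2.5)      #Stage 3: InvGamma(5,2.5) variances
psi.sim<-matrix(rnorm(I*J,mean=outer(alpha.sim,mu.sim,"+"),
                sd=rep(sqrt(sigsq.sim),each=I)),I,J)        #Stage 2: log-odds
theta.sim<-1/(1+exp(-psi.sim))               #Implied success probabilities
y.sim<-matrix(rbinom(I*J,size=m.sim,prob=theta.sim),I,J)    #Stage 1: simulated counts
round(theta.sim,3)
@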
The posterior distribution, up to proportionality, factorizes as follows:
\begin{align*}
\pi_n(\psi,\alpha,\mu,\sigma^2) & \propto \prod_{i=1}^6 \prod_{j=1}^4 f(y_{ij};m_{ij},\psi_{ij}) & \text{Likelihood} \\[6pt]
& \times \prod_{i=1}^6 \prod_{j=1}^4 \pi_0^{(1)}(\psi_{ij}|\alpha_i,\mu_j,\sigma_j^2) & \text{First stage prior} \\[6pt]
& \times \prod_{i=1}^6 \pi_0^{(2)}(\alpha_i) & \text{Second stage prior} \\[6pt]
& \times \prod_{j=1}^4 \pi_0^{(2)}(\mu_j) & \text{Second stage prior} \\[6pt]
& \times \prod_{j=1}^4 \pi_0^{(2)}(\sigma_j^2) & \text{Second stage prior}
\end{align*}
This is a high-dimensional posterior distribution that cannot be analyzed directly.  However, using a
Gibbs sampler strategy, the posterior can be sampled and summarized.

\bigskip

\textbf{Algorithm 1:} \textbf{Single-parameter Gibbs sampler.}  In this algorithm, the 38 full conditional
posterior distributions are sampled individually in a sweep.  However, the intrinsic conditional
independence structure of the model simplifies the sampling.
\begin{itemizeWide}
\item $\psi_{ij}, i=1,\ldots,6, j=1,\ldots,4$: The full conditional for $\psi_{ij}$ is not available in
closed form, but up to proportionality is given by
\[
\pi_n(\psi_{ij}|-) \propto \exp\left\{y_{ij} \psi_{ij} - m_{ij} \log(1+\exp\{\psi_{ij}\})\right\}
\exp\left\{-\frac{1}{2 \sigma_j^2} (\psi_{ij} - \alpha_i - \mu_j)^2 \right\}
\]
For the Gibbs sampler update of $\psi_{ij}$ we use a Metropolis-within-Gibbs step, and propose an update
using a Normal random-walk proposal with standard deviation $\sigma_\psi = 0.5$.  Note that, conditional
on the collection of $\alpha$, $\mu$ and $\sigma^2$ parameters, the $\psi_{ij}$ parameters are
independent, so they can be updated in parallel.
\item $\alpha_i, i=1,\ldots,6$: At the second stage of the hierarchy, notice that the second stage
parameters are \textbf{conditionally independent of the data} given the $\psi$ parameters.  We have that
for $i=1,\ldots,6$
\begin{align*}
\pi_n(\alpha_{i}|-) & \propto \prod_{j=1}^4 \pi_0^{(1)}(\psi_{ij}|\alpha_i,\mu_j,\sigma_j^2) \times \pi_0^{(2)}(\alpha_i) \\[6pt]
& \propto \exp\left\{- \dfrac{1}{2} \sum_{j=1}^4 \dfrac{1}{\sigma_j^2} (\psi_{ij} - \alpha_i - \mu_j)^2 \right\} \exp\left\{ -\dfrac{\alpha_i^2 }{2\times 5^2} \right\}
\end{align*}
After some algebra, we see that the full conditional for $\alpha_i$ takes the form
\[
\pi_n(\alpha_{i}|-) \equiv Normal(m_{\alpha_i},M_{\alpha_i})
\]
where
\[
m_{\alpha_i} = \dfrac{\sum\limits_{j=1}^4 \dfrac{(\psi_{ij}-\mu_j)}{\sigma_j^2}}{\sum\limits_{j=1}^4 \dfrac{1}{\sigma_j^2} + \dfrac{1}{5^2}}
\qquad \qquad
M_{\alpha_i} = \dfrac{1}{\sum\limits_{j=1}^4 \dfrac{1}{\sigma_j^2} + \dfrac{1}{5^2}}.
\]
\item $\mu_j, j=1,\ldots,4$: As above, we have that for $j=1,\ldots,4$
\begin{align*}
\pi_n(\mu_{j}|-) & \propto \prod_{i=1}^6 \pi_0^{(1)}(\psi_{ij}|\alpha_i,\mu_j,\sigma_j^2) \times \pi_0^{(2)}(\mu_j) \\[6pt]
& \propto \exp\left\{- \dfrac{1}{2\sigma_j^2} \sum_{i=1}^6 (\psi_{ij} - \alpha_i - \mu_j)^2 \right\} \exp\left\{ -\dfrac{\mu_j^2 }{2\times 5^2} \right\}
\end{align*}
After some algebra, we see that the full conditional for $\mu_j$ takes the form
\[
\pi_n(\mu_{j}|-) \equiv Normal(m_{\mu_j},M_{\mu_j})
\]
where
\[
m_{\mu_j} = \dfrac{\dfrac{1}{\sigma_j^2} \sum\limits_{i=1}^6 (\psi_{ij}-\alpha_i)}{\dfrac{6}{\sigma_j^2} + \dfrac{1}{5^2}}
\qquad \qquad
M_{\mu_j} = \dfrac{1}{\dfrac{6}{\sigma_j^2} + \dfrac{1}{5^2}}.
\]
\item $\sigma_j^2, j=1,\ldots,4$: We have that for $j=1,\ldots,4$
\begin{align*}
\pi_n(\sigma_{j}^2|-) & \propto \prod_{i=1}^6 \pi_0^{(1)}(\psi_{ij}|\alpha_i,\mu_j,\sigma_j^2) \times \pi_0^{(2)}(\sigma_j^2) \\[6pt]
& \propto \left(\dfrac{1}{\sigma_j^2} \right)^{6/2} \exp\left\{- \dfrac{1}{2\sigma_j^2} \sum_{i=1}^6 (\psi_{ij} - \alpha_i - \mu_j)^2 \right\} \left(\dfrac{1}{\sigma_j^2} \right)^{10/2+1} \exp\left\{-\frac{5}{2 \sigma_j^2} \right\}
\end{align*}
We then see that the full conditional for $\sigma_j^2$ takes the form
\[
\pi_n(\sigma_{j}^2|-) \equiv InvGamma\left(\dfrac{16}{2},\dfrac{\sum\limits_{i=1}^6 (\psi_{ij} - \alpha_i - \mu_j)^2+5}{2} \right)
\]
\end{itemizeWide}

<<alg1>>=
set.seed(3234434)
#Data
Irow<-6
Jcol<-4
ymdat<-c(1,49,7,30,1,29,3,26,11,35,2,17,6,22,1,32,14,26,30,43,15,29,15,31,
         16,26,23,25,22,34,32,37,29,34,21,25,22,28,16,30,15,30,20,33,13,34,12,39)
ym.mat<-matrix(ymdat,nrow=6,byrow=T)
y.mat<-ym.mat[,c(1,3,5,7)]      #Successes
m.mat<-ym.mat[,c(1,3,5,7)+1]    #Numbers of surgeries
z.mat<-m.mat-y.mat              #Failures
######################################################################
#MCMC: Starting values
psi.hat<-log(y.mat/z.mat)       #MLEs of the log-odds
old.psi<-new.psi<-log(y.mat/z.mat)
old.al<-apply(old.psi,1,mean)
old.mu<-apply(old.psi,2,mean)
old.sigsq<-1/rgamma(Jcol,10,5)

#MCMC settings
nsamp<-2000   #Number of samples to be collected
nburn<-500    #Number of burn-in iterations
nthin<-1      #Thinning rate: take a sample after every nthin iterations
nits<-nburn+nsamp*nthin   #Total number of iterations
sq<-0.5       #MH proposal standard deviation
ico<-0

#Matrices to store the samples
psi.samp<-th.samp<-array(0,c(nsamp,Irow,Jcol))
al.samp<-matrix(0,nrow=nsamp,ncol=Irow)
mu.samp<-sig.samp<-matrix(0,nrow=nsamp,ncol=Jcol)

#Set the prior parameters
prior.var<-5^2
prior.a<-5;prior.b<-2.5

#Run the Gibbs sampler
for(iter in 1:nits){
  #Metropolis-within-Gibbs updates for the log-odds parameters
  for(i in 1:Irow){
    for(j in 1:Jcol){
      old.pij<-1/(1+exp(-old.psi[i,j]))
      old.like<-y.mat[i,j]*log(old.pij)+z.mat[i,j]*log(1-old.pij)
      old.prior<-dnorm(old.psi[i,j],old.al[i]+old.mu[j],sqrt(old.sigsq[j]),log=TRUE)
      new.psi[i,j]<-old.psi[i,j]+rnorm(1)*sq
      new.pij<-1/(1+exp(-new.psi[i,j]))
      new.like<-y.mat[i,j]*log(new.pij)+z.mat[i,j]*log(1-new.pij)
      new.prior<-dnorm(new.psi[i,j],old.al[i]+old.mu[j],sqrt(old.sigsq[j]),log=TRUE)
      if(log(runif(1)) < new.like+new.prior-old.like-old.prior){
        old.psi[i,j]<-new.psi[i,j]
      }
    }
  }
  #Exact updates for the alphas, mus and sigmas from their full conditionals
  for(i in 1:Irow){
    old.al[i]<-rnorm(1,
      sum((old.psi[i,]-old.mu)/old.sigsq)/(sum(1/old.sigsq)+1/prior.var),
      sqrt(1/(sum(1/old.sigsq)+1/prior.var)))
  }
  for(j in 1:Jcol){
    old.mu[j]<-rnorm(1,
      sum((old.psi[,j]-old.al)/old.sigsq[j])/(Irow/old.sigsq[j]+1/prior.var),
      sqrt(1/(Irow/old.sigsq[j]+1/prior.var)))
  }
  mean.mat<-outer(old.al,old.mu,"+")
  for(j in 1:Jcol){
    ssq.j<-sum((old.psi[,j]-mean.mat[,j])^2)
    old.sigsq[j]<-1/rgamma(1,Irow/2+prior.a,ssq.j/2+prior.b)
  }
  #Store the samples
  if(iter > nburn & iter %% nthin ==0){
    ico<-ico+1
    psi.samp[ico,,]<-old.psi
    th.samp[ico,,]<-1/(1+exp(-old.psi))
    al.samp[ico,]<-old.al
    mu.samp[ico,]<-old.mu
    sig.samp[ico,]<-old.sigsq
  }
}
@
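\medskip

The proposal standard deviation $\sigma_\psi = 0.5$ is fixed here rather than tuned.  As a rough
diagnostic (a minimal sketch, not part of the original analysis), the acceptance rate of each
Metropolis-within-Gibbs update can be estimated by the proportion of successive stored samples at which
the corresponding $\psi_{ij}$ chain actually moved.

<<accrate>>=
#Empirical acceptance rates for the psi updates: the fraction of successive
#post burn-in samples that differ approximates the MH acceptance probability
acc.rate<-apply(psi.samp,2:3,function(x){mean(diff(x)!=0)})
round(acc.rate,2)
@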
Having run the chain for a total of \Sexpr{nits} iterations, we can inspect the output in the form of
trace plots and acf plots to assess how well the chain is performing.  We do this for the post burn-in
samples.  In the trace plots, the blue lines represent the maximum likelihood estimates of the log-odds
from a non-hierarchical analysis.

<<psitrace1>>=
par(mfrow=c(2,2),mar=c(4,4,2,2))
for(i in 1:Irow){
  for(j in 1:Jcol){
    plot(psi.samp[,i,j],type="l",ylab=substitute(psi[ival][jval],list(ival=i,jval=j)),xlab="Iteration")
    abline(h=psi.hat[i,j],col="blue",lwd=2)
  }
}
@

<<psiacf1>>=
par(mfrow=c(2,2),mar=c(4,4,2,2))
for(i in 1:Irow){
  for(j in 1:Jcol){
    acf(psi.samp[,i,j],main=' ',lag.max=50)
    title(substitute(psi[ival][jval],list(ival=i,jval=j)),line=1)
  }
}
@

\bigskip

Evidence from these plots for the $\psi_{ij}$ parameters indicates reasonable performance, despite a
degree of autocorrelation in the chains.  Now we inspect the $\alpha_i$ and $\mu_j$ parameter samples.

\medskip

<<altrace1>>=
par(mfrow=c(3,2),mar=c(4,4,3,2))
for(i in 1:Irow){
  plot(al.samp[,i],type="l",ylab=expression(alpha),main=substitute(alpha[ival],list(ival=i)))
}
@

<<mutrace1>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(j in 1:Jcol){
  plot(mu.samp[,j],type="l",ylab=expression(mu),main=substitute(mu[jval],list(jval=j)))
}
@

<<alacf1>>=
par(mfrow=c(3,2),mar=c(4,4,3,2))
for(i in 1:Irow){
  acf(al.samp[,i],main=' ',lag.max=50)
  title(substitute(alpha[ival],list(ival=i)),line=1)
}
@

<<muacf1>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(j in 1:Jcol){
  acf(mu.samp[,j],main=' ',lag.max=50)
  title(substitute(mu[jval],list(jval=j)),line=1)
}
@

\bigskip

These plots exhibit a very high degree of dependence within the chains.  If we compute the correlations
between these samples, we see that many of them are very large in magnitude.

<<cor1>>=
comb.samp<-cbind(al.samp,mu.samp)
colnames(comb.samp)<-c("al1","al2","al3","al4","al5","al6","mu1","mu2","mu3","mu4")
round(cor(comb.samp),3)
@

\bigskip

We finally inspect the $\sigma_j^2$ parameters.

<<sigtrace1>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(j in 1:Jcol){
  plot(sig.samp[,j],type="l",ylab=expression(sigma[j]^2),main=substitute(sigma[jval]^2,list(jval=j)))
}
@

<<sigacf1>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(j in 1:Jcol){
  acf(sig.samp[,j],main='',lag.max=50)
  title(substitute(sigma[jv]^2,list(jv=j)),line=1)
}
@

The sampling for these parameters also exhibits a relatively high degree of autocorrelation.  Finally, we
can check the effective sample sizes.

<<ess1>>=
library(coda)
colnames(al.samp)<-c("al1","al2","al3","al4","al5","al6")
colnames(mu.samp)<-c("mu1","mu2","mu3","mu4")
colnames(sig.samp)<-c("sig1","sig2","sig3","sig4")
effectiveSize(al.samp);effectiveSize(mu.samp);effectiveSize(sig.samp)
@

These results indicate a very high degree of autocorrelation in the sampling of the $\alpha_i$, $\mu_j$
and $\sigma_j^2$ parameters.
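\medskip

The correlation matrix shown earlier suggests an explanation: only the sums $\alpha_i + \mu_j$ enter the
second-stage mean, so the single-parameter sampler can drift slowly along directions in which the
$\alpha_i$ and $\mu_j$ trade off against one another while their sums change little.  As a rough check
(a minimal sketch, not part of the original analysis), we can compare the effective sample size of
$\alpha_1$ alone with that of the sum $\alpha_1 + \mu_1$, which would be expected to mix considerably
better.

<<esscheck1>>=
#Effective sample size of alpha_1 alone versus the identified combination alpha_1 + mu_1
effectiveSize(al.samp[,"al1"])
effectiveSize(al.samp[,"al1"]+mu.samp[,"mu1"])
@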
\bigskip

\textbf{Algorithm 2:} \textbf{Blocked Gibbs sampler.}  In a blocked Gibbs sampler, we place several of
the parameters together in a ``block'' and attempt to update them together.  In this model, we choose to
form a block by defining the 10-dimensional vector
\[
\varphi = (\alpha_1,\alpha_2,\ldots,\alpha_6,\mu_1,\ldots,\mu_4)^\top,
\]
and exploit the fact that the joint full conditional distribution for this block of parameters, given the
other parameters, is multivariate Normal.  We have that if $\Psi$ is the vector defined by
\[
\Psi = (\psi_{11},\psi_{12},\psi_{13},\psi_{14},\psi_{21},\ldots,\psi_{64})^\top
\]
then
\begin{align*}
\pi_n(\varphi|-) \propto \exp\left\{-\frac{1}{2} (\Psi - \bA \varphi)^\top \bD^{-1} (\Psi - \bA \varphi) \right\} \exp\left\{-\frac{1}{2} \varphi^\top \bV^{-1} \varphi \right\}
\end{align*}
where $\bD$ is the $24 \times 24$ diagonal matrix with diagonal
\[
(\sigma_1^2,\sigma_2^2,\sigma_3^2,\sigma_4^2,\sigma_1^2,\ldots,\sigma_4^2)
\]
and $\bV$ is the $10 \times 10$ diagonal matrix with $5^2$ everywhere on the diagonal.  The matrix $\bA$
is the design matrix for the second-stage model: it is a $24 \times 10$ matrix of zeros and ones.  By a
standard calculation, we have that
\[
\pi_n(\varphi|-) \equiv Normal_{10}(\mathbf{m}, \bM)
\]
where
\[
\mathbf{m} = \left(\bA^\top \bD^{-1} \bA + \bV^{-1} \right)^{-1} \bA^\top \bD^{-1} \Psi
\qquad \qquad
\bM = \left(\bA^\top \bD^{-1} \bA + \bV^{-1} \right)^{-1}
\]

<<alg2>>=
set.seed(3234434)
Irow<-6
Jcol<-4
ymdat<-c(1,49,7,30,1,29,3,26,11,35,2,17,6,22,1,32,14,26,30,43,15,29,15,31,
         16,26,23,25,22,34,32,37,29,34,21,25,22,28,16,30,15,30,20,33,13,34,12,39)
ym.mat<-matrix(ymdat,nrow=6,byrow=T)
y.mat<-ym.mat[,c(1,3,5,7)]
m.mat<-ym.mat[,c(1,3,5,7)+1]
z.mat<-m.mat-y.mat

#The design matrix
A<-matrix(0,nrow=24,ncol=10)
A[1:4,1]<-A[5:8,2]<-A[9:12,3]<-A[13:16,4]<-A[17:20,5]<-A[21:24,6]<-1   #Hospital effects
A[(0:5)*4+1,7]<-A[(0:5)*4+2,8]<-A[(0:5)*4+3,9]<-A[(0:5)*4+4,10]<-1     #Surgery-type effects
A
######################################################################
#Starting values
library(mvnfast)
old.psi<-new.psi<-log(y.mat/z.mat)
psi.hat<-log(y.mat/z.mat)
old.al<-apply(old.psi,1,mean)
old.mu<-apply(old.psi,2,mean)
old.sigsq<-1/rgamma(Jcol,10,5)

#MCMC settings
nsamp<-2000
nburn<-500
nthin<-1
nits<-nburn+nsamp*nthin
sq<-0.5
ico<-0

#Matrices to store the samples
psi.samp.new<-th.samp.new<-array(0,c(nsamp,Irow,Jcol))
al.samp.new<-matrix(0,nrow=nsamp,ncol=Irow)
mu.samp.new<-sig.samp.new<-matrix(0,nrow=nsamp,ncol=Jcol)

#Prior parameters
prior.var<-5^2
prior.a<-5;prior.b<-2.5

for(iter in 1:nits){
  #Metropolis-within-Gibbs updates for the log-odds parameters, as in Algorithm 1
  for(i in 1:Irow){
    for(j in 1:Jcol){
      old.pij<-1/(1+exp(-old.psi[i,j]))
      old.like<-y.mat[i,j]*log(old.pij)+z.mat[i,j]*log(1-old.pij)
      old.prior<-dnorm(old.psi[i,j],old.al[i]+old.mu[j],sqrt(old.sigsq[j]),log=TRUE)
      new.psi[i,j]<-old.psi[i,j]+rnorm(1)*sq
      new.pij<-1/(1+exp(-new.psi[i,j]))
      new.like<-y.mat[i,j]*log(new.pij)+z.mat[i,j]*log(1-new.pij)
      new.prior<-dnorm(new.psi[i,j],old.al[i]+old.mu[j],sqrt(old.sigsq[j]),log=TRUE)
      if(log(runif(1)) < new.like+new.prior-old.like-old.prior){
        old.psi[i,j]<-new.psi[i,j]
      }
    }
  }
  #Joint (blocked) update of the alphas and mus from the multivariate Normal full conditional
  psivec<-as.vector(t(old.psi))
  Dinv<-diag(1/old.sigsq[rep(1:4,6)])
  Vinv<-diag(1/prior.var,Irow+Jcol)
  Mval<-solve(t(A) %*% (Dinv %*% A) + Vinv)
  mval<-Mval %*% (t(A) %*% (Dinv %*% psivec))
  vtheta<-rmvn(1,mu=mval,sigma=Mval)
  old.al<-vtheta[1:6]
  old.mu<-vtheta[7:10]
  #Exact updates for the sigmas
  mean.mat<-outer(old.al,old.mu,"+")
  for(j in 1:Jcol){
    ssq.j<-sum((old.psi[,j]-mean.mat[,j])^2)
    old.sigsq[j]<-1/rgamma(1,Irow/2+prior.a,ssq.j/2+prior.b)
  }
  #Store the samples
  if(iter > nburn & iter %% nthin ==0){
    ico<-ico+1
    psi.samp.new[ico,,]<-old.psi
    th.samp.new[ico,,]<-1/(1+exp(-old.psi))
    al.samp.new[ico,]<-old.al
    mu.samp.new[ico,]<-old.mu
    sig.samp.new[ico,]<-old.sigsq
  }
}
@
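As an illustrative aside (a minimal sketch, not part of the original analysis), the posterior draws of
the success probabilities $\theta_{ij}$ can be compared with the raw proportions $y_{ij}/m_{ij}$; any
differences between the two reflect the partial pooling towards the fitted hospital and surgery-type
effects induced by the second-stage model.

<<shrinkcheck>>=
#Posterior means of the success probabilities theta_ij under the blocked sampler
round(apply(th.samp.new,2:3,mean),3)
#Raw observed proportions y_ij/m_ij for comparison
round(y.mat/m.mat,3)
@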
<<summaries2>>=
psi.means.new<-apply(psi.samp.new,2:3,mean)
psi.medians.new<-apply(psi.samp.new,2:3,median)
psi.lower.new<-apply(psi.samp.new,2:3,quantile,prob=0.025)
psi.upper.new<-apply(psi.samp.new,2:3,quantile,prob=0.975)
expit<-function(x){return(1/(1+exp(-x)))}
th.medians.new<-expit(psi.medians.new)
th.lower.new<-expit(psi.lower.new)
th.upper.new<-expit(psi.upper.new)
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(i in 1:Irow){
  for(j in 1:Jcol){
    plot(psi.samp.new[,i,j],type="l",ylab=substitute(psi[ival][jval],list(ival=i,jval=j)),xlab="Iteration")
    abline(h=psi.hat[i,j],col="blue",lwd=2)
  }
}
@

<<psiacf2>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(i in 1:Irow){
  for(j in 1:Jcol){
    acf(psi.samp.new[,i,j],main=substitute(psi[ival][jval],list(ival=i,jval=j)),lag.max=50)
  }
}
@

<<altrace2>>=
par(mfrow=c(3,2),mar=c(4,4,3,2))
for(i in 1:Irow){
  plot(al.samp.new[,i],type="l",ylab=expression(alpha),main=substitute(alpha[ival],list(ival=i)))
}
@

<<mutrace2>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(j in 1:Jcol){
  plot(mu.samp.new[,j],type="l",ylab=expression(mu),main=substitute(mu[jval],list(jval=j)))
}
@

<<alacf2>>=
par(mfrow=c(3,2),mar=c(4,4,3,2))
for(i in 1:Irow){
  acf(al.samp.new[,i],main=' ',lag.max=50)
  title(substitute(alpha[ival],list(ival=i)),line=1)
}
@

<<muacf2>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(j in 1:Jcol){
  acf(mu.samp.new[,j],main=' ',lag.max=50)
  title(substitute(mu[jval],list(jval=j)),line=1)
}
@

<<sigtrace2>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(j in 1:Jcol){
  plot(sig.samp.new[,j],type="l",ylab=expression(sigma[j]^2),main=substitute(sigma[jval]^2,list(jval=j)))
}
@

<<sigacf2>>=
par(mfrow=c(2,2),mar=c(4,4,3,2))
for(j in 1:Jcol){
  acf(sig.samp.new[,j],main=' ',lag.max=50)
  title(substitute(sigma[jval]^2,list(jval=j)),line=1)
}
@

<<ess2>>=
colnames(al.samp.new)<-c("al1","al2","al3","al4","al5","al6")
colnames(mu.samp.new)<-c("mu1","mu2","mu3","mu4")
colnames(sig.samp.new)<-c("sig1","sig2","sig3","sig4")
effectiveSize(al.samp.new)
effectiveSize(mu.samp.new)
effectiveSize(sig.samp.new)
round(cor(cbind(al.samp.new,mu.samp.new)),3)
@

In the new blocked algorithm, we observe that the $\mu$ and $\alpha$ parameters are being sampled much
more successfully.

<<boxcompare>>=
#Side-by-side boxplots of the posterior samples from the two algorithms
al.both<-cbind(al.samp,al.samp.new)
al.both<-al.both[,c(1,7,2,8,3,9,4,10,5,11,6,12)]
mu.both<-cbind(mu.samp,mu.samp.new)
mu.both<-mu.both[,c(1,5,2,6,3,7,4,8)]
par(mar=c(4,4,2,0))
boxplot(al.both,col=c('gray','red'),names=rep(c('Alg-1','Alg-2'),6),cex.axis=0.75)
title(expression(paste('Boxplots for ',alpha[i],' parameters for Algorithms 1 and 2')))
boxplot(mu.both,col=c('gray','red'),names=rep(c('Alg-1','Alg-2'),4),cex.axis=0.75)
title(expression(paste('Boxplots for ',mu[j],' parameters for Algorithms 1 and 2')))
@

\end{document}