# Monte Carlo test code for Study 4 (Application to Cognitive Changes)
#
# Workflow:
#   1. Fit a no-change and a proportional-change latent change score (LCS)
#      model to the observed data with lavaan (FIML for missingness).
#   2. Re-estimate the no-change model in OpenMx to obtain population values.
#   3. Simulate Monte Carlo samples from the fitted no-change model, masking
#      each sample to reproduce the observed missing-data proportion.
#   4. Fit both models to every Monte Carlo sample to build the reference
#      distribution of the likelihood-ratio statistic (T_ML) and of the
#      proportional model's fit statistic.
#
# Assumes a data frame `data` with observed occasions x1, x2, x6, x8 is
# already in the workspace (occasions 3, 4, 5 and 7 are unobserved
# "spacing" occasions handled via zero loadings).

library(OpenMx)
library(lavaan)

# latent change score models in lavaan ----

# No-change model: constant true score (intercept only) plus
# occasion-specific measurement error.
noch.mod <- '
#true scores
lx1 =~ 1*x1
lx2 =~ 1*x2
lx3 =~ 0*x1 #spacing variable
lx4 =~ 0*x1 #spacing variable
lx5 =~ 0*x1 #spacing variable
lx6 =~ 1*x6
lx7 =~ 0*x1 #spacing variable
lx8 =~ 1*x8

#autoregressions
lx2 ~ 1*lx1
lx3 ~ 1*lx2
lx4 ~ 1*lx3
lx5 ~ 1*lx4
lx6 ~ 1*lx5
lx7 ~ 1*lx6
lx8 ~ 1*lx7

#change scores
dx2 =~ 1*lx2
dx3 =~ 1*lx3
dx4 =~ 1*lx4
dx5 =~ 1*lx5
dx6 =~ 1*lx6
dx7 =~ 1*lx7
dx8 =~ 1*lx8

#intercept
i =~ 1*lx1
i ~ 1
i ~~ i

#means
x1 ~ 0*1
x2 ~ 0*1
x6 ~ 0*1
x8 ~ 0*1
lx1 ~ 0*1
lx2 ~ 0*1
lx3 ~ 0*1
lx4 ~ 0*1
lx5 ~ 0*1
lx6 ~ 0*1
lx7 ~ 0*1
lx8 ~ 0*1
dx2 ~ 0*1
dx3 ~ 0*1
dx4 ~ 0*1
dx5 ~ 0*1
dx6 ~ 0*1
dx7 ~ 0*1
dx8 ~ 0*1

#variances
lx1 ~~ 0*lx1
lx2 ~~ 0*lx2
lx3 ~~ 0*lx3
lx4 ~~ 0*lx4
lx5 ~~ 0*lx5
lx6 ~~ 0*lx6
lx7 ~~ 0*lx7
lx8 ~~ 0*lx8
dx2 ~~ 0*dx2
dx3 ~~ 0*dx3
dx4 ~~ 0*dx4
dx5 ~~ 0*dx5
dx6 ~~ 0*dx6
dx7 ~~ 0*dx7
dx8 ~~ 0*dx8
x1 ~~ e1*x1
x2 ~~ e2*x2
x6 ~~ e6*x6
x8 ~~ e8*x8

#covariances
dx2 ~~ 0*dx3 + 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx3 ~~ 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx4 ~~ 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx5 ~~ 0*dx6 + 0*dx7 + 0*dx8
dx6 ~~ 0*dx7 + 0*dx8
dx7 ~~ 0*dx8
i ~~ 0*dx2 + 0*dx3 + 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
'

# Proportional change model: identical to noch.mod except each change
# score is regressed on the preceding true score with a single common
# coefficient (label "beta").
prop.mod <- '
#true scores
lx1 =~ 1*x1
lx2 =~ 1*x2
lx3 =~ 0*x1 #spacing variable
lx4 =~ 0*x1 #spacing variable
lx5 =~ 0*x1 #spacing variable
lx6 =~ 1*x6
lx7 =~ 0*x1 #spacing variable
lx8 =~ 1*x8

#autoregressions
lx2 ~ 1*lx1
lx3 ~ 1*lx2
lx4 ~ 1*lx3
lx5 ~ 1*lx4
lx6 ~ 1*lx5
lx7 ~ 1*lx6
lx8 ~ 1*lx7

#change scores
dx2 =~ 1*lx2
dx3 =~ 1*lx3
dx4 =~ 1*lx4
dx5 =~ 1*lx5
dx6 =~ 1*lx6
dx7 =~ 1*lx7
dx8 =~ 1*lx8

#proportional change
dx2 ~ beta*lx1
dx3 ~ beta*lx2
dx4 ~ beta*lx3
dx5 ~ beta*lx4
dx6 ~ beta*lx5
dx7 ~ beta*lx6
dx8 ~ beta*lx7

#intercept
i =~ 1*lx1
i ~ 1
i ~~ i

#means
x1 ~ 0*1
x2 ~ 0*1
x6 ~ 0*1
x8 ~ 0*1
lx1 ~ 0*1
lx2 ~ 0*1
lx3 ~ 0*1
lx4 ~ 0*1
lx5 ~ 0*1
lx6 ~ 0*1
lx7 ~ 0*1
lx8 ~ 0*1
dx2 ~ 0*1
dx3 ~ 0*1
dx4 ~ 0*1
dx5 ~ 0*1
dx6 ~ 0*1
dx7 ~ 0*1
dx8 ~ 0*1

#variances
lx1 ~~ 0*lx1
lx2 ~~ 0*lx2
lx3 ~~ 0*lx3
lx4 ~~ 0*lx4
lx5 ~~ 0*lx5
lx6 ~~ 0*lx6
lx7 ~~ 0*lx7
lx8 ~~ 0*lx8
dx2 ~~ 0*dx2
dx3 ~~ 0*dx3
dx4 ~~ 0*dx4
dx5 ~~ 0*dx5
dx6 ~~ 0*dx6
dx7 ~~ 0*dx7
dx8 ~~ 0*dx8
x1 ~~ e1*x1
x2 ~~ e2*x2
x6 ~~ e6*x6
x8 ~~ e8*x8

#covariances
dx2 ~~ 0*dx3 + 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx3 ~~ 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx4 ~~ 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx5 ~~ 0*dx6 + 0*dx7 + 0*dx8
dx6 ~~ 0*dx7 + 0*dx8
dx7 ~~ 0*dx8
i ~~ 0*dx2 + 0*dx3 + 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
'

# fitting models in lavaan ----
fit.noch <- lavaan(noch.mod, data = data, missing = "fiml")
fit.noch
fit.prop <- lavaan(prop.mod, data = data, missing = "fiml")
fit.prop

# observed missing-data proportion ----
# Vectorized count of NA cells (replaces the original cell-by-cell loop).
counter <- sum(is.na(data))
# NOTE(review): n excludes one row of data — confirm this offset is intended.
n <- nrow(data) - 1
# C = proportion of observed (non-missing) cells over the 4 measured occasions.
C <- 1 - counter / (n * 4)

# Monte Carlo test ----

# mask(): randomly set cells of `data` to NA with probability `prop`,
# redrawing the mask until no row loses all four observed occasions
# (columns 1-4), so every case contributes at least one data point.
mask <- function(data, prop) {
  repeat {
    data1 <- data
    m <- matrix(
      rbinom(nrow(data) * ncol(data), 1, prop) == 1,
      nrow = nrow(data), ncol = ncol(data)
    )
    # Vectorized masking (replaces the original double loop over cells).
    data1[m] <- NA
    if (all(rowSums(is.na(data1[, c(1, 2, 3, 4)])) != 4)) {
      return(data1)
    }
  }
}

# build_lcs_model(): construct the OpenMx RAM specification shared by the
# no-change and proportional-change models, so the three model objects used
# below cannot drift apart.
#   name         - model name
#   data         - raw data frame with columns x1, x2, x6, x8
#   resid_start  - start values for the 4 residual variances (v_e1..v_e8)
#   vi_start     - start value for the intercept variance (v_i)
#   mi_start     - start value for the intercept mean (m_i)
#   proportional - if TRUE, add the constrained proportional paths (label "b")
#   beta_start   - start value for the proportional coefficient
build_lcs_model <- function(name, data, resid_start, vi_start, mi_start,
                            proportional = FALSE, beta_start = 0.05) {
  ly <- c('ly1', 'ly2', 'ly3', 'ly4', 'ly5', 'ly6', 'ly7', 'ly8')
  dy <- c('dy2', 'dy3', 'dy4', 'dy5', 'dy6', 'dy7', 'dy8')
  args <- list(
    name, type = "RAM",
    mxData(observed = data, type = "raw"),
    manifestVars = c("x1", "x2", "x6", "x8"),
    latentVars = c(ly, dy, 'i'),
    # Defining true scores: zero loadings onto x1 act as spacing variables
    # for the unobserved occasions 3, 4, 5 and 7.
    mxPath(from = ly,
           to = c('x1', 'x2', 'x1', 'x1', 'x1', 'x6', 'x1', 'x8'),
           arrows = 1, free = FALSE, values = c(1, 1, 0, 0, 0, 1, 0, 1)),
    # Autoregressive effects (fixed unit paths between adjacent true scores)
    mxPath(from = ly[1:7], to = ly[2:8], arrows = 1, free = FALSE, values = 1),
    # Defining change scores
    mxPath(from = dy, to = ly[2:8], arrows = 1, free = FALSE, values = 1),
    # Intercept component
    mxPath(from = 'i', to = 'ly1', arrows = 1, free = FALSE, values = 1),
    # Residual variance paths for the observed occasions
    mxPath(from = c("x1", "x2", "x6", "x8"), arrows = 2, free = TRUE,
           values = resid_start, labels = c("v_e1", "v_e2", "v_e6", "v_e8")),
    # Latent intercept variance
    mxPath(from = 'i', arrows = 2, connect = 'unique.pairs', free = TRUE,
           values = vi_start, labels = 'v_i'),
    # Latent intercept mean
    mxPath(from = 'one', to = 'i', arrows = 1, free = TRUE,
           values = mi_start, labels = 'm_i')
  )
  if (proportional) {
    # Proportional effects: one common coefficient across occasions.
    args <- c(args, list(
      mxPath(from = ly[1:7], to = dy, arrows = 1, free = TRUE,
             values = beta_start, labels = 'b')
    ))
  }
  do.call(mxModel, args)
}

# no change model in OpenMx (fit to the observed data) ----
model <- build_lcs_model("No Change t4", data,
                         resid_start = 340, vi_start = 400, mi_start = 515)

# fit model and extract estimates
fit <- mxRun(model)
est <- fit$output$estimate

# simulate data in lavaan using estimated parameters ----
# Population model = no-change model with the OpenMx estimates substituted
# as fixed parameter values (indexed by label, not position).
no.pop <- paste0("
#true scores
lx1 =~ 1*x1
lx2 =~ 1*x2
lx3 =~ 0*x1
lx4 =~ 0*x1
lx5 =~ 0*x1
lx6 =~ 1*x6
lx7 =~ 0*x1
lx8 =~ 1*x8

#autoregressions
lx2 ~ 1*lx1
lx3 ~ 1*lx2
lx4 ~ 1*lx3
lx5 ~ 1*lx4
lx6 ~ 1*lx5
lx7 ~ 1*lx6
lx8 ~ 1*lx7

#change scores
dx2 =~ 1*lx2
dx3 =~ 1*lx3
dx4 =~ 1*lx4
dx5 =~ 1*lx5
dx6 =~ 1*lx6
dx7 =~ 1*lx7
dx8 =~ 1*lx8

#intercept
i =~ 1*lx1
i ~ ", est["m_i"], "*1
i ~~ ", est["v_i"], "*i

#means
x1 ~ 0*1
x2 ~ 0*1
x6 ~ 0*1
x8 ~ 0*1
lx1 ~ 0*1
lx2 ~ 0*1
lx3 ~ 0*1
lx4 ~ 0*1
lx5 ~ 0*1
lx6 ~ 0*1
lx7 ~ 0*1
lx8 ~ 0*1
dx2 ~ 0*1
dx3 ~ 0*1
dx4 ~ 0*1
dx5 ~ 0*1
dx6 ~ 0*1
dx7 ~ 0*1
dx8 ~ 0*1

#variances
lx1 ~~ 0*lx1
lx2 ~~ 0*lx2
lx3 ~~ 0*lx3
lx4 ~~ 0*lx4
lx5 ~~ 0*lx5
lx6 ~~ 0*lx6
lx7 ~~ 0*lx7
lx8 ~~ 0*lx8
dx2 ~~ 0*dx2
dx3 ~~ 0*dx3
dx4 ~~ 0*dx4
dx5 ~~ 0*dx5
dx6 ~~ 0*dx6
dx7 ~~ 0*dx7
dx8 ~~ 0*dx8
x1 ~~ ", est["v_e1"], "*x1
x2 ~~ ", est["v_e2"], "*x2
x6 ~~ ", est["v_e6"], "*x6
x8 ~~ ", est["v_e8"], "*x8

#covariances
dx2 ~~ 0*dx3 + 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx3 ~~ 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx4 ~~ 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
dx5 ~~ 0*dx6 + 0*dx7 + 0*dx8
dx6 ~~ 0*dx7 + 0*dx8
dx7 ~~ 0*dx8
i ~~ 0*dx2 + 0*dx3 + 0*dx4 + 0*dx5 + 0*dx6 + 0*dx7 + 0*dx8
")

# simulate and mask Monte Carlo samples ----
n.mc <- 1000
mcdata <- vector("list", n.mc)
# One big simulation carved into n.mc samples of n rows each.
d <- lavaan::simulateData(no.pop, sample.nobs = n * n.mc)
for (i in seq_len(n.mc)) {
  rows <- ((i - 1) * n + 1):(i * n)
  # Mask each sample at the observed missingness rate (1 - C).
  mcdata[[i]] <- mask(d[rows, ], 1 - C)
}

# fit models to Monte Carlo samples using OpenMx ----
# Hessian and SEs are not needed for likelihood comparisons; skip for speed.
mxOption(NULL, "Calculate Hessian", "No")
mxOption(NULL, "Standard Errors", "No")

# Monte Carlo models start from the observed-data estimates (by label).
mx.no.model <- build_lcs_model(
  "No", mcdata[[1]],
  resid_start = est[c("v_e1", "v_e2", "v_e6", "v_e8")],
  vi_start = est["v_i"], mi_start = est["m_i"]
)
mx.prop.model <- build_lcs_model(
  "Prop", mcdata[[1]],
  resid_start = est[c("v_e1", "v_e2", "v_e6", "v_e8")],
  vi_start = est["v_i"], mi_start = est["m_i"],
  proportional = TRUE, beta_start = 0.05
)

# saturated model in lavaan (to calculate tml)
sat.model <- '
x1 + x2 + x6 + x8 ~ 1
x1 ~~ x1
x2 ~~ x2
x6 ~~ x6
x8 ~~ x8
'

# variables to store information
fit.temp.n <- fit.temp.p <- fit.sat <- vector("list", n.mc)
lrt <- sat.m2ll <- pfit <- numeric(n.mc)

# safe(): evaluate an expression, returning NA on warning or error so a
# single failed replication cannot crash the Monte Carlo loop.
safe <- function(expr) {
  tryCatch(expr, warning = function(w) NA, error = function(e) NA)
}

# fitting models to Monte Carlo samples (single unified loop; the original
# special-cased the first replication with identical code).
for (mc.samp in seq_len(n.mc)) {
  # Swap in the current sample's data; all other model pieces are reused.
  mx.no.model <- mxModel(mx.no.model,
                         mxData(observed = mcdata[[mc.samp]], type = "raw"))
  mx.prop.model <- mxModel(mx.prop.model,
                           mxData(observed = mcdata[[mc.samp]], type = "raw"))
  fit.temp.n[[mc.samp]] <- safe(mxRun(mx.no.model))
  fit.temp.p[[mc.samp]] <- safe(mxRun(mx.prop.model))
  fit.sat[[mc.samp]] <- safe(lavaan(sat.model, data = mcdata[[mc.samp]],
                                    missing = "fiml", se = "none"))
  # T_ML for no-change vs proportional-change.
  lrt[mc.samp] <- safe(
    mxCompare(fit.temp.p[[mc.samp]], fit.temp.n[[mc.samp]])$diffLL[2]
  )
  # -2 log-likelihood of the unrestricted (saturated) model.
  sat.m2ll[mc.samp] <- safe(
    -2 * fitmeasures(fit.sat[[mc.samp]])["unrestricted.logl"]
  )
  # Fit statistic of the proportional model against the saturated model.
  pfit[mc.samp] <- safe(
    fit.temp.p[[mc.samp]]$output$Minus2LogLikelihood - sat.m2ll[mc.samp]
  )
  print(mc.samp)  # progress indicator
}

# analysis comparing models ----
lrts <- sort(na.omit(lrt))               # distribution of T_MLs from Monte Carlo samples
lrts[ceiling(.95 * length(lrts))]        # Monte Carlo critical value
mean(lrts > anova(fit.noch, fit.prop)$'Chisq diff'[2])  # Monte Carlo test pvalue
anova(fit.noch, fit.prop)$'Chisq diff'[2]               # original sample T_ML
# if original sample T_ML > Monte Carlo critical value, reject

# analysis to see if the model fits the data ----
pfits <- sort(na.omit(pfit))             # distribution of T_MLs from Monte Carlo samples
pfits[ceiling(.95 * length(pfits))]      # Monte Carlo critical value
fit.prop
mean(pfits > fitmeasures(fit.prop)["chisq"])            # Monte Carlo test pvalue
# if original sample T_ML > Monte Carlo critical value, reject