I didn't write them out because I thought the message would get too long. I am
using the HPbayes package. I changed the mp8.mle function. Two functions depend
on this one — loop.optim and prior.likewts — so I changed and renamed them as
well. The memory problem arises when applying the new loop.optim function,
named loop.optim_m. The data are:
> dput(AUS)
structure(list(Year = c(2011L, 2011L, 2011L, 2011L, 2011L, 2011L,
2011L, 2011L, 2011L, 2011L, 2011L, 2011L, 2011L, 2011L, 2011L,
2011L, 2011L, 2011L, 2011L, 2011L, 2011L, 2011L, 2011L, 2011L
), Age = structure(c(1L, 2L, 3L, 7L, 8L, 9L, 10L, 11L, 12L, 13L,
14L, 15L, 16L, 17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 4L, 5L,
6L), .Label = c("0", "04-Jan", "09-May",
"100-104", "105-109",
"110+", "14-Oct", "15-19", "20-24",
"25-29", "30-34", "35-39",
"40-44", "45-49", "50-54", "55-59",
"60-64", "65-69", "70-74",
"75-79", "80-84", "85-89", "90-94",
"95-99"), class = "factor"),
mx = c(0.00381, 0.00018, 1e-04, 9e-05, 0.00033, 0.00046,
0.00051, 0.00067, 0.00088, 0.00122, 0.00184, 0.00277, 0.00418,
0.00645, 0.01005, 0.01725, 0.02955, 0.05478, 0.10292, 0.18274,
0.30093, 0.45866, 0.62819, 0.75716), qx = c(0.0038, 0.00071,
5e-04, 0.00047, 0.00163, 0.00229, 0.00256, 0.00337, 0.00437,
0.00609, 0.00916, 0.01374, 0.02068, 0.03177, 0.04912, 0.08298,
0.13827, 0.24257, 0.41114, 0.61482, 0.80056, 0.91837, 0.9686,
1), ax = c(0.06, 1.56, 2.36, 2.79, 2.81, 2.47, 2.55, 2.59,
2.6, 2.62, 2.7, 2.67, 2.65, 2.64, 2.67, 2.7, 2.67, 2.64,
2.55, 2.34, 2.08, 1.74, 1.43, 1.32), lx = c(100000L, 99620L,
99550L, 99500L, 99453L, 99291L, 99064L, 98811L, 98478L, 98048L,
97450L, 96558L, 95231L, 93262L, 90299L, 85864L, 78739L, 67852L,
51393L, 30263L, 11657L, 2325L, 190L, 6L), dx = c(380L, 70L,
50L, 47L, 162L, 227L, 253L, 333L, 430L, 598L, 893L, 1327L,
1969L, 2963L, 4436L, 7125L, 10887L, 16459L, 21130L, 18606L,
9332L, 2135L, 184L, 6L), Lx = c(99643L, 398308L, 497617L,
497397L, 496912L, 495882L, 494700L, 493254L, 491358L, 488818L,
485200L, 479691L, 471529L, 459313L, 441161L, 412941L, 368377L,
300433L, 205300L, 101820L, 31011L, 4655L, 293L, 8L), Tx = c(8215623L,
8115980L, 7717672L, 7220055L, 6722657L, 6225746L, 5729864L,
5235163L, 4741909L, 4250551L, 3761733L, 3276532L, 2796841L,
2325312L, 1865999L, 1424838L, 1011897L, 643520L, 343087L,
137787L, 35967L, 4956L, 301L, 8L), ex = c(82.16, 81.47, 77.53,
72.56, 67.6, 62.7, 57.84, 52.98, 48.15, 43.35, 38.6, 33.93,
29.37, 24.93, 20.66, 16.59, 12.85, 9.48, 6.68, 4.55, 3.09,
2.13, 1.58, 1.32)), .Names = c("Year", "Age",
"mx", "qx",
"ax", "lx", "dx", "Lx", "Tx",
"ex"), class = "data.frame", row.names = c(NA,
-24L))
The loop.optim_m function is:
loop.optim_m <- function(prior, nrisk, ndeath, d = 10, theta.dim = 8,
                         age = c(1e-05, 1, seq(5, 110, 5))) {
  # Iteratively locate up to `d` local maxima of the 8-parameter
  # Heligman-Pollard multinomial likelihood, starting each search from the
  # best remaining draw in `prior`.
  #
  # prior     : B x theta.dim matrix of prior draws of the HP parameters.
  # nrisk     : numbers at risk (lx) by age group.
  # ndeath    : death counts (dx) by age group.
  # d         : number of optimization rounds / local maxima sought.
  # theta.dim : number of Heligman-Pollard parameters (8).
  # age       : left endpoints of the age intervals passed to mod8p().
  #
  # Returns a list: opt.mu.d (d x theta.dim matrix of optimum means),
  # opt.cov.d (theta.dim x theta.dim x d covariance array), theta.new
  # (last starting point), d.keep (count of maxima beating the prior's
  # best log-likelihood), log.like.0 and wts.0 (prior log-likelihoods
  # and importance weights from prior.likewts_m).
  lx <- nrisk
  dx <- ndeath
  H.k <- prior
  pllwts <- prior.likewts_m(prior = prior, nrisk = lx, ndeath = dx)
  log.like.0 <- pllwts$log.like.0
  wts.0 <- pllwts$wts.0
  B0 <- 1000 * theta.dim
  q0 <- H.k
  d.keep <- 0
  # Start the search from the highest-weight prior draw.
  theta.new <- H.k[wts.0 == max(wts.0), ]
  keep <- H.k
  ll.keep <- log.like.0
  opt.mu.d <- matrix(NA, nrow = d, ncol = theta.dim)
  opt.cov.d <- array(NA, dim = c(theta.dim, theta.dim, d))
  prior.cov <- cov(q0)
  # Box constraints for L-BFGS-B: the componentwise range of the prior.
  opt.low <- apply(q0, 2, min)
  opt.hi <- apply(q0, 2, max)
  imp.keep <- theta.dim * 100
  max.log.like.0 <- max(log.like.0)

  # Objective: multinomial log-likelihood of the death counts under the
  # HP model at parameter vector `theta`.
  # BUG FIX: the original called mod8p(theta = q0, x = age), passing the
  # WHOLE B x theta.dim prior matrix instead of the candidate vector
  # `theta`. Every optim() evaluation therefore built a huge probability
  # object (the reported memory blow-up) and the objective never depended
  # on the parameters being optimized. Also use log = TRUE so out$value
  # is on the same (log) scale as max.log.like.0, which it is compared
  # against below, and respect the x.fit argument instead of hard-coding
  # `age`.
  mp8.mle <- function(theta, x.fit = age) {
    p.hat <- mod8p(theta = theta, x = x.fit)
    dmultinom(x = dx, size = NULL, prob = p.hat, log = TRUE)
  }

  for (i in 1:d) {
    # fnscale = -1 turns optim() into a maximizer.
    out <- optim(par = theta.new, fn = mp8.mle, method = "L-BFGS-B",
                 lower = opt.low, upper = opt.hi,
                 control = list(fnscale = -1, maxit = 1e+05))
    out.mu <- out$par
    if (out$value > max.log.like.0) {
      # This local maximum beats the best prior draw: record it.
      d.keep <- d.keep + 1
      opt.mu.d[i, ] <- out.mu
      out.hess <- hessian(func = mp8.mle, x = out$par)
      if (is.positive.definite(-out.hess)) {
        out.cov <- try(solve(-out.hess))
        opt.cov.d[, , i] <- out.cov
      } else {
        # Hessian unusable: fall back to a precision built from the
        # gradient outer product plus the prior precision.
        out.grad <- grad(func = mp8.mle, x = out.mu)
        A <- out.grad %*% t(out.grad)
        out.prec <- try(solve(prior.cov)) + A
        if (!is.positive.definite(out.prec)) {
          out.prec <- solve(prior.cov)
        }
        out.cov <- try(solve(out.prec))
        opt.cov.d[, , i] <- out.cov
      }
    }
    if (i == 1 && out$value <= max.log.like.0) {
      # First round found nothing better than the prior; still compute a
      # covariance so the Mahalanobis pruning step below has one to use.
      out.hess <- hessian(func = mp8.mle, x = out$par)
      if (is.positive.definite(-out.hess)) {
        out.cov <- solve(-out.hess)
      } else {
        out.grad <- grad(func = mp8.mle, x = out.mu)
        A <- out.grad %*% t(out.grad)
        out.prec <- solve(prior.cov) + A
        if (!is.positive.definite(out.prec)) {
          out.prec <- solve(prior.cov)
        }
        out.cov <- solve(out.prec)
      }
      warning("likelihood of first local maximum does not exceed maximum likelihood from the prior")
    }
    if (i < d) {
      # Remove the best draw (just used as a start), discard the draws
      # closest to the maximum just found, and restart from the best
      # remaining draw so the next round explores a different mode.
      keep <- keep[ll.keep != max(ll.keep), ]
      ll.keep <- ll.keep[ll.keep != max(ll.keep)]
      dist.to.mu <- mahalanobis(x = keep, center = out.mu, cov = out.cov)
      keep <- keep[rank(1 / dist.to.mu) <= (d - i) * B0 / d, ]
      ll.keep <- ll.keep[rank(1 / dist.to.mu) <= (d - i) * B0 / d]
      theta.new <- keep[ll.keep == max(ll.keep), ]
    }
  }
  list(opt.mu.d = opt.mu.d, opt.cov.d = opt.cov.d, theta.new = theta.new,
       d.keep = d.keep, log.like.0 = log.like.0, wts.0 = wts.0)
}
Thank you for your reply.