1 |
#' Fitting an MMRM with Single Optimizer |
|
2 |
#' |
|
3 |
#' @description `r lifecycle::badge("stable")` |
|
4 |
#' |
|
5 |
#' This function helps to fit an MMRM using `TMB` with a single optimizer, |
|
6 |
#' while capturing messages and warnings. |
|
7 |
#' |
|
8 |
#' @inheritParams mmrm |
|
9 |
#' @param control (`mmrm_control`)\cr object. |
|
10 |
#' @param tmb_data (`mmrm_tmb_data`)\cr object. |
|
11 |
#' @param formula_parts (`mmrm_tmb_formula_parts`)\cr object. |
|
12 |
#' @param ... Additional arguments to pass to [mmrm_control()]. |
|
13 |
#' |
|
14 |
#' @details |
|
15 |
#' `fit_single_optimizer` will fit the `mmrm` model using the `control` provided. |
|
16 |
#' If there are multiple optimizers provided in `control`, only the first optimizer |
|
17 |
#' will be used. |
|
18 |
#' If `tmb_data` and `formula_parts` are both provided, `formula`, `data`, `weights`, |
|
19 |
#' `reml`, and `covariance` are ignored. |
|
20 |
#' |
|
21 |
#' @return The `mmrm_fit` object, with additional attributes containing warnings, |
|
22 |
#' messages, optimizer used and convergence status in addition to the |
|
23 |
#' `mmrm_tmb` contents. |
|
24 |
#' @export |
|
25 |
#' |
|
26 |
#' @examples |
|
27 |
#' mod_fit <- fit_single_optimizer( |
|
28 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
29 |
#' data = fev_data, |
|
30 |
#' weights = rep(1, nrow(fev_data)), |
|
31 |
#' optimizer = "nlminb" |
|
32 |
#' ) |
|
33 |
#' attr(mod_fit, "converged") |
|
34 |
fit_single_optimizer <- function(formula, |
|
35 |
data, |
|
36 |
weights, |
|
37 |
reml = TRUE, |
|
38 |
covariance = NULL, |
|
39 |
tmb_data, |
|
40 |
formula_parts, |
|
41 |
..., |
|
42 |
control = mmrm_control(...)) { |
|
43 | 199x |
to_remove <- list( |
44 |
# Warning from a transient visit to invalid parameters during optimization; safe to remove.
|
45 | 199x |
warnings = c("NA/NaN function evaluation") |
46 |
) |
|
47 | 199x |
as_diverged <- list( |
48 | 199x |
errors = c( |
49 | 199x |
"NA/NaN Hessian evaluation", |
50 | 199x |
"L-BFGS-B needs finite values of 'fn'" |
51 |
) |
|
52 |
) |
|
53 | 199x |
if (missing(tmb_data) || missing(formula_parts)) { |
54 | 14x |
h_valid_formula(formula) |
55 | 13x |
assert_data_frame(data) |
56 | 13x |
assert_numeric(weights, any.missing = FALSE, lower = .Machine$double.xmin) |
57 | 13x |
assert_flag(reml) |
58 | 13x |
assert_class(control, "mmrm_control") |
59 | 13x |
assert_list(control$optimizers, names = "unique", types = c("function", "partial")) |
60 | 13x |
quiet_fit <- h_record_all_output( |
61 | 13x |
fit_mmrm( |
62 | 13x |
formula = formula, |
63 | 13x |
data = data, |
64 | 13x |
weights = weights, |
65 | 13x |
reml = reml, |
66 | 13x |
covariance = covariance, |
67 | 13x |
control = control |
68 |
), |
|
69 | 13x |
remove = to_remove, |
70 | 13x |
divergence = as_diverged |
71 |
) |
|
72 |
} else { |
|
73 | 185x |
assert_class(tmb_data, "mmrm_tmb_data") |
74 | 185x |
assert_class(formula_parts, "mmrm_tmb_formula_parts") |
75 | 185x |
quiet_fit <- h_record_all_output( |
76 | 185x |
fit_mmrm( |
77 | 185x |
formula_parts = formula_parts, |
78 | 185x |
tmb_data = tmb_data, |
79 | 185x |
control = control |
80 |
), |
|
81 | 185x |
remove = to_remove, |
82 | 185x |
divergence = as_diverged |
83 |
) |
|
84 |
} |
|
85 | 198x |
if (length(quiet_fit$errors)) { |
86 | 4x |
stop(quiet_fit$errors) |
87 |
} |
|
88 | 194x |
converged <- (length(quiet_fit$warnings) == 0L) && |
89 | 194x |
(length(quiet_fit$divergence) == 0L) && |
90 | 194x |
isTRUE(quiet_fit$result$opt_details$convergence == 0) |
91 | 194x |
structure( |
92 | 194x |
quiet_fit$result, |
93 | 194x |
warnings = quiet_fit$warnings, |
94 | 194x |
messages = quiet_fit$messages, |
95 | 194x |
divergence = quiet_fit$divergence, |
96 | 194x |
converged = converged, |
97 | 194x |
class = c("mmrm_fit", class(quiet_fit$result)) |
98 |
) |
|
99 |
} |
|
100 | ||
101 |
#' Summarizing List of Fits |
|
102 |
#' |
|
103 |
#' @param all_fits (`list` of `mmrm_fit` or `try-error`)\cr list of fits. |
|
104 |
#' |
|
105 |
#' @return List with `warnings`, `messages`, `log_liks` and `converged` results. |
|
106 |
#' @keywords internal |
|
107 |
h_summarize_all_fits <- function(all_fits) { |
|
108 | 8x |
assert_list(all_fits, types = c("mmrm_fit", "try-error")) |
109 | 8x |
is_error <- vapply(all_fits, is, logical(1), class2 = "try-error") |
110 | ||
111 | 8x |
warnings <- messages <- vector(mode = "list", length = length(all_fits)) |
112 | 8x |
warnings[is_error] <- lapply(all_fits[is_error], as.character) |
113 | 8x |
warnings[!is_error] <- lapply(all_fits[!is_error], attr, which = "warnings") |
114 | 8x |
messages[!is_error] <- lapply(all_fits[!is_error], attr, which = "messages") |
115 | 8x |
log_liks <- as.numeric(rep(NA, length.out = length(all_fits))) |
116 | 8x |
log_liks[!is_error] <- vapply(all_fits[!is_error], stats::logLik, numeric(1L)) |
117 | 8x |
converged <- rep(FALSE, length.out = length(all_fits)) |
118 | 8x |
converged[!is_error] <- vapply(all_fits[!is_error], attr, logical(1), which = "converged") |
119 | ||
120 | 8x |
list( |
121 | 8x |
warnings = warnings, |
122 | 8x |
messages = messages, |
123 | 8x |
log_liks = log_liks, |
124 | 8x |
converged = converged |
125 |
) |
|
126 |
} |
|
127 | ||
128 |
#' Refitting MMRM with Multiple Optimizers |
|
129 |
#' |
|
130 |
#' @description `r lifecycle::badge("stable")` |
|
131 |
#' |
|
132 |
#' @param fit (`mmrm_fit`)\cr original model fit from [fit_single_optimizer()]. |
|
133 |
#' @param ... Additional arguments passed to [mmrm_control()]. |
|
134 |
#' @param control (`mmrm_control`)\cr object. |
|
135 |
#' |
|
136 |
#' @return The best (in terms of log likelihood) fit which converged. |
|
137 |
#' |
|
138 |
#' @note On Windows, no parallel computations are currently implemented.
|
139 |
#' @export |
|
140 |
#' |
|
141 |
#' @examples |
|
142 |
#' fit <- fit_single_optimizer( |
|
143 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
144 |
#' data = fev_data, |
|
145 |
#' weights = rep(1, nrow(fev_data)), |
|
146 |
#' optimizer = "nlminb" |
|
147 |
#' ) |
|
148 |
#' best_fit <- refit_multiple_optimizers(fit) |
|
149 |
refit_multiple_optimizers <- function(fit, |
|
150 |
..., |
|
151 |
control = mmrm_control(...)) { |
|
152 | 6x |
assert_class(fit, "mmrm_fit") |
153 | 6x |
assert_class(control, "mmrm_control") |
154 | ||
155 | 6x |
n_cores_used <- ifelse( |
156 | 6x |
.Platform$OS.type == "windows", |
157 | 6x |
1L, |
158 | 6x |
min( |
159 | 6x |
length(control$optimizers), |
160 | 6x |
control$n_cores |
161 |
) |
|
162 |
) |
|
163 | 6x |
controls <- h_split_control( |
164 | 6x |
control, |
165 | 6x |
start = fit$theta_est |
166 |
) |
|
167 | ||
168 |
# Take the results from old fit as starting values for new fits. |
|
169 | 6x |
all_fits <- suppressWarnings(parallel::mcmapply( |
170 | 6x |
FUN = fit_single_optimizer, |
171 | 6x |
control = controls, |
172 | 6x |
MoreArgs = list( |
173 | 6x |
tmb_data = fit$tmb_data, |
174 | 6x |
formula_parts = fit$formula_parts |
175 |
), |
|
176 | 6x |
mc.cores = n_cores_used, |
177 | 6x |
mc.silent = TRUE, |
178 | 6x |
SIMPLIFY = FALSE |
179 |
)) |
|
180 | 6x |
all_fits <- c(all_fits, list(old_result = fit)) |
181 | ||
182 |
# Find the results that are ok and return best in terms of log-likelihood. |
|
183 | 6x |
all_fits_summary <- h_summarize_all_fits(all_fits) |
184 | 6x |
is_ok <- all_fits_summary$converged |
185 | 6x |
if (!any(is_ok)) { |
186 | 1x |
stop( |
187 | 1x |
"No optimizer led to a successful model fit. ", |
188 | 1x |
"Please try to use a different covariance structure or other covariates." |
189 |
) |
|
190 |
} |
|
191 | 5x |
best_optimizer <- which.max(all_fits_summary$log_liks[is_ok]) |
192 | 5x |
all_fits[[which(is_ok)[best_optimizer]]] |
193 |
} |
|
194 | ||
195 |
#' Control Parameters for Fitting an MMRM |
|
196 |
#' |
|
197 |
#' @description `r lifecycle::badge("stable")` |
|
198 |
#' Fine-grained specification of the MMRM fit details is possible using this |
|
199 |
#' control function. |
|
200 |
#' |
|
201 |
#' @param n_cores (`count`)\cr number of cores to be used. |
|
202 |
#' @param method (`string`)\cr adjustment method for degrees of freedom. |
|
203 |
#' @param vcov (`string`)\cr coefficients covariance matrix adjustment method. |
|
204 |
#' @param start (`NULL`, `numeric` or `function`)\cr optional start values for variance |
|
205 |
#' parameters. See details for more information. |
|
206 |
#' @param accept_singular (`flag`)\cr whether singular design matrices are reduced |
|
207 |
#' to full rank automatically and additional coefficient estimates will be missing. |
|
208 |
#' @param optimizers (`list`)\cr optimizer specification, created with [h_get_optimizers()]. |
|
209 |
#' @param drop_visit_levels (`flag`)\cr whether to drop levels for visit variable, |
|
210 |
#' if visit variable is a factor, see details. |
|
211 |
#' @param ... additional arguments passed to [h_get_optimizers()]. |
|
212 |
#' |
|
213 |
#' @details |
|
214 |
#' - The `drop_visit_levels` flag decides whether unobserved visit levels are dropped before the analysis.
#' For example, if the data only has observations at visits `VIS1`, `VIS3` and `VIS4`, then by default
#' these are treated as equally spaced, i.e. the distance from `VIS1` to `VIS3` and the distance
#' from `VIS3` to `VIS4` are identical. However, you can manually convert the visit variable into a factor
#' with `levels = c("VIS1", "VIS2", "VIS3", "VIS4")` and also use `drop_visit_levels = FALSE`;
#' then the distance from `VIS1` to `VIS3` is twice the distance from `VIS3` to `VIS4`, because `VIS2`
#' is a valid visit (see the sketch below). However, please be cautious because this can lead to
#' convergence failure when using an unstructured covariance matrix and there are no observations
#' at the missing visits.
|
223 |
#' - The `method` and `vcov` arguments specify the degrees of freedom and coefficients |
|
224 |
#' covariance matrix adjustment methods, respectively. |
|
225 |
#' - Allowed `vcov` includes: "Asymptotic", "Kenward-Roger", "Kenward-Roger-Linear", "Empirical" (CR0), |
|
226 |
#' "Empirical-Jackknife" (CR3), and "Empirical-Bias-Reduced" (CR2). |
|
227 |
#' - Allowed `method` includes: "Satterthwaite", "Kenward-Roger", "Between-Within" and "Residual". |
|
228 |
#' - If `method` is "Kenward-Roger" then only "Kenward-Roger" or "Kenward-Roger-Linear" are allowed for `vcov`. |
|
229 |
#' - The `vcov` argument can be `NULL` to use the default covariance method depending on the `method` |
|
230 |
#' used for degrees of freedom, see the following table: |
|
231 |
#' |
|
232 |
#' | `method` | Default `vcov`| |
|
233 |
#' |-----------|----------| |
|
234 |
#' |Satterthwaite| Asymptotic| |
|
235 |
#' |Kenward-Roger| Kenward-Roger| |
|
236 |
#' |Residual| Empirical| |
|
237 |
#' |Between-Within| Asymptotic| |
|
238 |
#' |
|
239 |
#' - Please note that "Kenward-Roger" for "Unstructured" covariance gives different results |
|
240 |
#' compared to SAS; use "Kenward-Roger-Linear" for `vcov` instead for better matching
|
241 |
#' of the SAS results. |
|
242 |
#' |
|
243 |
#' - The argument `start` is used to facilitate the choice of initial values for fitting the model. |
|
244 |
#' If a `function` is provided, make sure its arguments are valid elements of `mmrm_tmb_data`
#' or `mmrm_tmb_formula_parts` and that it returns a numeric vector.
|
246 |
#' By default or if `NULL` is provided, `std_start` will be used. |
|
247 |
#' Other implemented methods include `emp_start`. |
|
248 |
#' |
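#'
#' For illustration (a minimal sketch, assuming a data set `df` with a visit variable `VISIT`),
#' keeping unobserved visit levels and requesting empirical starting values could look like:
#' ```r
#' df$VISIT <- factor(df$VISIT, levels = c("VIS1", "VIS2", "VIS3", "VIS4"))
#' control <- mmrm_control(drop_visit_levels = FALSE, start = emp_start)
#' ```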
|
249 |
#' @return List of class `mmrm_control` with the control parameters. |
|
250 |
#' @export |
|
251 |
#' |
|
252 |
#' @examples |
|
253 |
#' mmrm_control( |
|
254 |
#' optimizer_fun = stats::optim, |
|
255 |
#' optimizer_args = list(method = "L-BFGS-B") |
|
256 |
#' ) |
|
257 |
mmrm_control <- function(n_cores = 1L, |
|
258 |
method = c("Satterthwaite", "Kenward-Roger", "Residual", "Between-Within"), |
|
259 |
vcov = NULL, |
|
260 |
start = std_start, |
|
261 |
accept_singular = TRUE, |
|
262 |
drop_visit_levels = TRUE, |
|
263 |
..., |
|
264 |
optimizers = h_get_optimizers(...)) { |
|
265 | 243x |
assert_count(n_cores, positive = TRUE) |
266 | 243x |
assert_character(method) |
267 | 243x |
if (is.null(start)) { |
268 | 1x |
start <- std_start |
269 |
} |
|
270 | 243x |
assert( |
271 | 243x |
check_function(start, args = "..."), |
272 | 243x |
check_numeric(start, null.ok = FALSE), |
273 | 243x |
combine = "or" |
274 |
) |
|
275 | 243x |
assert_flag(accept_singular) |
276 | 243x |
assert_flag(drop_visit_levels) |
277 | 243x |
assert_list(optimizers, names = "unique", types = c("function", "partial")) |
278 | 243x |
assert_string(vcov, null.ok = TRUE) |
279 | 243x |
method <- match.arg(method) |
280 | 243x |
if (is.null(vcov)) { |
281 | 192x |
vcov <- h_get_cov_default(method) |
282 |
} |
|
283 | 243x |
assert_subset( |
284 | 243x |
vcov, |
285 | 243x |
c( |
286 | 243x |
"Asymptotic", |
287 | 243x |
"Empirical", |
288 | 243x |
"Empirical-Bias-Reduced", |
289 | 243x |
"Empirical-Jackknife", |
290 | 243x |
"Kenward-Roger", |
291 | 243x |
"Kenward-Roger-Linear" |
292 |
) |
|
293 |
) |
|
294 | 243x |
if (xor(identical(method, "Kenward-Roger"), vcov %in% c("Kenward-Roger", "Kenward-Roger-Linear"))) { |
295 | 5x |
stop(paste( |
296 | 5x |
"Kenward-Roger degrees of freedom must work together with Kenward-Roger", |
297 | 5x |
"or Kenward-Roger-Linear covariance!" |
298 |
)) |
|
299 |
} |
|
300 | 238x |
structure( |
301 | 238x |
list( |
302 | 238x |
optimizers = optimizers, |
303 | 238x |
start = start, |
304 | 238x |
accept_singular = accept_singular, |
305 | 238x |
method = method, |
306 | 238x |
vcov = vcov, |
307 | 238x |
n_cores = as.integer(n_cores), |
308 | 238x |
drop_visit_levels = drop_visit_levels |
309 |
), |
|
310 | 238x |
class = "mmrm_control" |
311 |
) |
|
312 |
} |
|
313 | ||
314 |
#' Fit an MMRM |
|
315 |
#' |
|
316 |
#' @description `r lifecycle::badge("stable")` |
|
317 |
#' |
|
318 |
#' This is the main function fitting the MMRM. |
|
319 |
#' |
|
320 |
#' @param formula (`formula`)\cr the model formula, see details. |
|
321 |
#' @param data (`data`)\cr the data to be used for the model. |
|
322 |
#' @param weights (`vector`)\cr an optional vector of weights to be used in |
|
323 |
#' the fitting process. Should be `NULL` or a numeric vector. |
|
324 |
#' @param reml (`flag`)\cr whether restricted maximum likelihood (REML) |
|
325 |
#' estimation is used, otherwise maximum likelihood (ML) is used. |
|
326 |
#' @param covariance (`cov_struct`)\cr a covariance structure type definition |
|
327 |
#' as produced with [cov_struct()], or value that can be coerced to a |
|
328 |
#' covariance structure using [as.cov_struct()]. If no value is provided, |
|
329 |
#' a structure is derived from the provided formula. |
|
330 |
#' @param control (`mmrm_control`)\cr fine-grained fitting specifications list |
|
331 |
#' created with [mmrm_control()]. |
|
332 |
#' @param ... arguments passed to [mmrm_control()]. |
|
333 |
#' |
|
334 |
#' @details |
|
335 |
#' The `formula` typically looks like: |
|
336 |
#' `FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID)` |
|
337 |
#' i.e. it specifies the response and covariates as usual, and exactly one special term
#' defines which covariance structure is used and which variables are the time point and
#' subject variables. The covariance structures available in the formula can be
#' found in [`covariance_types`].
|
341 |
#' |
|
342 |
#' The time points have to be unique for each subject. That is, |
|
343 |
#' there cannot be time points with multiple observations for any subject. |
|
344 |
#' The rationale is that these observations would need to be correlated, but it |
|
345 |
#' is not possible within the currently implemented covariance structure framework |
|
346 |
#' to do that correctly. Moreover, for non-spatial covariance structures, the time |
|
347 |
#' variable must be a factor variable. |
|
348 |
#' |
|
349 |
#' When the optimizer is not set, the default optimizer
#' (`L-BFGS-B`) is used first to fit the model. If that converges, the fit is returned.
|
351 |
#' If not, the other available optimizers from [h_get_optimizers()], |
|
352 |
#' including `BFGS`, `CG` and `nlminb` are |
|
353 |
#' tried (in parallel if `n_cores` is set and not on Windows). |
|
354 |
#' If none of the optimizers converge, then the function fails. Otherwise |
|
355 |
#' the best fit is returned. |
|
356 |
#' |
|
357 |
#' Note that fine-grained control specifications can either be passed directly
#' to the `mmrm` function, or bundled together into the `control` argument
#' created with the [mmrm_control()] function. The two cannot be combined, since
#' the `control` object would otherwise override the arguments passed directly to `mmrm`.
|
361 |
#' |
|
362 |
#' @return An `mmrm` object. |
|
363 |
#' |
|
364 |
#' @note The `mmrm` object is also an `mmrm_fit` and an `mmrm_tmb` object, |
|
365 |
#' therefore corresponding methods also work (see [`mmrm_tmb_methods`]). |
|
366 |
#' |
|
367 |
#' Additional contents depend on the choice of the adjustment `method`: |
|
368 |
#' - If Satterthwaite adjustment is used, the Jacobian information `jac_list` |
|
369 |
#' is included. |
|
370 |
#' - If Kenward-Roger adjustment is used, `kr_comp` contains necessary |
|
371 |
#' components and `beta_vcov_adj` includes the adjusted coefficients covariance |
|
372 |
#' matrix. |
|
373 |
#' |
|
374 |
#' Use of the package `emmeans` is supported, see [`emmeans_support`]. |
|
375 |
#' |
|
376 |
#' NA values are always omitted regardless of `na.action` setting. |
|
377 |
#' |
|
378 |
#' When the number of visit levels is large, creating the covariance matrix usually requires
#' a lot of memory. By default, the maximum allowed number of visit levels is 100; if there are more
#' visit levels, a confirmation is requested in interactive sessions.
#' You can use `options(mmrm.max_visits = <target>)` to increase the maximum allowed number of visit
#' levels. In non-interactive sessions no confirmation is requested, and an error is raised directly if
#' the number of visit levels exceeds the maximum.
|
384 |
#' |
|
385 |
#' @export |
|
386 |
#' |
|
387 |
#' @examples |
|
388 |
#' fit <- mmrm( |
|
389 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
390 |
#' data = fev_data |
|
391 |
#' ) |
|
392 |
#' |
|
393 |
#' # Direct specification of control details: |
|
394 |
#' fit <- mmrm( |
|
395 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
396 |
#' data = fev_data, |
|
397 |
#' weights = fev_data$WEIGHTS, |
|
398 |
#' method = "Kenward-Roger" |
|
399 |
#' ) |
|
400 |
#' |
|
401 |
#' # Alternative specification via control argument (but you cannot mix the |
|
402 |
#' # two approaches): |
|
403 |
#' fit <- mmrm( |
|
404 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
405 |
#' data = fev_data, |
|
406 |
#' control = mmrm_control(method = "Kenward-Roger") |
|
407 |
#' ) |
|
408 |
mmrm <- function(formula, |
|
409 |
data, |
|
410 |
weights = NULL, |
|
411 |
covariance = NULL, |
|
412 |
reml = TRUE, |
|
413 |
control = mmrm_control(...), |
|
414 |
...) { |
|
415 | 175x |
assert_false(!missing(control) && !missing(...)) |
416 | 174x |
assert_class(control, "mmrm_control") |
417 | 169x |
assert_list(control$optimizers, min.len = 1) |
418 | ||
419 | 169x |
if (control$method %in% c("Kenward-Roger", "Kenward-Roger-Linear") && !reml) { |
420 | ! |
stop("Kenward-Roger only works for REML") |
421 |
} |
|
422 | 169x |
h_valid_formula(formula) |
423 | 168x |
covariance <- h_reconcile_cov_struct(formula, covariance) |
424 | 167x |
formula_parts <- h_mmrm_tmb_formula_parts(formula, covariance) |
425 | 167x |
h_tmb_warn_non_deterministic() |
426 | ||
427 | 167x |
if (!missing(data)) { |
428 | 166x |
attr(data, which = "dataname") <- toString(match.call()$data) |
429 |
} else { |
|
430 |
# na.action set to na.pass to allow data to be full; it will be further trimmed later
|
431 | 1x |
data <- model.frame(formula_parts$full_formula, na.action = "na.pass") |
432 |
} |
|
433 | ||
434 | 167x |
if (is.null(weights)) { |
435 | 151x |
weights <- rep(1, nrow(data)) |
436 |
} else { |
|
437 | 16x |
attr(weights, which = "dataname") <- deparse(match.call()$weights) |
438 |
} |
|
439 | 167x |
tmb_data <- h_mmrm_tmb_data( |
440 | 167x |
formula_parts, data, weights, reml, |
441 | 167x |
singular = if (control$accept_singular) "drop" else "error", |
442 | 167x |
drop_visit_levels = control$drop_visit_levels, |
443 | 167x |
allow_na_response = FALSE |
444 |
) |
|
445 | 167x |
fit <- structure("", class = "try-error") |
446 | 167x |
names_all_optimizers <- names(control$optimizers) |
447 | 167x |
while (is(fit, "try-error") && length(control$optimizers) > 0) { |
448 | 171x |
fit <- fit_single_optimizer( |
449 | 171x |
tmb_data = tmb_data, |
450 | 171x |
formula_parts = formula_parts, |
451 | 171x |
control = control |
452 |
) |
|
453 | 168x |
if (is(fit, "try-error")) { |
454 | 6x |
warning(paste0( |
455 | 6x |
"Divergence with optimizer ", names(control$optimizers[1L]), " due to problems: ", |
456 | 6x |
toString(attr(fit, "divergence")) |
457 |
)) |
|
458 |
} |
|
459 | 168x |
control$optimizers <- control$optimizers[-1] |
460 |
} |
|
461 | 164x |
if (!attr(fit, "converged")) { |
462 | 7x |
more_optimizers <- length(control$optimizers) >= 1L |
463 | 7x |
if (more_optimizers) { |
464 | 5x |
fit <- refit_multiple_optimizers( |
465 | 5x |
fit = fit, |
466 | 5x |
control = control |
467 |
) |
|
468 |
} else { |
|
469 | 2x |
all_problems <- unlist( |
470 | 2x |
attributes(fit)[c("errors", "warnings")], |
471 | 2x |
use.names = FALSE |
472 |
) |
|
473 | 2x |
stop(paste0( |
474 | 2x |
"Chosen optimizers '", toString(names_all_optimizers), "' led to problems during model fit:\n", |
475 | 2x |
paste(paste0(seq_along(all_problems), ") ", all_problems), collapse = ";\n"), "\n", |
476 | 2x |
"Consider trying multiple or different optimizers." |
477 |
)) |
|
478 |
} |
|
479 |
} |
|
480 | 161x |
fit_msg <- attr(fit, "messages") |
481 | 161x |
if (!is.null(fit_msg)) { |
482 | ! |
message(paste(fit_msg, collapse = "\n")) |
483 |
} |
|
484 | 161x |
fit$call <- match.call() |
485 | 161x |
fit$call$formula <- formula |
486 | 161x |
fit$method <- control$method |
487 | 161x |
fit$vcov <- control$vcov |
488 | 161x |
if (control$vcov %in% c("Kenward-Roger", "Kenward-Roger-Linear")) { |
489 | 47x |
fit$kr_comp <- h_get_kr_comp(fit$tmb_data, fit$theta_est) |
490 | 47x |
fit$beta_vcov_adj <- h_var_adj( |
491 | 47x |
v = fit$beta_vcov, |
492 | 47x |
w = component(fit, "theta_vcov"), |
493 | 47x |
p = fit$kr_comp$P, |
494 | 47x |
q = fit$kr_comp$Q, |
495 | 47x |
r = fit$kr_comp$R, |
496 | 47x |
linear = (control$vcov == "Kenward-Roger-Linear") |
497 |
) |
|
498 | 114x |
} else if (control$vcov %in% c("Empirical", "Empirical-Bias-Reduced", "Empirical-Jackknife")) { |
499 | 31x |
empirical_comp <- h_get_empirical( |
500 | 31x |
fit$tmb_data, fit$theta_est, fit$beta_est, fit$beta_vcov, control$vcov |
501 |
) |
|
502 | 31x |
fit$beta_vcov_adj <- empirical_comp$cov |
503 | 31x |
fit$empirical_df_mat <- empirical_comp$df_mat |
504 | 31x |
dimnames(fit$beta_vcov_adj) <- dimnames(fit$beta_vcov) |
505 | 83x |
} else if (identical(control$vcov, "Asymptotic")) { |
506 |
# Note that we only need the Jacobian list under Asymptotic covariance method, |
|
507 |
# cf. the Satterthwaite vignette. |
|
508 | 83x |
if (identical(fit$method, "Satterthwaite")) { |
509 | 81x |
fit$jac_list <- h_jac_list(fit$tmb_data, fit$theta_est, fit$beta_vcov) |
510 |
} |
|
511 |
} else { |
|
512 | ! |
stop("Unrecognized coefficient variance-covariance method!")
513 |
} |
|
514 | ||
515 | 161x |
class(fit) <- c("mmrm", class(fit)) |
516 | 161x |
fit |
517 |
} |
1 |
#' Capture all Output |
|
2 |
#' |
|
3 |
#' This function silences all warnings, errors and messages and instead returns a list
#' containing the result (if no error occurred), as well as the warnings, errors,
#' messages and divergence signals as character vectors.
|
6 |
#' |
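#' For example (an illustrative sketch), capturing a warning while removing a known benign
#' message could look like:
#' ```r
#' res <- h_record_all_output(
#'   {
#'     message("step 1 done")
#'     warning("numerical instability")
#'     42
#'   },
#'   remove = list(messages = "step 1 done")
#' )
#' res$result   # 42
#' res$warnings # "numerical instability"
#' ```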
|
7 |
#' @param expr (`expression`)\cr to be executed. |
|
8 |
#' @param remove (`list`)\cr optional list with elements `warnings`, `errors`, |
|
9 |
#' `messages` which can be character vectors, which will be removed from the |
|
10 |
#' results if specified. |
|
11 |
#' @param divergence (`list`)\cr optional list similar as `remove`, but these |
|
12 |
#' character vectors will be moved to the `divergence` result and signal |
|
13 |
#' that the fit did not converge. |
|
14 |
#' |
|
15 |
#' @return |
|
16 |
#' A list containing |
|
17 |
#' |
|
18 |
#' - `result`: The object returned by `expr` or `list()` if an error was thrown. |
|
19 |
#' - `warnings`: `NULL` or a character vector if warnings were thrown. |
|
20 |
#' - `errors`: `NULL` or a string if an error was thrown. |
|
21 |
#' - `messages`: `NULL` or a character vector if messages were produced. |
|
22 |
#' - `divergence`: `NULL` or a character vector if divergence messages were caught. |
|
23 |
#' |
|
24 |
#' @keywords internal |
|
25 |
h_record_all_output <- function(expr, |
|
26 |
remove = list(), |
|
27 |
divergence = list()) { |
|
28 |
# Note: We don't need to and cannot assert `expr` here. |
|
29 | 201x |
assert_list(remove, types = "character") |
30 | 201x |
assert_list(divergence, types = "character") |
31 | 201x |
env <- new.env() |
32 | 201x |
result <- withCallingHandlers( |
33 | 201x |
withRestarts( |
34 | 201x |
expr, |
35 | 201x |
muffleStop = function(e) structure(e$message, class = "try-error") |
36 |
), |
|
37 | 201x |
message = function(m) { |
38 | 6x |
msg_without_newline <- gsub(m$message, pattern = "\n$", replacement = "") |
39 | 6x |
env$message <- c(env$message, msg_without_newline) |
40 | 6x |
invokeRestart("muffleMessage") |
41 |
}, |
|
42 | 201x |
warning = function(w) { |
43 | 14x |
env$warning <- c(env$warning, w$message) |
44 | 14x |
invokeRestart("muffleWarning") |
45 |
}, |
|
46 | 201x |
error = function(e) { |
47 | 14x |
env$error <- c(env$error, e$message) |
48 | 14x |
invokeRestart("muffleStop", e) |
49 |
} |
|
50 |
) |
|
51 | 201x |
list( |
52 | 201x |
result = result, |
53 | 201x |
warnings = setdiff(env$warning, c(remove$warnings, divergence$warnings)), |
54 | 201x |
errors = setdiff(env$error, c(remove$errors, divergence$errors)), |
55 | 201x |
messages = setdiff(env$message, c(remove$messages, divergence$messages)), |
56 | 201x |
divergence = c( |
57 | 201x |
intersect(env$warning, divergence$warnings), |
58 | 201x |
intersect(env$error, divergence$errors), |
59 | 201x |
intersect(env$message, divergence$messages) |
60 |
) |
|
61 |
) |
|
62 |
} |
|
63 | ||
64 |
#' Trace of a Matrix |
|
65 |
#' |
|
66 |
#' @description Obtain the trace of a matrix if the matrix is square, otherwise raise an error.
|
67 |
#' |
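#' For example (illustrative):
#' ```r
#' h_tr(diag(c(1, 2, 3))) # 6
#' ```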
|
68 |
#' @param x (`matrix`)\cr square matrix input. |
|
69 |
#' |
|
70 |
#' @return The trace of the square matrix. |
|
71 |
#' |
|
72 |
#' @keywords internal |
|
73 |
h_tr <- function(x) { |
|
74 | 1790x |
if (nrow(x) != ncol(x)) { |
75 | 1x |
stop("x must be a square matrix")
76 |
} |
|
77 | 1789x |
sum(Matrix::diag(x)) |
78 |
} |
|
79 | ||
80 |
#' Split Control List |
|
81 |
#' |
|
82 |
#' @description Split the [mmrm_control()] object according to its optimizers and use additional arguments |
|
83 |
#' to replace the elements in the original object. |
|
84 |
#' |
|
85 |
#' @param control (`mmrm_control`)\cr object. |
|
86 |
#' @param ... additional parameters to update the `control` object. |
|
87 |
#' |
|
88 |
#' @return A `list` of `mmrm_control` entries. |
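#' @details For example (an illustrative sketch), splitting a control with two optimizers
#' yields one `mmrm_control` per optimizer, each carrying the updated start values:
#' ```r
#' ctrl <- mmrm_control(optimizer = c("BFGS", "nlminb"))
#' ctrls <- h_split_control(ctrl, start = c(0, 0, 0))
#' length(ctrls) # 2
#' names(ctrls[[1]]$optimizers) # "BFGS"
#' ```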
|
89 |
#' @keywords internal |
|
90 |
h_split_control <- function(control, ...) { |
|
91 | 8x |
assert_class(control, "mmrm_control") |
92 | 8x |
l <- length(control$optimizers) |
93 | 8x |
lapply(seq_len(l), function(i) { |
94 | 22x |
ret <- utils::modifyList(control, list(...)) |
95 | 22x |
ret$optimizers <- control$optimizers[i] |
96 | 22x |
ret |
97 |
}) |
|
98 |
} |
|
99 | ||
100 |
#' Obtain Optimizer according to Optimizer String Value |
|
101 |
#' |
|
102 |
#' @description This function creates optimizer functions with arguments. |
|
103 |
#' |
|
104 |
#' @param optimizer (`character`)\cr names of built-in optimizers to try, subset |
|
105 |
#' of "L-BFGS-B", "BFGS", "CG" and "nlminb". |
|
106 |
#' @param optimizer_fun (`function` or `list` of `function`)\cr alternatively to `optimizer`, |
|
107 |
#' an optimizer function or a list of optimizer functions can be passed directly here. |
|
108 |
#' @param optimizer_args (`list`)\cr additional arguments for `optimizer_fun`. |
|
109 |
#' @param optimizer_control (`list`)\cr passed to argument `control` in `optimizer_fun`. |
|
110 |
#' |
|
111 |
#' @details |
|
112 |
#' If you want to use only the built-in optimizers: |
|
113 |
#' - `optimizer` is a shortcut to create a list of built-in optimizer functions |
|
114 |
#' passed to `optimizer_fun`. |
|
115 |
#' - Allowed are "L-BFGS-B", "BFGS", "CG" (using [stats::optim()] with corresponding method) |
|
116 |
#' and "nlminb" (using [stats::nlminb()]). |
|
117 |
#' - Other arguments should go into `optimizer_args`. |
|
118 |
#' |
|
119 |
#' If you want to use your own optimizer function: |
|
120 |
#' - Make sure that its first three arguments are, in this order: the parameter (start value),
#' the objective function, and the gradient function.
|
122 |
#' - If there are other named arguments in front of these, make sure they are correctly |
|
123 |
#' specified through `optimizer_args`. |
|
124 |
#' - If the Hessian can be used, please make sure its argument name is `hessian` and
#' add the attribute `use_hessian = TRUE` to the function,
#' using `attr(fun, "use_hessian") <- TRUE` (see the sketch below).
|
127 |
#' |
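#' For illustration (a minimal sketch, not part of the package API; the argument names shown
#' here are only assumptions for the example), a custom optimizer wrapping [stats::nlminb()]
#' could be registered like this:
#' ```r
#' my_opt <- function(par, objective, gradient, hessian, ...) {
#'   stats::nlminb(start = par, objective = objective, gradient = gradient, hessian = hessian, ...)
#' }
#' attr(my_opt, "use_hessian") <- TRUE
#' opts <- h_get_optimizers(optimizer_fun = my_opt)
#' ```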
|
128 |
#' @return Named `list` of optimizers created by [h_partial_fun_args()]. |
|
129 |
#' |
|
130 |
#' @keywords internal |
|
131 |
h_get_optimizers <- function(optimizer = c("L-BFGS-B", "BFGS", "CG", "nlminb"), |
|
132 |
optimizer_fun = h_optimizer_fun(optimizer), |
|
133 |
optimizer_args = list(), |
|
134 |
optimizer_control = list()) { |
|
135 | 246x |
if ("automatic" %in% optimizer) { |
136 | 1x |
lifecycle::deprecate_warn( |
137 | 1x |
when = "0.2.0", |
138 | 1x |
what = I("\"automatic\" optimizer"), |
139 | 1x |
details = "please just omit optimizer argument" |
140 |
) |
|
141 | 1x |
optimizer_fun <- h_optimizer_fun() |
142 |
} |
|
143 | 246x |
assert( |
144 | 246x |
test_function(optimizer_fun), |
145 | 246x |
test_list(optimizer_fun, types = "function", names = "unique") |
146 |
) |
|
147 | 246x |
if (is.function(optimizer_fun)) { |
148 | 7x |
optimizer_fun <- list(custom_optimizer = optimizer_fun) |
149 |
} |
|
150 | 246x |
lapply(optimizer_fun, function(x) { |
151 | 924x |
do.call(h_partial_fun_args, c(list(fun = x, control = optimizer_control), optimizer_args)) |
152 |
}) |
|
153 |
} |
|
154 | ||
155 |
#' Obtain Optimizer Function with Character |
|
156 |
#' @description Obtain the optimizer function through the character provided. |
|
157 |
#' @param optimizer (`character`)\cr vector of optimizers. |
|
158 |
#' |
|
159 |
#' @return A (`list`)\cr of optimizer functions generated from [h_partial_fun_args()]. |
|
160 |
#' @keywords internal |
|
161 |
h_optimizer_fun <- function(optimizer = c("L-BFGS-B", "BFGS", "CG", "nlminb")) { |
|
162 | 240x |
optimizer <- match.arg(optimizer, several.ok = TRUE) |
163 | 240x |
lapply(stats::setNames(optimizer, optimizer), function(x) { |
164 | 920x |
switch(x, |
165 | 229x |
"L-BFGS-B" = h_partial_fun_args(fun = stats::optim, method = x), |
166 | 230x |
"BFGS" = h_partial_fun_args(fun = stats::optim, method = x), |
167 | 228x |
"CG" = h_partial_fun_args(fun = stats::optim, method = x), |
168 | 233x |
"nlminb" = h_partial_fun_args(fun = stats::nlminb, additional_attr = list(use_hessian = TRUE)) |
169 |
) |
|
170 |
}) |
|
171 |
} |
|
172 | ||
173 |
#' Create Partial Functions |
|
174 |
#' @description Creates partial functions with arguments. |
|
175 |
#' |
|
176 |
#' @param fun (`function`)\cr to be wrapped. |
|
177 |
#' @param ... Additional arguments for `fun`. |
|
178 |
#' @param additional_attr (`list`)\cr of additional attributes to apply to the result. |
|
179 |
#' |
|
180 |
#' @details This function adds an `args` attribute to the original function,
#' and adds an extra class `partial` to it.
#' `args` holds the arguments for the function, and elements in `...` will override the existing
#' arguments in the `args` attribute. `additional_attr` will override the existing attributes.
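#'
#' For example (illustrative), partially applying the optimization method to [stats::optim()]:
#' ```r
#' pfun <- h_partial_fun_args(stats::optim, method = "BFGS")
#' attr(pfun, "args") # list(method = "BFGS")
#' class(pfun)        # "partial" "function"
#' ```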
|
184 |
#' |
|
185 |
#' @return Object with S3 class `"partial"`, a `function` with `args` attribute (and possibly more |
|
186 |
#' attributes from `additional_attr`). |
|
187 |
#' @keywords internal |
|
188 |
h_partial_fun_args <- function(fun, ..., additional_attr = list()) { |
|
189 | 1848x |
assert_function(fun) |
190 | 1848x |
assert_list(additional_attr, names = "unique") |
191 | 1848x |
a_args <- list(...) |
192 | 1848x |
assert_list(a_args, names = "unique") |
193 | 1848x |
args <- attr(fun, "args") |
194 | 1848x |
if (is.null(args)) { |
195 | 932x |
args <- list() |
196 |
} |
|
197 | 1848x |
do.call( |
198 | 1848x |
structure, |
199 | 1848x |
args = utils::modifyList( |
200 | 1848x |
list( |
201 | 1848x |
.Data = fun, |
202 | 1848x |
args = utils::modifyList(args, a_args), |
203 | 1848x |
class = c("partial", "function") |
204 |
), |
|
205 | 1848x |
additional_attr |
206 |
) |
|
207 |
) |
|
208 |
} |
|
209 | ||
210 |
#' Obtain Default Covariance Method |
|
211 |
#' |
|
212 |
#' @description Obtain the default covariance method depending on |
|
213 |
#' the degrees of freedom method used. |
|
214 |
#' |
|
215 |
#' @param method (`string`)\cr degrees of freedom method. |
|
216 |
#' |
|
217 |
#' @details The default covariance method differs depending on the degrees of freedom method.
#' For "Satterthwaite" or "Between-Within", "Asymptotic" is returned.
#' For "Kenward-Roger", "Kenward-Roger" is returned.
#' For "Residual", "Empirical" is returned.
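#'
#' For example (illustrative):
#' ```r
#' h_get_cov_default("Satterthwaite") # "Asymptotic"
#' h_get_cov_default("Residual")      # "Empirical"
#' ```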
|
221 |
#' |
|
222 |
#' @return String of the default covariance method. |
|
223 |
#' @keywords internal |
|
224 |
h_get_cov_default <- function(method = c("Satterthwaite", "Kenward-Roger", "Residual", "Between-Within")) { |
|
225 | 197x |
assert_string(method) |
226 | 197x |
method <- match.arg(method) |
227 | 196x |
switch(method, |
228 | 1x |
"Residual" = "Empirical", |
229 | 158x |
"Satterthwaite" = "Asymptotic", |
230 | 35x |
"Kenward-Roger" = "Kenward-Roger", |
231 | 2x |
"Between-Within" = "Asymptotic" |
232 |
) |
|
233 |
} |
|
234 | ||
235 |
#' Complete `character` Vector Names From Values |
|
236 |
#' |
|
237 |
#' @param x (`character` or `list`)\cr value whose names should be completed |
|
238 |
#' from element values. |
|
239 |
#' |
|
240 |
#' @return A named vector or list. |
|
241 |
#' |
|
242 |
#' @keywords internal |
|
243 |
fill_names <- function(x) { |
|
244 | 4x |
n <- names(x) |
245 | 4x |
is_unnamed <- if (is.null(n)) rep_len(TRUE, length(x)) else n == "" |
246 | 4x |
names(x)[is_unnamed] <- x[is_unnamed] |
247 | 4x |
x |
248 |
} |
|
249 | ||
250 |
#' Drop Items from an Indexable
#'
#' Drop elements from an indexable object (`vector`, `list`, etc.).
|
253 |
#' |
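#' For example (illustrative), the first `n` elements are dropped:
#' ```r
#' drop_elements(letters[1:5], 2) # c("c", "d", "e")
#' ```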
|
254 |
#' @param x Any object that can be consumed by [seq_along()] and indexed by a |
|
255 |
#' logical vector of the same length. |
|
256 |
#' @param n (`integer`)\cr the number of terms to drop. |
|
257 |
#' |
|
258 |
#' @return A subset of `x`. |
|
259 |
#' |
|
260 |
#' @keywords internal |
|
261 |
drop_elements <- function(x, n) { |
|
262 | 819x |
x[seq_along(x) > n] |
263 |
} |
|
264 | ||
265 |
#' Ask for Confirmation on Large Visit Levels |
|
266 |
#' |
|
267 |
#' @description Ask the user for confirmation if there are too many visit levels |
|
268 |
#' for non-spatial covariance structure in interactive sessions. |
|
269 |
#' |
|
270 |
#' @param x (`numeric`)\cr number of visit levels. |
|
271 |
#' |
|
272 |
#' @return Logical value `TRUE`. |
|
273 |
#' @keywords internal |
|
274 |
h_confirm_large_levels <- function(x) { |
|
275 | 297x |
assert_count(x) |
276 | 297x |
allowed_lvls <- x <= getOption("mmrm.max_visits", 100) |
277 | 297x |
if (allowed_lvls) { |
278 | 295x |
return(TRUE) |
279 |
} |
|
280 | 2x |
if (!interactive()) { |
281 | 2x |
stop("Visit levels too large!", call. = FALSE) |
282 |
} |
|
283 | ! |
proceed <- utils::askYesNo( |
284 | ! |
paste(
"The number of visit levels is possibly too large.",
"This requires large memory. Are you sure you want to continue?",
287 | ! |
collapse = " " |
288 |
) |
|
289 |
) |
|
290 | ! |
if (!identical(proceed, TRUE)) { |
291 | ! |
stop("Visit levels too large!", call. = FALSE) |
292 |
} |
|
293 | ! |
return(TRUE) |
294 |
} |
|
295 | ||
296 |
#' Default Value on NULL |
|
297 |
#' Return default value when first argument is NULL. |
|
298 |
#' |
|
299 |
#' @param x Object. |
|
300 |
#' @param y Object. |
|
301 |
#' |
|
302 |
#' @details If `x` is `NULL`, returns `y`. Otherwise returns `x`.
|
303 |
#' |
|
304 |
#' @keywords internal |
|
305 |
h_default_value <- function(x, y) { |
|
306 | 312x |
if (is.null(x)) { |
307 | 277x |
y |
308 |
} else { |
|
309 | 35x |
x |
310 |
} |
|
311 |
} |
|
312 | ||
313 |
#' Warn on na.action |
|
314 |
#' @keywords internal |
|
315 |
h_warn_na_action <- function() { |
|
316 | 260x |
if (!identical(getOption("na.action"), "na.omit")) { |
317 | 6x |
warning("na.action is always set to `na.omit` for `mmrm` fit!") |
318 |
} |
|
319 |
} |
|
320 | ||
321 |
#' Obtain `na.action` as Function |
|
322 |
#' @keywords internal |
|
323 |
h_get_na_action <- function(na_action) { |
|
324 | 56x |
if (is.function(na_action) && identical(methods::formalArgs(na_action), c("object", "..."))) { |
325 | 5x |
return(na_action) |
326 |
} |
|
327 | 51x |
if (is.character(na_action) && length(na_action) == 1L) { |
328 | 51x |
assert_subset(na_action, c("na.omit", "na.exclude", "na.fail", "na.pass", "na.contiguous")) |
329 | 51x |
return(get(na_action, mode = "function", pos = "package:stats")) |
330 |
} |
|
331 |
} |
|
332 | ||
333 |
#' Validate mmrm Formula |
|
334 |
#' @param formula (`formula`)\cr to check. |
|
335 |
#' |
|
336 |
#' @details In mmrm models, `.` is not allowed in the formula because it introduces
#' ambiguity about which covariates are used.
|
338 |
#' |
|
339 |
#' @keywords internal |
|
340 |
h_valid_formula <- function(formula) { |
|
341 | 183x |
assert_formula(formula) |
342 | 183x |
if ("." %in% all.vars(formula)) { |
343 | 2x |
stop("`.` is not allowed in mmrm models!") |
344 |
} |
|
345 |
} |
|
346 | ||
347 |
#' Standard Starting Value |
|
348 |
#' |
|
349 |
#' @description Obtain standard start values. |
|
350 |
#' |
|
351 |
#' @param cov_type (`string`)\cr name of the covariance structure. |
|
352 |
#' @param n_visits (`int`)\cr number of visits. |
|
353 |
#' @param n_groups (`int`)\cr number of groups. |
|
354 |
#' @param ... not used. |
|
355 |
#' |
|
356 |
#' @details |
|
357 |
#' `std_start` will try to provide variance parameters corresponding to an identity covariance matrix.
#' However, for `ar1` and `ar1h` the corresponding values are not ideal because
#' \eqn{\rho} is usually a positive number, and using 0 as the starting value can lead to an
#' incorrect optimization result, so we use 0.5 as the initial value of \eqn{\rho}.
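#'
#' For example (illustrative), for an unstructured (`us`) covariance with 3 visits and
#' a single group this returns six zeros, one per variance parameter:
#' ```r
#' std_start("us", n_visits = 3, n_groups = 1) # rep(0, 6)
#' ```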
|
361 |
#' |
|
362 |
#' @return A numeric vector of starting values. |
|
363 |
#' |
|
364 |
#' @export |
|
365 |
std_start <- function(cov_type, n_visits, n_groups, ...) { |
|
366 | 502x |
assert_string(cov_type) |
367 | 502x |
assert_subset(cov_type, cov_types(c("abbr", "habbr"))) |
368 | 502x |
assert_int(n_visits, lower = 1L) |
369 | 502x |
assert_int(n_groups, lower = 1L) |
370 | 502x |
start_value <- switch(cov_type, |
371 | 502x |
us = rep(0, n_visits * (n_visits + 1) / 2), |
372 | 502x |
toep = rep(0, n_visits), |
373 | 502x |
toeph = rep(0, 2 * n_visits - 1), |
374 | 502x |
ar1 = c(0, 0.5), |
375 | 502x |
ar1h = c(rep(0, n_visits), 0.5), |
376 | 502x |
ad = rep(0, n_visits), |
377 | 502x |
adh = rep(0, 2 * n_visits - 1), |
378 | 502x |
cs = rep(0, 2), |
379 | 502x |
csh = rep(0, n_visits + 1), |
380 | 502x |
sp_exp = rep(0, 2) |
381 |
) |
|
382 | 502x |
rep(start_value, n_groups) |
383 |
} |
|
384 | ||
385 |
#' Empirical Starting Value |
|
386 |
#' |
|
387 |
#' @description Obtain empirical start value for unstructured covariance |
|
388 |
#' |
|
389 |
#' @param data (`data.frame`)\cr data used for model fitting. |
|
390 |
#' @param model_formula (`formula`)\cr the formula in mmrm model without covariance structure part. |
|
391 |
#' @param visit_var (`string`)\cr visit variable. |
|
392 |
#' @param subject_var (`string`)\cr subject id variable. |
|
393 |
#' @param subject_groups (`factor`)\cr subject group assignment. |
|
394 |
#' @param ... not used. |
|
395 |
#' |
|
396 |
#' @details |
|
397 |
#' `emp_start` only works for the unstructured covariance structure.
#' It first fits a linear regression to obtain the coefficients, uses the residuals
#' to compute the empirical variance-covariance matrix, and then derives the
#' starting values from it.
|
401 |
#' |
|
402 |
#' @note `data` is used instead of `full_frame` because `full_frame` is already |
|
403 |
#' transformed if model contains transformations, e.g. `log(FEV1) ~ exp(FEV1_BL)` will |
|
404 |
#' drop `FEV1` and `FEV1_BL` but add `log(FEV1)` and `exp(FEV1_BL)` in `full_frame`. |
|
405 |
#' |
|
406 |
#' @return A numeric vector of starting values. |
|
407 |
#' |
|
408 |
#' @export |
|
409 |
emp_start <- function(data, model_formula, visit_var, subject_var, subject_groups, ...) { |
|
410 | 4x |
assert_formula(model_formula) |
411 | 4x |
assert_data_frame(data) |
412 | 4x |
assert_subset(all.vars(model_formula), colnames(data)) |
413 | 4x |
assert_string(visit_var) |
414 | 4x |
assert_string(subject_var) |
415 | 4x |
assert_factor(data[[visit_var]]) |
416 | 4x |
n_visits <- length(levels(data[[visit_var]])) |
417 | 4x |
assert_factor(data[[subject_var]]) |
418 | 4x |
subjects <- droplevels(data[[subject_var]]) |
419 | 4x |
n_subjects <- length(levels(subjects)) |
420 | 4x |
fit <- stats::lm(formula = model_formula, data = data) |
421 | 4x |
res <- rep(NA, n_subjects * n_visits) |
422 | 4x |
res[ |
423 | 4x |
n_visits * as.integer(subjects) - n_visits + as.integer(data[[visit_var]]) |
424 | 4x |
] <- residuals(fit) |
425 | 4x |
res_mat <- matrix(res, ncol = n_visits, nrow = n_subjects, byrow = TRUE) |
426 | 4x |
emp_covs <- lapply( |
427 | 4x |
unname(split(seq_len(n_subjects), subject_groups)), |
428 | 4x |
function(x) { |
429 | 4x |
stats::cov(res_mat[x, , drop = FALSE], use = "pairwise.complete.obs") |
430 |
} |
|
431 |
) |
|
432 | 4x |
unlist(lapply(emp_covs, h_get_theta_from_cov)) |
433 |
} |
|
434 |
#' Obtain Theta from Covariance Matrix |
|
435 |
#' |
|
436 |
#' @description Obtain unstructured theta from covariance matrix. |
|
437 |
#' |
|
438 |
#' @param covariance (`matrix`) of covariance matrix values. |
|
439 |
#' |
|
440 |
#' @details |
|
441 |
#' If the covariance matrix has `NA` in some of its elements, they are replaced by
#' 0 (off-diagonal) and 1 (diagonal). If the resulting matrix is still not positive definite,
#' the nearest positive definite matrix is used instead.
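#'
#' For illustration (a minimal sketch), the returned theta vector contains the log diagonal
#' of the lower-triangular Cholesky factor followed by the scaled off-diagonal entries:
#' ```r
#' h_get_theta_from_cov(matrix(c(4, 2, 2, 9), nrow = 2))
#' ```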
|
443 |
#' |
|
444 |
#' @return Numeric vector of the theta values. |
|
445 |
#' @keywords internal |
|
446 |
h_get_theta_from_cov <- function(covariance) { |
|
447 | 7x |
assert_matrix(covariance, mode = "numeric", ncols = nrow(covariance)) |
448 | 7x |
covariance[is.na(covariance)] <- 0 |
449 | 7x |
diag(covariance)[diag(covariance) == 0] <- 1 |
450 |
# The empirical covariance matrix is not always positive definite in special cases of numerical singularity.
|
451 | 7x |
qr_res <- qr(covariance) |
452 | 7x |
if (qr_res$rank < ncol(covariance)) { |
453 | ! |
covariance <- Matrix::nearPD(covariance)$mat |
454 |
} |
|
455 | 7x |
emp_chol <- t(chol(covariance)) |
456 | 7x |
mat <- t(solve(diag(diag(emp_chol)), emp_chol)) |
457 | 7x |
ret <- c(log(diag(emp_chol)), mat[upper.tri(mat)]) |
458 | 7x |
unname(ret) |
459 |
} |
|
460 | ||
461 |
#' Register S3 Method |
|
462 |
#' Register S3 method to a generic. |
|
463 |
#' |
|
464 |
#' @param pkg (`string`) name of the package.
#' @param generic (`string`) name of the generic.
#' @param class (`string`) name of the class the method dispatches on.
#' @param envir (`environment`) the environment where the method is defined.
|
468 |
#' |
|
469 |
#' @details This function is adapted from `emmeans:::register_s3_method()`. |
|
470 |
#' |
|
471 |
#' @keywords internal |
|
472 |
h_register_s3 <- function(pkg, generic, class, envir = parent.frame()) { |
|
473 | 1x |
assert_string(pkg) |
474 | 1x |
assert_string(generic) |
475 | 1x |
assert_string(class) |
476 | 1x |
assert_environment(envir) |
477 | 1x |
fun <- get(paste0(generic, ".", class), envir = envir) |
478 | 1x |
if (isNamespaceLoaded(pkg)) { |
479 | 1x |
registerS3method(generic, class, fun, envir = asNamespace(pkg)) |
480 |
} |
|
481 | 1x |
setHook(packageEvent(pkg, "onLoad"), function(...) { |
482 | ! |
registerS3method(generic, class, fun, envir = asNamespace(pkg)) |
483 |
}) |
|
484 |
} |
|
485 | ||
486 |
#' Check if a Factor Should Drop Levels |
|
487 |
#' |
|
488 |
#' @param x (`vector`) vector to check. |
|
489 |
#' |
|
490 |
#' @keywords internal |
|
491 |
h_extra_levels <- function(x) { |
|
492 | 1629x |
is.factor(x) && length(levels(x)) > length(unique(x)) |
493 |
} |
|
494 | ||
495 |
#' Drop Levels from Dataset |
|
496 |
#' @param data (`data.frame`) data to drop levels. |
|
497 |
#' @param subject_var (`character`) subject variable. |
|
498 |
#' @param visit_var (`character`) visit variable. |
|
499 |
#' @param except (`character`) variables to exclude from dropping. |
|
500 |
#' @keywords internal |
|
501 |
h_drop_levels <- function(data, subject_var, visit_var, except) { |
|
502 | 263x |
assert_data_frame(data) |
503 | 263x |
assert_character(subject_var) |
504 | 263x |
assert_character(visit_var) |
505 | 263x |
assert_character(except, null.ok = TRUE) |
506 | 263x |
all_cols <- colnames(data) |
507 | 263x |
to_drop <- vapply( |
508 | 263x |
data, |
509 | 263x |
h_extra_levels, |
510 | 263x |
logical(1L) |
511 |
) |
|
512 | 263x |
to_drop <- all_cols[to_drop] |
513 |
# only drop levels for those not defined in except and not in visit_var.
|
514 | 263x |
to_drop <- setdiff(to_drop, c(visit_var, except)) |
515 | 263x |
data[to_drop] <- lapply(data[to_drop], droplevels) |
516 |
# subject vars are always dropped and no message is given.
|
517 | 263x |
dropped <- setdiff(to_drop, subject_var) |
518 | 263x |
if (length(dropped) > 0) { |
519 | 3x |
message( |
520 | 3x |
"Some factor levels are dropped due to singular design matrix: ", |
521 | 3x |
toString(dropped) |
522 |
) |
|
523 |
} |
|
524 | 263x |
data |
525 |
} |
|
526 | ||
527 |
#' Warn if TMB is Configured to Use Non-Deterministic Hash for Tape Optimizer |
|
528 |
#' |
|
529 |
#' This function checks the TMB configuration for the `tmbad_deterministic_hash` setting.
|
530 |
#' If it is set to `FALSE`, a warning is issued indicating that this may lead to |
|
531 |
#' unreproducible results. |
|
532 |
#' |
|
533 |
#' @return No return value, called for side effects. |
|
534 |
#' @keywords internal |
|
535 |
h_tmb_warn_non_deterministic <- function() { |
|
536 | 169x |
if (utils::packageVersion("TMB") < "1.9.15") { |
537 | ! |
return() |
538 |
} |
|
539 | 169x |
tmb_config <- TMB::config(DLL = "mmrm") |
540 | 169x |
tape_deterministic <- tmb_config$tmbad_deterministic_hash |
541 | 169x |
if (!tape_deterministic) { |
542 | 2x |
msg <- paste( |
543 | 2x |
"TMB is configured to use a non-deterministic hash for its tape optimizer,", |
544 | 2x |
"and this may lead to unreproducible results.", |
545 | 2x |
"To disable this behavior, use `TMB::config(tmbad_deterministic_hash = 1)`.", |
546 | 2x |
sep = "\n" |
547 |
) |
|
548 | 2x |
warning(msg) |
549 |
} |
|
550 |
} |
1 |
#' Extract Formula Terms used for Covariance Structure Definition |
|
2 |
#' |
|
3 |
#' @param f (`formula`)\cr a formula from which covariance terms should be |
|
4 |
#' extracted. |
|
5 |
#' |
|
6 |
#' @return A list of covariance structure expressions found in `f`. |
|
7 |
#' |
|
8 |
#' @importFrom stats terms |
|
9 |
#' @keywords internal |
|
10 |
h_extract_covariance_terms <- function(f) { |
|
11 | 291x |
specials <- cov_types(c("abbr", "habbr")) |
12 | 291x |
terms <- stats::terms(formula_rhs(f), specials = specials) |
13 | 291x |
covariance_terms <- Filter(length, attr(terms, "specials")) |
14 | 291x |
variables <- attr(terms, "variables") |
15 | 291x |
lapply(covariance_terms, function(i) variables[[i + 1]]) |
16 |
} |
|
17 | ||
18 |
#' Drop Formula Terms used for Covariance Structure Definition |
|
19 |
#' |
|
20 |
#' @param f (`formula`)\cr a formula from which covariance terms should be |
|
21 |
#' dropped. |
|
22 |
#' |
|
23 |
#' @return The formula without accepted covariance terms. |
|
24 |
#' |
|
25 |
#' @details `terms` is used and it will preserve the environment attribute. |
|
26 |
#' This ensures the returned formula and the input formula have the same environment. |
|
27 |
#' @importFrom stats terms drop.terms |
|
28 |
#' @keywords internal |
|
29 |
h_drop_covariance_terms <- function(f) { |
|
30 | 274x |
specials <- cov_types(c("abbr", "habbr")) |
31 | ||
32 | 274x |
terms <- stats::terms(f, specials = specials) |
33 | 274x |
covariance_terms <- Filter(Negate(is.null), attr(terms, "specials")) |
34 | ||
35 |
# if no covariance terms were found, return original formula |
|
36 | 274x |
if (length(covariance_terms) == 0) { |
37 | 6x |
return(f) |
38 |
} |
|
39 | 268x |
if (length(f) != 3) { |
40 | 1x |
update_str <- "~ . -" |
41 |
} else { |
|
42 | 267x |
update_str <- ". ~ . -" |
43 |
} |
|
44 | 268x |
stats::update( |
45 | 268x |
f, |
46 | 268x |
stats::as.formula(paste(update_str, deparse(attr(terms, "variables")[[covariance_terms[[1]] + 1]]))) |
47 |
) |
|
48 |
} |
|
49 | ||
50 |
#' Add Individual Covariance Variables As Terms to Formula |
|
51 |
#' |
|
52 |
#' @param f (`formula`)\cr a formula to which covariance structure terms should |
|
53 |
#' be added. |
|
54 |
#' @param covariance (`cov_struct`)\cr a covariance structure object from which |
|
55 |
#' additional variables should be sourced. |
|
56 |
#' |
|
57 |
#' @return A new formula with included covariance terms. |
|
58 |
#' |
|
59 |
#' @details [stats::update()] is used to append the covariance structure and the environment |
|
60 |
#' attribute will not be changed. This ensures the returned formula and the input formula |
|
61 |
#' have the same environment. |
|
62 |
#' |
|
63 |
#' @keywords internal |
|
64 |
h_add_covariance_terms <- function(f, covariance) { |
|
65 | 272x |
cov_terms <- with(covariance, c(subject, visits, group)) |
66 | 266x |
cov_terms <- paste(cov_terms, collapse = " + ") |
67 | 266x |
stats::update(f, stats::as.formula(paste(". ~ . + ", cov_terms))) |
68 |
} |
|
69 | ||
70 |
#' Add Formula Terms with Character |
|
71 |
#' |
|
72 |
#' Add terms to the original formula, given their character representation.
|
73 |
#' |
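#' For example (illustrative):
#' ```r
#' h_add_terms(y ~ x, adds = c("z", "w"))               # y ~ x + z + w
#' h_add_terms(y ~ x, adds = "z", drop_response = TRUE) # ~x + z
#' ```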
|
74 |
#' @param f (`formula`)\cr a formula to be updated. |
|
75 |
#' @param adds (`character`)\cr representation of elements to be added. |
|
76 |
#' @param drop_response (`flag`)\cr whether response should be dropped. |
|
77 |
#' |
|
78 |
#' @details Elements in `adds` will be added to the formula, while the environment
|
79 |
#' of the formula is unchanged. If `adds` is `NULL` or `character(0)`, the formula is |
|
80 |
#' unchanged. |
|
81 |
#' @return A new formula with the elements in `adds` added.
|
82 |
#' |
|
83 |
#' @keywords internal |
|
84 |
h_add_terms <- function(f, adds, drop_response = FALSE) { |
|
85 | 599x |
assert_character(adds, null.ok = TRUE) |
86 | 599x |
if (length(adds) > 0L) { |
87 | 321x |
add_terms <- stats::as.formula(sprintf(". ~ . + %s", paste(adds, collapse = "+"))) |
88 | 321x |
f <- stats::update(f, add_terms) |
89 |
} |
|
90 | 599x |
if (drop_response && length(f) == 3L) { |
91 | 35x |
f[[2]] <- NULL |
92 |
} |
|
93 | 599x |
f |
94 |
} |
1 |
#' Methods for `mmrm_tmb` Objects |
|
2 |
#' |
|
3 |
#' @description `r lifecycle::badge("stable")` |
|
4 |
#' |
|
5 |
#' @param object (`mmrm_tmb`)\cr the fitted MMRM object. |
|
6 |
#' @param x (`mmrm_tmb`)\cr same as `object`. |
|
7 |
#' @param formula (`mmrm_tmb`)\cr same as `object`. |
|
8 |
#' @param complete (`flag`)\cr whether to include potential non-estimable |
|
9 |
#' coefficients. |
|
10 |
#' @param ... mostly not used; |
|
11 |
#' Exception is `model.matrix()` passing `...` to the default method. |
|
12 |
#' @return Depends on the method, see Functions. |
|
13 |
#' |
|
14 |
#' @name mmrm_tmb_methods |
|
15 |
#' |
|
16 |
#' @seealso [`mmrm_methods`], [`mmrm_tidiers`] for additional methods. |
|
17 |
#' |
|
18 |
#' @examples |
|
19 |
#' formula <- FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID) |
|
20 |
#' object <- fit_mmrm(formula, fev_data, weights = rep(1, nrow(fev_data))) |
|
21 |
NULL |
|
22 | ||
23 |
#' @describeIn mmrm_tmb_methods obtains the estimated coefficients. |
|
24 |
#' @importFrom stats coef |
|
25 |
#' @exportS3Method |
|
26 |
#' @examples |
|
27 |
#' # Estimated coefficients: |
|
28 |
#' coef(object) |
|
29 |
coef.mmrm_tmb <- function(object, complete = TRUE, ...) { |
|
30 | 58x |
assert_flag(complete) |
31 | 58x |
nm <- if (complete) "beta_est_complete" else "beta_est" |
32 | 58x |
component(object, name = nm) |
33 |
} |
|
34 | ||
35 |
#' @describeIn mmrm_tmb_methods obtains the fitted values. |
|
36 |
#' @importFrom stats fitted |
|
37 |
#' @exportS3Method |
|
38 |
#' @examples |
|
39 |
#' # Fitted values: |
|
40 |
#' fitted(object) |
|
41 |
fitted.mmrm_tmb <- function(object, ...) { |
|
42 | 19x |
fitted_col <- component(object, "x_matrix") %*% component(object, "beta_est") |
43 | 19x |
fitted_col[, 1L, drop = TRUE] |
44 |
} |
|
45 | ||
46 |
#' @describeIn mmrm_tmb_methods predict conditional means for new data; |
|
47 |
#' optionally with standard errors and confidence or prediction intervals. |
|
48 |
#' Returns a vector of predictions if `se.fit == FALSE` and |
|
49 |
#' `interval == "none"`; otherwise it returns a data.frame with multiple |
|
50 |
#' columns and one row per input data row. |
|
51 |
#' |
|
52 |
#' @param newdata (`data.frame`)\cr optional new data, otherwise data from `object` is used. |
|
53 |
#' @param se.fit (`flag`)\cr indicator if standard errors are required. |
|
54 |
#' @param interval (`string`)\cr type of interval calculation. Can be abbreviated. |
|
55 |
#' @param level (`number`)\cr tolerance/confidence level. |
|
56 |
#' @param nsim (`count`)\cr number of simulations to use. |
|
57 |
#' @param conditional (`flag`)\cr indicator if the prediction is conditional on the observation or not. |
|
58 |
#' |
|
59 |
#' @importFrom stats predict |
|
60 |
#' @exportS3Method |
|
61 |
#' |
|
62 |
#' @examples |
|
63 |
#' predict(object, newdata = fev_data) |
|
64 |
predict.mmrm_tmb <- function(object, |
|
65 |
newdata, |
|
66 |
se.fit = FALSE, # nolint |
|
67 |
interval = c("none", "confidence", "prediction"), |
|
68 |
level = 0.95, |
|
69 |
nsim = 1000L, |
|
70 |
conditional = FALSE, |
|
71 |
...) { |
|
72 | 45x |
if (missing(newdata)) { |
73 | 8x |
newdata <- object$data |
74 |
} |
|
75 | 45x |
assert_data_frame(newdata) |
76 | 45x |
orig_row_names <- row.names(newdata) |
77 | 45x |
assert_flag(se.fit) |
78 | 45x |
assert_number(level, lower = 0, upper = 1) |
79 | 45x |
assert_count(nsim, positive = TRUE) |
80 | 45x |
assert_flag(conditional) |
81 | 45x |
interval <- match.arg(interval) |
82 | 45x |
formula_parts <- object$formula_parts |
83 | 45x |
if (any(object$tmb_data$x_cols_aliased)) { |
84 | 1x |
warning( |
85 | 1x |
"In fitted object there are co-linear variables and therefore dropped terms, ", |
86 | 1x |
"and this could lead to incorrect prediction on new data." |
87 |
) |
|
88 |
} |
|
89 | 45x |
colnames <- names(Filter(isFALSE, object$tmb_data$x_cols_aliased)) |
90 | 45x |
if (!conditional && interval %in% c("none", "confidence")) { |
91 |
# model.matrix always returns a complete matrix (no NA allowed)
|
92 | 27x |
x_mat <- stats::model.matrix(object, data = newdata, use_response = FALSE)[, colnames, drop = FALSE] |
93 | 27x |
x_mat_full <- matrix( |
94 | 27x |
NA, |
95 | 27x |
nrow = nrow(newdata), ncol = ncol(x_mat), |
96 | 27x |
dimnames = list(row.names(newdata), colnames(x_mat)) |
97 |
) |
|
98 | 27x |
x_mat_full[row.names(x_mat), ] <- x_mat |
99 | 27x |
predictions <- (x_mat_full %*% component(object, "beta_est"))[, 1] |
100 | 27x |
predictions_raw <- stats::setNames(rep(NA_real_, nrow(newdata)), row.names(newdata)) |
101 | 27x |
predictions_raw[names(predictions)] <- predictions |
102 | 27x |
if (identical(interval, "none")) { |
103 | 20x |
return(predictions_raw) |
104 |
} |
|
105 | 7x |
se <- switch(interval, |
106 |
# can be NA if there are aliased cols |
|
107 | 7x |
"confidence" = diag(x_mat_full %*% component(object, "beta_vcov") %*% t(x_mat_full)), |
108 | 7x |
"none" = NA_real_ |
109 |
) |
|
110 | 7x |
res <- cbind( |
111 | 7x |
fit = predictions, se = se, |
112 | 7x |
lwr = predictions - stats::qnorm(1 - (1 - level) / 2) * se, upr = predictions + stats::qnorm(1 - (1 - level) / 2) * se |
113 |
) |
|
114 | 7x |
if (!se.fit) { |
115 | 1x |
res <- res[, setdiff(colnames(res), "se")] |
116 |
} |
|
117 | 7x |
res_raw <- matrix( |
118 | 7x |
NA_real_, |
119 | 7x |
ncol = ncol(res), nrow = nrow(newdata), |
120 | 7x |
dimnames = list(row.names(newdata), colnames(res)) |
121 |
) |
|
122 | 7x |
res_raw[row.names(res), ] <- res |
123 | 7x |
return(res_raw) |
124 |
} |
|
125 | 18x |
tmb_data <- h_mmrm_tmb_data( |
126 | 18x |
formula_parts, newdata, |
127 | 18x |
weights = rep(1, nrow(newdata)), |
128 | 18x |
reml = TRUE, |
129 | 18x |
singular = "keep", |
130 | 18x |
drop_visit_levels = FALSE, |
131 | 18x |
allow_na_response = TRUE, |
132 | 18x |
drop_levels = FALSE, |
133 | 18x |
xlev = component(object, "xlev"), |
134 | 18x |
contrasts = component(object, "contrasts") |
135 |
) |
|
136 | 18x |
tmb_data$x_matrix <- tmb_data$x_matrix[, colnames, drop = FALSE] |
137 | 18x |
predictions <- h_get_prediction( |
138 | 18x |
tmb_data, object$theta_est, object$beta_est, component(object, "beta_vcov") |
139 | 18x |
)$prediction |
140 | 18x |
res <- cbind(fit = rep(NA_real_, nrow(newdata))) |
141 | 18x |
new_order <- match(row.names(tmb_data$full_frame), orig_row_names) |
142 | 18x |
res[new_order, "fit"] <- predictions[, "fit"] |
143 | 18x |
se <- switch(interval, |
144 | 18x |
"confidence" = sqrt(predictions[, "conf_var"]), |
145 | 18x |
"prediction" = sqrt(h_get_prediction_variance(object, nsim, tmb_data)), |
146 | 18x |
"none" = NULL |
147 |
) |
|
148 | 18x |
if (interval != "none") { |
149 | 7x |
res <- cbind( |
150 | 7x |
res, |
151 | 7x |
se = NA_real_ |
152 |
) |
|
153 | 7x |
res[new_order, "se"] <- se |
154 | 7x |
alpha <- 1 - level |
155 | 7x |
z <- stats::qnorm(1 - alpha / 2) * res[, "se"] |
156 | 7x |
res <- cbind( |
157 | 7x |
res, |
158 | 7x |
lwr = res[, "fit"] - z, |
159 | 7x |
upr = res[, "fit"] + z |
160 |
) |
|
161 | 7x |
if (!se.fit) { |
162 | ! |
res <- res[, setdiff(colnames(res), "se")] |
163 |
} |
|
164 |
} |
|
165 |
# Use original names. |
|
166 | 18x |
row.names(res) <- orig_row_names |
167 | 18x |
if (ncol(res) == 1) { |
168 | 11x |
res <- res[, "fit"] |
169 |
} |
|
170 | 18x |
return(res) |
171 |
} |
|
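A minimal usage sketch (not run), assuming `object` is an `mmrm_tmb` fit on `fev_data` as in the examples above; the three calls illustrate the plain, confidence-interval and prediction-interval return shapes described in the method documentation.

pred <- predict(object, newdata = fev_data)            # named numeric vector of fitted means
pred_ci <- predict(
  object,
  newdata = fev_data,
  se.fit = TRUE,
  interval = "confidence",
  level = 0.95
)                                                      # matrix with columns fit, se, lwr, upr
pred_pi <- predict(
  object,
  newdata = fev_data,
  interval = "prediction",
  nsim = 100L                                          # Monte Carlo samples for the variance
)                                                      # matrix with columns fit, lwr, upr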
172 | ||
173 |
#' Get Prediction |
|
174 |
#' |
|
175 |
#' @description Get predictions with given `data`, `theta`, `beta`, `beta_vcov`. |
|
176 |
#' |
|
177 |
#' @details See `predict` function in `predict.cpp` which is called internally. |
|
178 |
#' |
|
179 |
#' @param tmb_data (`mmrm_tmb_data`)\cr object. |
|
180 |
#' @param theta (`numeric`)\cr theta value. |
|
181 |
#' @param beta (`numeric`)\cr beta value. |
|
182 |
#' @param beta_vcov (`matrix`)\cr beta_vcov matrix. |
|
183 |
#' |
|
184 |
#' @return List with: |
|
185 |
#' - `prediction`: Matrix with columns `fit`, `conf_var`, and `var`. |
|
186 |
#' - `covariance`: List with subject specific covariance matrices. |
|
187 |
#' - `index`: List of zero-based subject indices. |
|
188 |
#' |
|
189 |
#' @keywords internal |
|
190 |
h_get_prediction <- function(tmb_data, theta, beta, beta_vcov) { |
|
191 | 1696x |
assert_class(tmb_data, "mmrm_tmb_data") |
192 | 1696x |
assert_numeric(theta) |
193 | 1696x |
n_beta <- ncol(tmb_data$x_matrix) |
194 | 1696x |
assert_numeric(beta, finite = TRUE, any.missing = FALSE, len = n_beta) |
195 | 1696x |
assert_matrix(beta_vcov, mode = "numeric", any.missing = FALSE, nrows = n_beta, ncols = n_beta) |
196 | 1696x |
.Call(`_mmrm_predict`, PACKAGE = "mmrm", tmb_data, theta, beta, beta_vcov) |
197 |
} |
|
198 | ||
199 |
#' Get Prediction Variance |
|
200 |
#' |
|
201 |
#' @description Get the prediction variance for a given fit and `tmb_data`, using the Monte Carlo sampling method. |
|
202 |
#' |
|
203 |
#' @param object (`mmrm_tmb`)\cr the fitted MMRM. |
|
204 |
#' @param nsim (`count`)\cr number of samples. |
|
205 |
#' @param tmb_data (`mmrm_tmb_data`)\cr object. |
|
206 |
#' |
|
207 |
#' @keywords internal |
|
208 |
h_get_prediction_variance <- function(object, nsim, tmb_data) { |
|
209 | 7x |
assert_class(object, "mmrm_tmb") |
210 | 7x |
assert_class(tmb_data, "mmrm_tmb_data") |
211 | 7x |
assert_count(nsim, positive = TRUE) |
212 | 7x |
theta_chol <- chol(object$theta_vcov) |
213 | 7x |
n_theta <- length(object$theta_est) |
214 | 7x |
res <- replicate(nsim, { |
215 | 1150x |
z <- stats::rnorm(n = n_theta) |
216 | 1150x |
theta_sample <- object$theta_est + z %*% theta_chol |
217 | 1150x |
cond_beta_results <- object$tmb_object$report(theta_sample) |
218 | 1150x |
beta_mean <- cond_beta_results$beta |
219 | 1150x |
beta_cov <- cond_beta_results$beta_vcov |
220 | 1150x |
h_get_prediction(tmb_data, theta_sample, beta_mean, beta_cov)$prediction |
221 |
}) |
|
222 | 7x |
mean_of_var <- rowMeans(res[, "var", ]) |
223 | 7x |
var_of_mean <- apply(res[, "fit", ], 1, stats::var) |
224 | 7x |
mean_of_var + var_of_mean |
225 |
} |
|
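The returned value is the law-of-total-variance decomposition over the sampled variance parameters: the mean of the conditional prediction variances plus the variance of the conditional means. A toy sketch of the same computation, with hypothetical numbers:

fits <- matrix(c(1.0, 1.2, 0.9, 1.1), nrow = 2)            # conditional means, 2 rows x 2 samples
vars <- matrix(c(0.5, 0.6, 0.4, 0.7), nrow = 2)            # conditional variances
total_var <- rowMeans(vars) + apply(fits, 1, stats::var)   # mean of variances + variance of means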
226 | ||
227 |
#' @describeIn mmrm_tmb_methods obtains the model frame. |
|
228 |
#' @param data (`data.frame`)\cr object in which to construct the frame. |
|
229 |
#' @param include (`character`)\cr names of variable types to include. |
|
230 |
#' Must be `NULL` or one or more of `c("subject_var", "visit_var", "group_var", "response_var")`. |
|
231 |
#' @param full (`flag`)\cr indicator whether to return full model frame (deprecated). |
|
232 |
#' @param na.action (`string`)\cr na action. |
|
233 |
#' @importFrom stats model.frame |
|
234 |
#' @exportS3Method |
|
235 |
#' |
|
236 |
#' @details |
|
237 |
#' The `include` argument controls which variables the returned model frame will include. |
|
238 |
#' Possible options are "response_var", "subject_var", "visit_var" and "group_var", representing the |
|
239 |
#' response variable, subject variable, visit variable or group variable. |
|
240 |
#' `character` values in new data will always be factorized according to the data in the fit |
|
241 |
#' to avoid mismatches in levels or issues in `model.matrix`. |
|
242 |
#' |
|
243 |
#' @examples |
|
244 |
#' # Model frame: |
|
245 |
#' model.frame(object) |
|
246 |
#' model.frame(object, include = "subject_var") |
|
247 |
model.frame.mmrm_tmb <- function(formula, data, include = c("subject_var", "visit_var", "group_var", "response_var"), |
|
248 |
full, na.action = "na.omit", ...) { # nolint |
|
249 |
# Construct updated formula and data arguments. |
|
250 | 46x |
lst_formula_and_data <- |
251 | 46x |
h_construct_model_frame_inputs( |
252 | 46x |
formula = formula, |
253 | 46x |
data = data, |
254 | 46x |
include = include, |
255 | 46x |
full = full |
256 |
) |
|
257 |
# Only if include is default (full) and also data is missing, and also na.action is na.omit we will |
|
258 |
# use the model frame from the tmb_data. |
|
259 | 46x |
include_choice <- c("subject_var", "visit_var", "group_var", "response_var") |
260 | 46x |
if (missing(data) && setequal(include, include_choice) && identical(h_get_na_action(na.action), stats::na.omit)) { |
261 | 2x |
ret <- formula$tmb_data$full_frame |
262 |
# Remove weights column. |
|
263 | 2x |
ret[, "(weights)"] <- NULL |
264 | 2x |
ret |
265 |
} else { |
|
266 |
# Construct data frame to return to users. |
|
267 | 44x |
ret <- |
268 | 44x |
stats::model.frame( |
269 | 44x |
formula = lst_formula_and_data$formula, |
270 | 44x |
data = h_get_na_action(na.action)(lst_formula_and_data$data), |
271 | 44x |
na.action = na.action, |
272 | 44x |
xlev = stats::.getXlevels(terms(formula), formula$tmb_data$full_frame) |
273 |
) |
|
274 |
} |
|
275 | 45x |
ret |
276 |
} |
|
277 | ||
278 | ||
279 |
#' Construction of Model Frame Formula and Data Inputs |
|
280 |
#' |
|
281 |
#' @description |
|
282 |
#' Input formulas are converted from mmrm-style to a style compatible |
|
283 |
#' with default [stats::model.frame()] and [stats::model.matrix()] methods. |
|
284 |
#' |
|
285 |
#' The full formula is returned so we can construct, for example, the |
|
286 |
#' `model.frame()` including all columns as well as the requested subset. |
|
287 |
#' The full set is used to identify rows to include in the reduced model frame. |
|
288 |
#' |
|
289 |
#' @param formula (`mmrm`)\cr mmrm fit object. |
|
290 |
#' @param data optional data frame that will be |
|
291 |
#' passed to `model.frame()` or `model.matrix()` |
|
292 |
#' @param include (`character`)\cr names of variable to include |
|
293 |
#' @param full (`flag`)\cr indicator whether to return full model frame (deprecated). |
|
294 |
#' |
|
295 |
#' @return named list with two elements: |
|
296 |
#' - `"formula"`: the formula including the columns requested in the `include=` argument. |
|
297 |
#' - `"data"`: a data frame including all columns needed in the formula. |
|
298 |
#' full formula are identical |
|
299 |
#' @keywords internal |
|
300 |
h_construct_model_frame_inputs <- function(formula, |
|
301 |
data, |
|
302 |
include, |
|
303 |
include_choice = c("subject_var", "visit_var", "group_var", "response_var"), |
|
304 |
full) { |
|
305 | 280x |
if (!missing(full) && identical(full, TRUE)) { |
306 | ! |
lifecycle::deprecate_warn("0.3", "model.frame.mmrm_tmb(full)") |
307 | ! |
include <- include_choice |
308 |
} |
|
309 | ||
310 | 280x |
assert_class(formula, classes = "mmrm_tmb") |
311 | 280x |
assert_subset(include, include_choice) |
312 | 280x |
if (missing(data)) { |
313 | 256x |
data <- formula$data |
314 |
} |
|
315 | 280x |
assert_data_frame(data) |
316 | ||
317 | 280x |
drop_response <- !"response_var" %in% include |
318 | 280x |
add_vars <- unlist(formula$formula_parts[include]) |
319 | 280x |
new_formula <- h_add_terms(formula$formula_parts$model_formula, add_vars, drop_response) |
320 | ||
321 | 280x |
drop_response_full <- !"response_var" %in% include_choice |
322 | 280x |
add_vars_full <- unlist(formula$formula_parts[include_choice]) |
323 | 280x |
new_formula_full <- |
324 | 280x |
h_add_terms(formula$formula_parts$model_formula, add_vars_full, drop_response_full) |
325 | ||
326 |
# Update data based on the columns in the full formula return. |
|
327 | 280x |
all_vars <- all.vars(new_formula_full) |
328 | 280x |
assert_names(colnames(data), must.include = all_vars) |
329 | 280x |
data <- data[, all_vars, drop = FALSE] |
330 | ||
331 |
# Return list with updated formula, data. |
|
332 | 280x |
list( |
333 | 280x |
formula = new_formula, |
334 | 280x |
data = data |
335 |
) |
|
336 |
} |
|
337 | ||
338 |
#' @describeIn mmrm_tmb_methods obtains the model matrix. |
|
339 |
#' @exportS3Method |
|
340 |
#' @param use_response (`flag`)\cr whether to use the response for complete rows. |
|
341 |
#' |
|
342 |
#' @examples |
|
343 |
#' # Model matrix: |
|
344 |
#' model.matrix(object) |
|
345 |
model.matrix.mmrm_tmb <- function(object, data, use_response = TRUE, ...) { # nolint |
|
346 |
# Always return the utilized model matrix if data not provided. |
|
347 | 37x |
if (missing(data)) { |
348 | 3x |
return(object$tmb_data$x_matrix) |
349 |
} |
|
350 | 34x |
stats::model.matrix( |
351 | 34x |
h_add_terms(object$formula_parts$model_formula, NULL, drop_response = !use_response), |
352 | 34x |
data = data, |
353 | 34x |
contrasts.arg = attr(object$tmb_data$x_matrix, "contrasts"), |
354 | 34x |
xlev = component(object, "xlev"), |
355 |
... |
|
356 |
) |
|
357 |
} |
|
358 | ||
359 |
#' @describeIn mmrm_tmb_methods obtains the terms object. |
|
360 |
#' @importFrom stats model.frame |
|
361 |
#' @exportS3Method |
|
362 |
#' |
|
363 |
#' @examples |
|
364 |
#' # terms: |
|
365 |
#' terms(object) |
|
366 |
#' terms(object, include = "subject_var") |
|
367 |
terms.mmrm_tmb <- function(x, include = "response_var", ...) { # nolint |
|
368 |
# Construct updated formula and data arguments. |
|
369 | 231x |
lst_formula_and_data <- |
370 | 231x |
h_construct_model_frame_inputs( |
371 | 231x |
formula = x, |
372 | 231x |
include = include |
373 |
) |
|
374 | ||
375 |
# Use formula method for `terms()` to construct the mmrm terms object. |
|
376 | 231x |
stats::terms( |
377 | 231x |
x = lst_formula_and_data$formula, |
378 | 231x |
data = lst_formula_and_data$data |
379 |
) |
|
380 |
} |
|
381 | ||
382 | ||
383 |
#' @describeIn mmrm_tmb_methods obtains the attained log likelihood value. |
|
384 |
#' @importFrom stats logLik |
|
385 |
#' @exportS3Method |
|
386 |
#' @examples |
|
387 |
#' # Log likelihood given the estimated parameters: |
|
388 |
#' logLik(object) |
|
389 |
logLik.mmrm_tmb <- function(object, ...) { |
|
390 | 50x |
-component(object, "neg_log_lik") |
391 |
} |
|
392 | ||
393 |
#' @describeIn mmrm_tmb_methods obtains the used formula. |
|
394 |
#' @importFrom stats formula |
|
395 |
#' @exportS3Method |
|
396 |
#' @examples |
|
397 |
#' # Formula which was used: |
|
398 |
#' formula(object) |
|
399 |
formula.mmrm_tmb <- function(x, ...) { |
|
400 | 5x |
x$formula_parts$formula |
401 |
} |
|
402 | ||
403 |
#' @describeIn mmrm_tmb_methods obtains the variance-covariance matrix estimate |
|
404 |
#' for the coefficients. |
|
405 |
#' @importFrom stats vcov |
|
406 |
#' @exportS3Method |
|
407 |
#' @examples |
|
408 |
#' # Variance-covariance matrix estimate for coefficients: |
|
409 |
#' vcov(object) |
|
410 |
vcov.mmrm_tmb <- function(object, complete = TRUE, ...) { |
|
411 | 3x |
assert_flag(complete) |
412 | 3x |
nm <- if (complete) "beta_vcov_complete" else "beta_vcov" |
413 | 3x |
component(object, name = nm) |
414 |
} |
|
415 | ||
416 |
#' @describeIn mmrm_tmb_methods obtains the variance-covariance matrix estimate |
|
417 |
#' for the residuals. |
|
418 |
#' @param sigma cannot be used (this parameter does not exist in MMRM). |
|
419 |
#' @importFrom nlme VarCorr |
|
420 |
#' @export VarCorr |
|
421 |
#' @aliases VarCorr |
|
422 |
#' @exportS3Method |
|
423 |
#' @examples |
|
424 |
#' # Variance-covariance matrix estimate for residuals: |
|
425 |
#' VarCorr(object) |
|
426 |
VarCorr.mmrm_tmb <- function(x, sigma = NA, ...) { # nolint |
|
427 | 10x |
assert_scalar_na(sigma) |
428 | ||
429 | 10x |
component(x, name = "varcor") |
430 |
} |
|
431 | ||
432 |
#' @describeIn mmrm_tmb_methods obtains the deviance, which is defined here |
|
433 |
#' as twice the negative log likelihood: the restricted (integrated over the coefficients) |
|
434 |
#' log likelihood for REML fits, or the usual log likelihood for ML fits. |
|
435 |
#' @importFrom stats deviance |
|
436 |
#' @exportS3Method |
|
437 |
#' @examples |
|
438 |
#' # REML criterion (twice the negative log likelihood): |
|
439 |
#' deviance(object) |
|
440 |
deviance.mmrm_tmb <- function(object, ...) { |
|
441 | 74x |
2 * component(object, "neg_log_lik") |
442 |
} |
|
443 | ||
444 |
#' @describeIn mmrm_tmb_methods obtains the Akaike Information Criterion, |
|
445 |
#' where the degrees of freedom are the number of variance parameters (`n_theta`). |
|
446 |
#' If `corrected`, then this is multiplied with `m / (m - n_theta - 1)` where |
|
447 |
#' `m` is the number of observations minus the number of coefficients, or |
|
448 |
#' `n_theta + 2` if it is smaller than that \insertCite{hurvich1989regression,burnham1998practical}{mmrm}. |
|
449 |
#' @param corrected (`flag`)\cr whether corrected AIC should be calculated. |
|
450 |
#' @param k (`number`)\cr the penalty per parameter to be used; default `k = 2` |
|
451 |
#' is the classical AIC. |
|
452 |
#' @importFrom stats AIC |
|
453 |
#' @exportS3Method |
|
454 |
#' @examples |
|
455 |
#' # AIC: |
|
456 |
#' AIC(object) |
|
457 |
#' AIC(object, corrected = TRUE) |
|
458 |
#' @references |
|
459 |
#' - \insertRef{hurvich1989regression}{mmrm} |
|
460 |
#' - \insertRef{burnham1998practical}{mmrm} |
|
461 |
AIC.mmrm_tmb <- function(object, corrected = FALSE, ..., k = 2) { |
|
462 |
# nolint |
|
463 | 44x |
assert_flag(corrected) |
464 | 44x |
assert_number(k, lower = 1) |
465 | ||
466 | 44x |
n_theta <- length(component(object, "theta_est")) |
467 | 44x |
df <- if (!corrected) { |
468 | 43x |
n_theta |
469 |
} else { |
|
470 | 1x |
n_obs <- length(component(object, "y_vector")) |
471 | 1x |
n_beta <- length(component(object, "beta_est")) |
472 | 1x |
m <- max(n_theta + 2, n_obs - n_beta) |
473 | 1x |
n_theta * (m / (m - n_theta - 1)) |
474 |
} |
|
475 | ||
476 | 44x |
2 * component(object, "neg_log_lik") + k * df |
477 |
} |
|
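A worked sketch of the corrected degrees of freedom, with hypothetical numbers: for `n_theta = 10` variance parameters, 500 observations and 12 coefficients we get `m = max(10 + 2, 500 - 12) = 488`, so the penalty uses about 10.23 effective degrees of freedom instead of 10.

n_theta <- 10
n_obs <- 500
n_beta <- 12
m <- max(n_theta + 2, n_obs - n_beta)               # 488
df_corrected <- n_theta * (m / (m - n_theta - 1))   # ~10.23
# AICc is then 2 * neg_log_lik + k * df_corrected with k = 2.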
478 | ||
479 |
#' @describeIn mmrm_tmb_methods obtains the Bayesian Information Criterion, |
|
480 |
#' which uses the natural logarithm of the number of subjects for the |
|
481 |
#' penalty parameter `k`. |
|
482 |
#' @importFrom stats BIC |
|
483 |
#' @exportS3Method |
|
484 |
#' @examples |
|
485 |
#' # BIC: |
|
486 |
#' BIC(object) |
|
487 |
BIC.mmrm_tmb <- function(object, ...) { |
|
488 |
# nolint |
|
489 | 21x |
k <- log(component(object, "n_subjects")) |
490 | 21x |
AIC(object, corrected = FALSE, k = k) |
491 |
} |
|
492 | ||
493 | ||
494 |
#' @describeIn mmrm_tmb_methods prints the object. |
|
495 |
#' @exportS3Method |
|
496 |
print.mmrm_tmb <- function(x, |
|
497 |
...) { |
|
498 | 2x |
cat("mmrm fit\n\n") |
499 | ||
500 | 2x |
h_print_call( |
501 | 2x |
component(x, "call"), component(x, "n_obs"), |
502 | 2x |
component(x, "n_subjects"), component(x, "n_timepoints") |
503 |
) |
|
504 | 2x |
h_print_cov(component(x, "cov_type"), component(x, "n_theta"), component(x, "n_groups")) |
505 | ||
506 | 2x |
cat("Inference: ") |
507 | 2x |
cat(ifelse(component(x, "reml"), "REML", "ML")) |
508 | 2x |
cat("\n") |
509 | 2x |
cat("Deviance: ") |
510 | 2x |
cat(deviance(x)) |
511 | ||
512 | 2x |
cat("\n\nCoefficients: ") |
513 | 2x |
n_singular_coefs <- sum(component(x, "beta_aliased")) |
514 | 2x |
if (n_singular_coefs > 0) { |
515 | 1x |
cat("(", n_singular_coefs, " not defined because of singularities)", sep = "") |
516 |
} |
|
517 | 2x |
cat("\n") |
518 | 2x |
print(coef(x, complete = TRUE)) |
519 | ||
520 | 2x |
cat("\nModel Inference Optimization:") |
521 | ||
522 | 2x |
cat(ifelse(component(x, "convergence") == 0, "\nConverged", "\nFailed to converge")) |
523 | 2x |
cat( |
524 | 2x |
" with code", component(x, "convergence"), |
525 | 2x |
"and message:", |
526 | 2x |
if (is.null(component(x, "conv_message"))) "No message provided." else tolower(component(x, "conv_message")) |
527 |
) |
|
528 | 2x |
cat("\n") |
529 | 2x |
invisible(x) |
530 |
} |
|
531 | ||
532 | ||
533 |
#' @describeIn mmrm_tmb_methods to obtain residuals - either unscaled ('response'), 'pearson' or 'normalized'. |
|
534 |
#' @param type (`string`)\cr unscaled (`response`), `pearson` or `normalized`. Default is `response`, |
|
535 |
#' and this is the only type available for use with models with a spatial covariance structure. |
|
536 |
#' @importFrom stats residuals |
|
537 |
#' @exportS3Method |
|
538 |
#' @examples |
|
539 |
#' # residuals: |
|
540 |
#' residuals(object, type = "response") |
|
541 |
#' residuals(object, type = "pearson") |
|
542 |
#' residuals(object, type = "normalized") |
|
543 |
#' @references |
|
544 |
#' - \insertRef{galecki2013linear}{mmrm} |
|
545 |
residuals.mmrm_tmb <- function(object, type = c("response", "pearson", "normalized"), ...) { |
|
546 | 20x |
type <- match.arg(type) |
547 | 20x |
switch(type, |
548 | 8x |
"response" = h_residuals_response(object), |
549 | 5x |
"pearson" = h_residuals_pearson(object), |
550 | 7x |
"normalized" = h_residuals_normalized(object) |
551 |
) |
|
552 |
} |
|
553 |
#' Calculate Pearson Residuals |
|
554 |
#' |
|
555 |
#' This is used by [residuals.mmrm_tmb()] to calculate Pearson residuals. |
|
556 |
#' |
|
557 |
#' @param object (`mmrm_tmb`)\cr the fitted MMRM. |
|
558 |
#' |
|
559 |
#' @return Vector of residuals. |
|
560 |
#' |
|
561 |
#' @keywords internal |
|
562 |
h_residuals_pearson <- function(object) { |
|
563 | 6x |
assert_class(object, "mmrm_tmb") |
564 | 6x |
h_residuals_response(object) * object$tmb_object$report()$diag_cov_inv_sqrt |
565 |
} |
|
566 | ||
567 |
#' Calculate normalized residuals |
|
568 |
#' |
|
569 |
#' This is used by [residuals.mmrm_tmb()] to calculate normalized / scaled residuals. |
|
570 |
#' |
|
571 |
#' @param object (`mmrm_tmb`)\cr the fitted MMRM. |
|
572 |
#' |
|
573 |
#' @return Vector of residuals |
|
574 |
#' |
|
575 |
#' @keywords internal |
|
576 |
h_residuals_normalized <- function(object) { |
|
577 | 8x |
assert_class(object, "mmrm_tmb") |
578 | 8x |
object$tmb_object$report()$epsilonTilde |
579 |
} |
|
580 |
#' Calculate response residuals. |
|
581 |
#' |
|
582 |
#' This is used by [residuals.mmrm_tmb()] to calculate response residuals. |
|
583 |
#' |
|
584 |
#' @param object (`mmrm_tmb`)\cr the fitted MMRM. |
|
585 |
#' |
|
586 |
#' @return Vector of residuals |
|
587 |
#' |
|
588 |
#' @keywords internal |
|
589 |
h_residuals_response <- function(object) { |
|
590 | 15x |
assert_class(object, "mmrm_tmb") |
591 | 15x |
component(object, "y_vector") - unname(fitted(object)) |
592 |
} |
|
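For orientation, a sketch of how the three residual types relate for one subject with response `y_i`, fitted mean `mu_i` and marginal covariance matrix `Sigma_i`, assuming the normalized residuals are the Cholesky-whitened residuals of Galecki and Burzykowski (the reference cited above); the actual values come from the `C++` report quantities `diag_cov_inv_sqrt` and `epsilonTilde`:

# e_i <- y_i - mu_i                  # response residuals
# e_i / sqrt(diag(Sigma_i))          # Pearson residuals: scaled by the marginal standard deviations
# solve(t(chol(Sigma_i)), e_i)       # normalized residuals: whitened by the lower Cholesky factor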
593 | ||
594 |
#' @describeIn mmrm_tmb_methods simulate responses from a fitted model according |
|
595 |
#' to the simulation `method`, returning a `data.frame` of dimension `[n, m]` |
|
596 |
#' where n is the number of rows in `newdata`, |
|
597 |
#' and m is the number `nsim` of simulated responses. |
|
598 |
#' |
|
599 |
#' @param seed unused argument from [stats::simulate()]. |
|
600 |
#' @param method (`string`)\cr simulation method to use. If "conditional", |
|
601 |
#' simulated values are sampled given the estimated covariance matrix of `object`. |
|
602 |
#' If "marginal", the variance of the estimated covariance matrix is taken into account. |
|
603 |
#' |
|
604 |
#' @importFrom stats simulate |
|
605 |
#' @exportS3Method |
|
606 |
simulate.mmrm_tmb <- function(object, |
|
607 |
nsim = 1, |
|
608 |
seed = NULL, |
|
609 |
newdata, |
|
610 |
..., |
|
611 |
method = c("conditional", "marginal")) { |
|
612 | 15x |
assert_count(nsim, positive = TRUE) |
613 | 15x |
assert_null(seed) |
614 | 15x |
if (missing(newdata)) { |
615 | 12x |
newdata <- object$data |
616 |
} |
|
617 | 15x |
assert_data_frame(newdata) |
618 | 15x |
method <- match.arg(method) |
619 | ||
620 | ||
621 | 15x |
tmb_data <- h_mmrm_tmb_data( |
622 | 15x |
object$formula_parts, newdata, |
623 | 15x |
weights = rep(1, nrow(newdata)), |
624 | 15x |
reml = TRUE, |
625 | 15x |
singular = "keep", |
626 | 15x |
drop_visit_levels = FALSE, |
627 | 15x |
allow_na_response = TRUE, |
628 | 15x |
drop_levels = FALSE, |
629 | 15x |
xlev = component(object, "xlev"), |
630 | 15x |
contrasts = component(object, "contrasts") |
631 |
) |
|
632 | 15x |
ret <- if (method == "conditional") { |
633 | 8x |
predict_res <- h_get_prediction(tmb_data, object$theta_est, object$beta_est, object$beta_vcov) |
634 | 8x |
as.data.frame(h_get_sim_per_subj(predict_res, tmb_data$n_subjects, nsim)) |
635 | 15x |
} else if (method == "marginal") { |
636 | 7x |
theta_chol <- t(chol(object$theta_vcov)) |
637 | 7x |
n_theta <- length(object$theta_est) |
638 | 7x |
as.data.frame( |
639 | 7x |
sapply(seq_len(nsim), function(x) { |
640 | 503x |
newtheta <- object$theta_est + theta_chol %*% matrix(stats::rnorm(n_theta), ncol = 1) |
641 |
# Recalculate betas with sampled thetas. |
|
642 | 503x |
hold <- object$tmb_object$report(newtheta) |
643 |
# Resample betas given new beta distribution. |
|
644 |
# We first solve L^\top w = D^{-1/2}z_{sample}: |
|
645 | 503x |
w_sample <- backsolve( |
646 | 503x |
r = hold$XtWX_L, |
647 | 503x |
x = stats::rnorm(length(hold$beta)) / sqrt(hold$XtWX_D), |
648 | 503x |
upper.tri = FALSE, |
649 | 503x |
transpose = TRUE |
650 |
) |
|
651 |
# Then we add the mean vector, the beta estimate. |
|
652 | 503x |
beta_sample <- hold$beta + w_sample |
653 | 503x |
predict_res <- h_get_prediction(tmb_data, newtheta, beta_sample, hold$beta_vcov) |
654 | 503x |
h_get_sim_per_subj(predict_res, tmb_data$n_subjects, 1L) |
655 |
}) |
|
656 |
) |
|
657 |
} |
|
658 | 15x |
orig_row_names <- row.names(newdata) |
659 | 15x |
new_order <- match(orig_row_names, row.names(tmb_data$full_frame)) |
660 | 15x |
ret[new_order, , drop = FALSE] |
661 |
} |
|
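A minimal usage sketch (not run), assuming `object` is an `mmrm_tmb` fit: conditional simulation draws responses given the estimated covariance parameters, while marginal simulation additionally resamples the covariance parameters (and coefficients) for every replicate.

sims_cond <- simulate(object, nsim = 5, method = "conditional")  # data.frame with 5 columns of draws
sims_marg <- simulate(object, nsim = 5, method = "marginal")     # also propagates theta/beta uncertainty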
662 | ||
663 |
#' Get simulated values by patient. |
|
664 |
#' |
|
665 |
#' @param predict_res (`list`)\cr from [h_get_prediction()]. |
|
666 |
#' @param nsub (`count`)\cr number of subjects. |
|
667 |
#' @param nsim (`count`)\cr number of values to simulate. |
|
668 |
#' |
|
669 |
#' @keywords internal |
|
670 |
h_get_sim_per_subj <- function(predict_res, nsub, nsim) { |
|
671 | 517x |
assert_list(predict_res) |
672 | 517x |
assert_count(nsub, positive = TRUE) |
673 | 516x |
assert_count(nsim, positive = TRUE) |
674 | ||
675 | 515x |
ret <- matrix( |
676 | 515x |
predict_res$prediction[, "fit"], |
677 | 515x |
ncol = nsim, |
678 | 515x |
nrow = nrow(predict_res$prediction) |
679 |
) |
|
680 | 515x |
for (i in seq_len(nsub)) { |
681 |
# Skip subjects which are not included in predict_res. |
|
682 | 82699x |
if (length(predict_res$index[[i]]) > 0) { |
683 |
# Obtain indices of data.frame belonging to subject i |
|
684 |
# (increment by 1, since indices from cpp are 0-order). |
|
685 | 66631x |
inds <- predict_res$index[[i]] + 1 |
686 | 66631x |
obs <- length(inds) |
687 | ||
688 |
# Get relevant covariance matrix for subject i. |
|
689 | 66631x |
covmat_i <- predict_res$covariance[[i]] |
690 | 66631x |
theta_chol <- t(chol(covmat_i)) |
691 | ||
692 |
# Simulate epsilon from covariance matrix. |
|
693 | 66631x |
mus <- ret[inds, , drop = FALSE] |
694 | 66631x |
epsilons <- theta_chol %*% matrix(stats::rnorm(nsim * obs), ncol = nsim) |
695 | 66631x |
ret[inds, ] <- mus + epsilons |
696 |
} |
|
697 |
} |
|
698 | ||
699 | 515x |
ret |
700 |
} |
1 |
#' Processing the Formula for `TMB` Fit |
|
2 |
#' |
|
3 |
#' @param formula (`formula`)\cr Original formula. |
|
4 |
#' @param covariance (`cov_struct`)\cr A covariance structure from which |
|
5 |
#' additional formula parts should be added. |
|
6 |
#' |
|
7 |
#' @return List of class `mmrm_tmb_formula_parts` with elements: |
|
8 |
#' |
|
9 |
#' - `formula`: the original input. |
|
10 |
#' - `model_formula`: `formula` with the covariance term removed. |
|
12 |
#' - `full_formula`: same as `model_formula` but includes the covariance |
|
13 |
#' structure's subject, visit and (optionally) group variables. |
|
14 |
#' - `cov_type`: `string` with covariance term type (e.g. `"us"`). |
|
15 |
#' - `is_spatial`: `flag` indicator of whether the covariance structure is |
|
16 |
#' spatial. |
|
17 |
#' - `visit_var`: `character` with the visit variable name. |
|
18 |
#' - `subject_var`: `string` with the subject variable name. |
|
19 |
#' - `group_var`: `string` with the group variable name. If no group specified, |
|
20 |
#' this element is `NULL`. |
|
21 |
#' - `model_var`: `character` with the variable names of the formula, except `subject_var`. |
|
22 |
#' |
|
23 |
#' @keywords internal |
|
24 |
h_mmrm_tmb_formula_parts <- function( |
|
25 |
formula, |
|
26 |
covariance = as.cov_struct(formula, warn_partial = FALSE)) { |
|
27 | 270x |
assert_formula(formula) |
28 | 270x |
assert_true(identical(length(formula), 3L)) |
29 | ||
30 | 270x |
model_formula <- h_drop_covariance_terms(formula) |
31 | ||
32 | 270x |
structure( |
33 | 270x |
list( |
34 | 270x |
formula = formula, |
35 | 270x |
model_formula = model_formula, |
36 | 270x |
full_formula = h_add_covariance_terms(model_formula, covariance), |
37 | 270x |
cov_type = tmb_cov_type(covariance), |
38 | 270x |
is_spatial = covariance$type == "sp_exp", |
39 | 270x |
visit_var = covariance$visits, |
40 | 270x |
subject_var = covariance$subject, |
41 | 270x |
group_var = if (length(covariance$group) < 1) NULL else covariance$group, |
42 | 270x |
model_var = setdiff(all.vars(formula[[3]]), covariance$subject) |
43 |
), |
|
44 | 270x |
class = "mmrm_tmb_formula_parts" |
45 |
) |
|
46 |
} |
|
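A sketch of the resulting parts for the usual unstructured-covariance formula; the values shown in the comments are what the construction above yields for this input.

parts <- h_mmrm_tmb_formula_parts(FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID))
parts$model_formula   # FEV1 ~ RACE + SEX + ARMCD * AVISIT
parts$cov_type        # "us"
parts$is_spatial      # FALSE
parts$visit_var       # "AVISIT"
parts$subject_var     # "USUBJID"
parts$group_var       # NULL, since no group is specified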
47 | ||
48 |
#' Data for `TMB` Fit |
|
49 |
#' |
|
50 |
#' @param formula_parts (`mmrm_tmb_formula_parts`)\cr list with formula parts |
|
51 |
#' from [h_mmrm_tmb_formula_parts()]. |
|
52 |
#' @param data (`data.frame`)\cr which contains variables used in `formula_parts`. |
|
53 |
#' @param weights (`vector`)\cr weights to be used in the fitting process. |
|
54 |
#' @param reml (`flag`)\cr whether restricted maximum likelihood (REML) estimation is used, |
|
55 |
#' otherwise maximum likelihood (ML) is used. |
|
56 |
#' @param singular (`string`)\cr method to deal with rank-deficient design matrices: "error" to |
|
57 |
#' stop with an error, "drop" to drop the aliased columns, and "keep" to keep all columns. |
|
58 |
#' @param drop_visit_levels (`flag`)\cr whether to drop unused levels of the visit variable, if it is a factor. |
|
59 |
#' @param allow_na_response (`flag`)\cr whether NA in response is allowed. |
|
60 |
#' @param drop_levels (`flag`)\cr whether to drop unused factor levels for covariates. If they are not dropped, this could lead to a singular design matrix. |
|
61 |
#' |
|
62 |
#' @return List of class `mmrm_tmb_data` with elements: |
|
63 |
#' - `full_frame`: `data.frame` with `n` rows containing all variables needed in the model. |
|
64 |
#' - `data`: `data.frame` of input dataset. |
|
65 |
#' - `x_matrix`: `matrix` with `n` rows and `p` columns specifying the overall design matrix. |
|
66 |
#' - `x_cols_aliased`: `logical` with potentially more than `p` elements indicating which |
|
67 |
#' columns in the original design matrix have been left out to obtain a full rank |
|
68 |
#' `x_matrix`. |
|
69 |
#' - `y_vector`: length `n` `numeric` specifying the overall response vector. |
|
70 |
#' - `weights_vector`: length `n` `numeric` specifying the weights vector. |
|
71 |
#' - `n_visits`: `int` with the number of visits, which is the dimension of the |
|
72 |
#' covariance matrix. |
|
73 |
#' - `n_subjects`: `int` with the number of subjects. |
|
74 |
#' - `subject_zero_inds`: length `n_subjects` `integer` containing the zero-based start |
|
75 |
#' indices for each subject. |
|
76 |
#' - `subject_n_visits`: length `n_subjects` `integer` containing the number of |
|
77 |
#' observed visits for each subject. So the sum of this vector equals `n`. |
|
78 |
#' - `cov_type`: `string` value specifying the covariance type. |
|
79 |
#' - `is_spatial_int`: `int` specifying whether the covariance structure is spatial (1) or not (0). |
|
80 |
#' - `reml`: `int` specifying whether REML estimation is used (1), otherwise ML (0). |
|
81 |
#' - `subject_groups`: `factor` specifying the grouping for each subject. |
|
82 |
#' - `n_groups`: `int` with the number of total groups |
|
83 |
#' |
|
84 |
#' @details Note that the `subject_var` does not need to be a factor, it can also be character. |
|
85 |
#' If it is character, then it will be converted to factor internally. Here |
|
86 |
#' the levels will be the unique values, sorted alphabetically and numerically if there |
|
87 |
#' is a common string prefix of numbers in the character elements. For full control |
|
88 |
#' on the order please use a factor. |
|
89 |
#' |
|
90 |
#' @keywords internal |
|
91 |
h_mmrm_tmb_data <- function(formula_parts, |
|
92 |
data, |
|
93 |
weights, |
|
94 |
reml, |
|
95 |
singular = c("drop", "error", "keep"), |
|
96 |
drop_visit_levels, |
|
97 |
allow_na_response = FALSE, |
|
98 |
drop_levels = TRUE, |
|
99 |
xlev = NULL, |
|
100 |
contrasts = NULL) { |
|
101 | 312x |
assert_class(formula_parts, "mmrm_tmb_formula_parts") |
102 | 312x |
assert_data_frame(data) |
103 | 312x |
varname <- formula_parts[grepl("_var", names(formula_parts))] |
104 | 312x |
assert_names( |
105 | 312x |
names(data), |
106 | 312x |
must.include = unlist(varname, use.names = FALSE) |
107 |
) |
|
108 | 312x |
assert_true(is.factor(data[[formula_parts$subject_var]]) || is.character(data[[formula_parts$subject_var]])) |
109 | 312x |
assert_numeric(weights, len = nrow(data)) |
110 | 312x |
assert_flag(reml) |
111 | 312x |
singular <- match.arg(singular) |
112 | 312x |
assert_flag(drop_visit_levels) |
113 | ||
114 | 312x |
if (is.character(data[[formula_parts$subject_var]])) { |
115 | 5x |
data[[formula_parts$subject_var]] <- factor( |
116 | 5x |
data[[formula_parts$subject_var]], |
117 | 5x |
levels = stringr::str_sort(unique(data[[formula_parts$subject_var]]), numeric = TRUE) |
118 |
) |
|
119 |
} |
|
120 | 312x |
data_order <- if (formula_parts$is_spatial) { |
121 | 16x |
order(data[[formula_parts$subject_var]]) |
122 |
} else { |
|
123 | 296x |
subject_visit_data <- data[, c(formula_parts$subject_var, formula_parts$visit_var)] |
124 | 296x |
is_duplicated <- duplicated(subject_visit_data) |
125 | 296x |
if (any(is_duplicated)) { |
126 | 1x |
stop( |
127 | 1x |
"time points have to be unique for each subject, detected following duplicates in data:\n", |
128 | 1x |
paste(utils::capture.output(print(subject_visit_data[is_duplicated, ])), collapse = "\n") |
129 |
) |
|
130 |
} |
|
131 | 295x |
order(data[[formula_parts$subject_var]], data[[formula_parts$visit_var]]) |
132 |
} |
|
133 | 311x |
if (identical(formula_parts$is_spatial, FALSE)) { |
134 | 295x |
h_confirm_large_levels(length(levels(data[[formula_parts$visit_var]]))) |
135 |
} |
|
136 | 310x |
data <- data[data_order, ] |
137 | 310x |
weights <- weights[data_order] |
138 | 310x |
data <- data.frame(data, weights) |
139 |
# Weights is always the last column. |
|
140 | 310x |
weights_name <- colnames(data)[ncol(data)] |
141 |
# If `y` is allowed to be NA, then first replace y with 1:n, then replace it with original y. |
|
142 | 310x |
if (!allow_na_response) { |
143 | 260x |
h_warn_na_action() |
144 |
} |
|
145 | 310x |
full_frame <- eval( |
146 | 310x |
bquote(stats::model.frame( |
147 | 310x |
formula_parts$full_formula, |
148 | 310x |
data = data, |
149 | 310x |
weights = .(as.symbol(weights_name)), |
150 | 310x |
na.action = "na.pass", |
151 | 310x |
xlev = xlev |
152 |
)) |
|
153 |
) |
|
154 | 310x |
if (drop_levels) { |
155 | 262x |
full_frame <- h_drop_levels(full_frame, formula_parts$subject_var, formula_parts$visit_var, names(xlev)) |
156 |
} |
|
157 | 310x |
has_response <- !identical(attr(attr(full_frame, "terms"), "response"), 0L) |
158 | 310x |
keep_ind <- if (allow_na_response && has_response) { |
159 |
# Note that response is always the first column if there is response. |
|
160 | 50x |
stats::complete.cases(full_frame[, -1L, drop = FALSE]) |
161 |
} else { |
|
162 | 260x |
stats::complete.cases(full_frame) |
163 |
} |
|
164 | 310x |
full_frame <- full_frame[keep_ind, ] |
165 | 310x |
if (drop_visit_levels && !formula_parts$is_spatial && h_extra_levels(full_frame[[formula_parts$visit_var]])) { |
166 | 3x |
visit_vec <- full_frame[[formula_parts$visit_var]] |
167 | 3x |
old_levels <- levels(visit_vec) |
168 | 3x |
full_frame[[formula_parts$visit_var]] <- droplevels(visit_vec) |
169 | 3x |
new_levels <- levels(full_frame[[formula_parts$visit_var]]) |
170 | 3x |
dropped <- setdiff(old_levels, new_levels) |
171 | 3x |
message( |
172 | 3x |
"In ", formula_parts$visit_var, " there are dropped visits: ", toString(dropped), |
173 | 3x |
".\n Additional attributes including contrasts are lost.\n", |
174 | 3x |
"To avoid this behavior, make sure use `drop_visit_levels = FALSE`." |
175 |
) |
|
176 |
} |
|
177 | 310x |
is_factor_col <- vapply(full_frame, is.factor, FUN.VALUE = TRUE) |
178 | 310x |
is_factor_col <- intersect(names(is_factor_col)[is_factor_col], all.vars(formula_parts$model_formula)) |
179 | 310x |
x_matrix <- stats::model.matrix( |
180 | 310x |
formula_parts$model_formula, |
181 | 310x |
data = full_frame, |
182 | 310x |
contrasts.arg = h_default_value(contrasts, lapply(full_frame[is_factor_col], contrasts)) |
183 |
) |
|
184 | 309x |
x_cols_aliased <- stats::setNames(rep(FALSE, ncol(x_matrix)), nm = colnames(x_matrix)) |
185 | 309x |
qr_x_mat <- qr(x_matrix) |
186 | 309x |
if (qr_x_mat$rank < ncol(x_matrix)) { |
187 | 23x |
cols_to_drop <- utils::tail(qr_x_mat$pivot, ncol(x_matrix) - qr_x_mat$rank) |
188 | 23x |
if (identical(singular, "error")) { |
189 | 1x |
stop( |
190 | 1x |
"design matrix only has rank ", qr_x_mat$rank, " and ", length(cols_to_drop), |
191 | 1x |
" columns (", toString(colnames(x_matrix)[cols_to_drop]), ") could be dropped", |
192 | 1x |
" to achieve full rank ", ncol(x_matrix), " by using `accept_singular = TRUE`" |
193 |
) |
|
194 | 22x |
} else if (identical(singular, "drop")) { |
195 | 11x |
assign_attr <- attr(x_matrix, "assign") |
196 | 11x |
contrasts_attr <- attr(x_matrix, "contrasts") |
197 | 11x |
x_matrix <- x_matrix[, -cols_to_drop, drop = FALSE] |
198 | 11x |
x_cols_aliased[cols_to_drop] <- TRUE |
199 | 11x |
attr(x_matrix, "assign") <- assign_attr[-cols_to_drop] |
200 | 11x |
attr(x_matrix, "contrasts") <- contrasts_attr |
201 |
} |
|
202 |
} |
|
203 | 308x |
y_vector <- if (has_response) { |
204 | 308x |
as.numeric(stats::model.response(full_frame)) |
205 |
} else { |
|
206 | ! |
rep(NA_real_, nrow(full_frame)) |
207 |
} |
|
208 | 308x |
weights_vector <- as.numeric(stats::model.weights(full_frame)) |
209 | 308x |
n_subjects <- length(unique(full_frame[[formula_parts$subject_var]])) |
210 | 308x |
subject_zero_inds <- which(!duplicated(full_frame[[formula_parts$subject_var]])) - 1L |
211 | 308x |
subject_n_visits <- c(utils::tail(subject_zero_inds, -1L), nrow(full_frame)) - subject_zero_inds |
212 |
# It is possible that `subject_var` is factor with more levels (and this does not affect fit) |
|
213 |
# so no check is needed for `subject_visits`. |
|
214 | 308x |
assert_true(all(subject_n_visits > 0)) |
215 | 308x |
if (!is.null(formula_parts$group_var)) { |
216 | 41x |
assert_factor(data[[formula_parts$group_var]]) |
217 | 41x |
subject_groups <- full_frame[[formula_parts$group_var]][subject_zero_inds + 1L] |
218 | 41x |
n_groups <- nlevels(subject_groups) |
219 |
} else { |
|
220 | 267x |
subject_groups <- factor(rep(0L, n_subjects)) |
221 | 267x |
n_groups <- 1L |
222 |
} |
|
223 | 308x |
coordinates <- full_frame[, formula_parts$visit_var, drop = FALSE] |
224 | 308x |
if (formula_parts$is_spatial) { |
225 | 16x |
lapply(coordinates, assert_numeric) |
226 | 16x |
coordinates_matrix <- as.matrix(coordinates) |
227 | 16x |
n_visits <- max(subject_n_visits) |
228 |
} else { |
|
229 | 292x |
assert(identical(ncol(coordinates), 1L)) |
230 | 292x |
assert_factor(coordinates[[1L]]) |
231 | 292x |
coordinates_matrix <- as.matrix(as.integer(coordinates[[1L]]) - 1, ncol = 1) |
232 | 292x |
n_visits <- nlevels(coordinates[[1L]]) |
233 | 292x |
assert_true(all(subject_n_visits <= n_visits)) |
234 |
} |
|
235 | 308x |
structure( |
236 | 308x |
list( |
237 | 308x |
full_frame = full_frame, |
238 | 308x |
data = data, |
239 | 308x |
x_matrix = x_matrix, |
240 | 308x |
x_cols_aliased = x_cols_aliased, |
241 | 308x |
coordinates = coordinates_matrix, |
242 | 308x |
y_vector = y_vector, |
243 | 308x |
weights_vector = weights_vector, |
244 | 308x |
n_visits = n_visits, |
245 | 308x |
n_subjects = n_subjects, |
246 | 308x |
subject_zero_inds = subject_zero_inds, |
247 | 308x |
subject_n_visits = subject_n_visits, |
248 | 308x |
cov_type = formula_parts$cov_type, |
249 | 308x |
is_spatial_int = as.integer(formula_parts$is_spatial), |
250 | 308x |
reml = as.integer(reml), |
251 | 308x |
subject_groups = subject_groups, |
252 | 308x |
n_groups = n_groups |
253 |
), |
|
254 | 308x |
class = "mmrm_tmb_data" |
255 |
) |
|
256 |
} |
|
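A sketch of constructing the data object for the same formula, assuming `fev_data` with unit weights; the returned elements listed above can then be inspected directly.

parts <- h_mmrm_tmb_formula_parts(FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID))
tmb_data <- h_mmrm_tmb_data(
  parts, fev_data,
  weights = rep(1, nrow(fev_data)),
  reml = TRUE,
  singular = "drop",
  drop_visit_levels = TRUE
)
dim(tmb_data$x_matrix)   # n rows and p columns of the design matrix
tmb_data$n_subjects      # number of subjects
tmb_data$n_visits        # dimension of the covariance matrix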
257 | ||
258 |
#' Start Parameters for `TMB` Fit |
|
259 |
#' |
|
260 |
#' @param formula_parts (`mmrm_tmb_formula_parts`)\cr produced by |
|
261 |
#' [h_mmrm_tmb_formula_parts()]. |
|
262 |
#' @param tmb_data (`mmrm_tmb_data`)\cr produced by [h_mmrm_tmb_data()]. |
|
263 |
#' @param start (`numeric` or `NULL`)\cr optional start values for variance |
|
264 |
#' parameters. |
|
265 |
#' @param n_groups (`int`)\cr number of groups. |
|
266 |
#' @return List with element `theta` containing the start values for the variance |
|
267 |
#' parameters. |
|
268 |
#' |
|
269 |
#' @keywords internal |
|
270 |
h_mmrm_tmb_parameters <- function(formula_parts, |
|
271 |
tmb_data, |
|
272 |
start, |
|
273 |
n_groups = 1L) { |
|
274 | 265x |
assert_class(formula_parts, "mmrm_tmb_formula_parts") |
275 | 265x |
assert_class(tmb_data, "mmrm_tmb_data") |
276 | ||
277 | 265x |
m <- tmb_data$n_visits |
278 | 265x |
start_value0 <- std_start(formula_parts$cov_type, m, n_groups) |
279 | 265x |
theta_dim <- length(start_value0) |
280 | 265x |
start_values <- if (is.null(start)) { |
281 | 15x |
start_value0 |
282 | 265x |
} else if (test_function(start)) { |
283 | 233x |
do.call(start, utils::modifyList(formula_parts, tmb_data)) |
284 |
} else { |
|
285 | 17x |
start |
286 |
} |
|
287 | 264x |
assert_numeric(start_values, len = theta_dim, any.missing = FALSE, finite = TRUE) |
288 | 262x |
list(theta = start_values) |
289 |
} |
|
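A sketch of the accepted forms of `start`, assuming `parts` and `tmb_data` from an unstructured fit on `fev_data` with its 4 visits, so that 4 * 5 / 2 = 10 variance parameters are expected; a function passed as `start` would instead be called with the combined formula parts and data elements as named arguments.

h_mmrm_tmb_parameters(parts, tmb_data, start = NULL)          # standard start values
h_mmrm_tmb_parameters(parts, tmb_data, start = rep(0, 10))    # user-supplied numeric start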
290 | ||
291 |
#' Asserting Sane Start Values for `TMB` Fit |
|
292 |
#' |
|
293 |
#' @param tmb_object (`list`)\cr created with [TMB::MakeADFun()]. |
|
294 |
#' |
|
295 |
#' @return Nothing, only used for assertions. |
|
296 |
#' |
|
297 |
#' @keywords internal |
|
298 |
h_mmrm_tmb_assert_start <- function(tmb_object) { |
|
299 | 249x |
assert_list(tmb_object) |
300 | 249x |
assert_subset(c("fn", "gr", "par"), names(tmb_object)) |
301 | ||
302 | 249x |
if (is.na(tmb_object$fn(tmb_object$par))) { |
303 | 1x |
stop("negative log-likelihood is NaN at starting parameter values") |
304 |
} |
|
305 | 248x |
if (any(is.na(tmb_object$gr(tmb_object$par)))) { |
306 | 1x |
stop("some elements of gradient are NaN at starting parameter values") |
307 |
} |
|
308 |
} |
|
309 | ||
310 |
#' Checking the `TMB` Optimization Result |
|
311 |
#' |
|
312 |
#' @param tmb_opt (`list`)\cr optimization result. |
|
313 |
#' @param mmrm_tmb (`mmrm_tmb`)\cr result from [h_mmrm_tmb_fit()]. |
|
314 |
#' |
|
315 |
#' @return Nothing, only used to generate warnings in case that the model |
|
316 |
#' did not converge. |
|
317 |
#' |
|
318 |
#' @keywords internal |
|
319 |
h_mmrm_tmb_check_conv <- function(tmb_opt, mmrm_tmb) { |
|
320 | 245x |
assert_list(tmb_opt) |
321 | 245x |
assert_subset(c("par", "objective", "convergence", "message"), names(tmb_opt)) |
322 | 245x |
assert_class(mmrm_tmb, "mmrm_tmb") |
323 | ||
324 | 245x |
if (!is.null(tmb_opt$convergence) && tmb_opt$convergence != 0) { |
325 | 3x |
warning("Model convergence problem: ", tmb_opt$message, ".") |
326 | 3x |
return() |
327 |
} |
|
328 | 242x |
theta_vcov <- mmrm_tmb$theta_vcov |
329 | 242x |
if (is(theta_vcov, "try-error")) { |
330 | 3x |
warning("Model convergence problem: hessian is singular, theta_vcov not available.") |
331 | 3x |
return() |
332 |
} |
|
333 | 239x |
if (!all(is.finite(theta_vcov))) { |
334 | 3x |
warning("Model convergence problem: theta_vcov contains non-finite values.") |
335 | 3x |
return() |
336 |
} |
|
337 | 236x |
eigen_vals <- eigen(theta_vcov, only.values = TRUE)$values |
338 | 236x |
if (mode(eigen_vals) == "complex" || any(eigen_vals <= 0)) { |
339 |
# Note: complex eigen values signal that the matrix is not symmetric, therefore not positive definite. |
|
340 | 3x |
warning("Model convergence problem: theta_vcov is not positive definite.") |
341 | 3x |
return() |
342 |
} |
|
343 | 233x |
qr_rank <- qr(theta_vcov)$rank |
344 | 233x |
if (qr_rank < ncol(theta_vcov)) { |
345 | 1x |
warning("Model convergence problem: theta_vcov is numerically singular.") |
346 |
} |
|
347 |
} |
|
348 | ||
349 |
#' Extract covariance matrix from `TMB` report and input data |
|
350 |
#' |
|
351 |
#' This helper does some simple post-processing to extract the covariance matrix, or a named |
|
352 |
#' list of covariance matrices if the fit uses grouped covariance structures. |
|
353 |
#' |
|
354 |
#' @param tmb_report (`list`)\cr report created with [TMB::MakeADFun()] report function. |
|
355 |
#' @param tmb_data (`mmrm_tmb_data`)\cr produced by [h_mmrm_tmb_data()]. |
|
356 |
#' @param visit_var (`character`)\cr character vector of the visit variable |
|
357 |
#' @param is_spatial (`flag`)\cr indicator whether the covariance structure is spatial. |
|
358 |
#' @return A simple covariance matrix if there is no grouping, or a named |
|
359 |
#' list of estimated grouped covariance matrices, |
|
360 |
#' with names equal to the group levels. |
|
361 |
#' |
|
362 |
#' @keywords internal |
|
363 |
h_mmrm_tmb_extract_cov <- function(tmb_report, tmb_data, visit_var, is_spatial) { |
|
364 | 241x |
d <- dim(tmb_report$covariance_lower_chol) |
365 | 241x |
visit_names <- if (!is_spatial) { |
366 | 228x |
levels(tmb_data$full_frame[[visit_var]]) |
367 |
} else { |
|
368 | 13x |
c(0, 1) |
369 |
} |
|
370 | 241x |
cov <- lapply( |
371 | 241x |
seq_len(d[1] / d[2]), |
372 | 241x |
function(i) { |
373 | 278x |
ret <- tcrossprod(tmb_report$covariance_lower_chol[seq(1 + (i - 1) * d[2], i * d[2]), ]) |
374 | 278x |
dimnames(ret) <- list(visit_names, visit_names) |
375 | 278x |
return(ret) |
376 |
} |
|
377 |
) |
|
378 | 241x |
if (identical(tmb_data$n_groups, 1L)) { |
379 | 204x |
cov <- cov[[1]] |
380 |
} else { |
|
381 | 37x |
names(cov) <- levels(tmb_data$subject_groups) |
382 |
} |
|
383 | 241x |
return(cov) |
384 |
} |
|
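The reconstruction above relies on `Sigma = L %*% t(L)` for each stacked lower Cholesky block of `covariance_lower_chol`; a toy sketch with hypothetical numbers:

L <- matrix(c(1, 0.5, 0, 2), nrow = 2)   # a 2 x 2 lower triangular Cholesky factor
sigma <- tcrossprod(L)                   # same as L %*% t(L), here matrix(c(1, 0.5, 0.5, 4.25), 2)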
385 | ||
386 |
#' Build `TMB` Fit Result List |
|
387 |
#' |
|
388 |
#' This helper does some simple post-processing of the `TMB` object and |
|
389 |
#' optimization results, including setting names, inverting matrices etc. |
|
390 |
#' |
|
391 |
#' @param tmb_object (`list`)\cr created with [TMB::MakeADFun()]. |
|
392 |
#' @param tmb_opt (`list`)\cr optimization result. |
|
393 |
#' @param formula_parts (`mmrm_tmb_formula_parts`)\cr produced by |
|
394 |
#' [h_mmrm_tmb_formula_parts()]. |
|
395 |
#' @param tmb_data (`mmrm_tmb_data`)\cr produced by [h_mmrm_tmb_data()]. |
|
396 |
#' |
|
397 |
#' @return List of class `mmrm_tmb` with: |
|
398 |
#' - `cov`: estimated covariance matrix, or named list of estimated group specific covariance matrices. |
|
399 |
#' - `beta_est`: vector of coefficient estimates. |
|
400 |
#' - `beta_vcov`: Variance-covariance matrix for coefficient estimates. |
|
401 |
#' - `beta_vcov_inv_L`: Lower triangular matrix `L` of the inverse variance-covariance matrix decomposition. |
|
402 |
#' - `beta_vcov_inv_D`: vector of diagonal matrix `D` of the inverse variance-covariance matrix decomposition. |
|
403 |
#' - `theta_est`: vector of variance parameter estimates. |
|
404 |
#' - `theta_vcov`: variance-covariance matrix for variance parameter estimates. |
|
405 |
#' - `neg_log_lik`: obtained negative log-likelihood. |
|
406 |
#' - `formula_parts`: input. |
|
407 |
#' - `data`: input. |
|
408 |
#' - `weights`: input. |
|
409 |
#' - `reml`: input as a flag. |
|
410 |
#' - `opt_details`: list with optimization details including convergence code. |
|
411 |
#' - `tmb_object`: original `TMB` object created with [TMB::MakeADFun()]. |
|
412 |
#' - `tmb_data`: input. |
|
413 |
#' |
|
414 |
#' @details Instead of inverting or decomposing `beta_vcov`, it can be more efficient to use its robust |
|
415 |
#' Cholesky decomposition `LDL^T`, therefore we return the corresponding two components `L` and `D` |
|
416 |
#' as well since they have been available on the `C++` side already. |
|
417 |
#' |
|
418 |
#' @keywords internal |
|
419 |
h_mmrm_tmb_fit <- function(tmb_object, |
|
420 |
tmb_opt, |
|
421 |
formula_parts, |
|
422 |
tmb_data) { |
|
423 | 239x |
assert_list(tmb_object) |
424 | 239x |
assert_subset(c("fn", "gr", "par", "he"), names(tmb_object)) |
425 | 239x |
assert_list(tmb_opt) |
426 | 239x |
assert_subset(c("par", "objective", "convergence", "message"), names(tmb_opt)) |
427 | 239x |
assert_class(formula_parts, "mmrm_tmb_formula_parts") |
428 | 239x |
assert_class(tmb_data, "mmrm_tmb_data") |
429 | ||
430 | 239x |
tmb_report <- tmb_object$report(par = tmb_opt$par) |
431 | 239x |
x_matrix_cols <- colnames(tmb_data$x_matrix) |
432 | 239x |
cov <- h_mmrm_tmb_extract_cov(tmb_report, tmb_data, formula_parts$visit_var, formula_parts$is_spatial) |
433 | 239x |
beta_est <- tmb_report$beta |
434 | 239x |
names(beta_est) <- x_matrix_cols |
435 | 239x |
beta_vcov <- tmb_report$beta_vcov |
436 | 239x |
dimnames(beta_vcov) <- list(x_matrix_cols, x_matrix_cols) |
437 | 239x |
beta_vcov_inv_L <- tmb_report$XtWX_L # nolint |
438 | 239x |
beta_vcov_inv_D <- tmb_report$XtWX_D # nolint |
439 | 239x |
theta_est <- tmb_opt$par |
440 | 239x |
names(theta_est) <- NULL |
441 | 239x |
theta_vcov <- try(solve(tmb_object$he(tmb_opt$par)), silent = TRUE) |
442 | 239x |
opt_details_names <- setdiff( |
443 | 239x |
names(tmb_opt), |
444 | 239x |
c("par", "objective") |
445 |
) |
|
446 | 239x |
structure( |
447 | 239x |
list( |
448 | 239x |
cov = cov, |
449 | 239x |
beta_est = beta_est, |
450 | 239x |
beta_vcov = beta_vcov, |
451 | 239x |
beta_vcov_inv_L = beta_vcov_inv_L, |
452 | 239x |
beta_vcov_inv_D = beta_vcov_inv_D, |
453 | 239x |
theta_est = theta_est, |
454 | 239x |
theta_vcov = theta_vcov, |
455 | 239x |
neg_log_lik = tmb_opt$objective, |
456 | 239x |
formula_parts = formula_parts, |
457 | 239x |
data = tmb_data$data, |
458 | 239x |
weights = tmb_data$weights_vector, |
459 | 239x |
reml = as.logical(tmb_data$reml), |
460 | 239x |
opt_details = tmb_opt[opt_details_names], |
461 | 239x |
tmb_object = tmb_object, |
462 | 239x |
tmb_data = tmb_data |
463 |
), |
|
464 | 239x |
class = "mmrm_tmb" |
465 |
) |
|
466 |
} |
|
467 | ||
468 |
#' Low-Level Fitting Function for MMRM |
|
469 |
#' |
|
470 |
#' @description `r lifecycle::badge("stable")` |
|
471 |
#' |
|
472 |
#' This is the low-level function to fit an MMRM. Note that this does not |
|
473 |
#' try different optimizers or adds Jacobian information etc. in contrast to |
|
474 |
#' [mmrm()]. |
|
475 |
#' |
|
476 |
#' @param formula (`formula`)\cr model formula with exactly one special term |
|
477 |
#' specifying the visits within subjects, see details. |
|
478 |
#' @param data (`data.frame`)\cr input data containing the variables used in |
|
479 |
#' `formula`. |
|
480 |
#' @param weights (`vector`)\cr input vector containing the weights. |
|
481 |
#' @inheritParams h_mmrm_tmb_data |
|
482 |
#' @param covariance (`cov_struct`)\cr A covariance structure type definition, |
|
483 |
#' or value that can be coerced to a covariance structure using |
|
484 |
#' [as.cov_struct()]. If no value is provided, a structure is derived from |
|
485 |
#' the provided formula. |
|
486 |
#' @param control (`mmrm_control`)\cr list of control options produced by |
|
487 |
#' [mmrm_control()]. |
|
488 |
#' @inheritParams fit_single_optimizer |
|
489 |
#' |
|
490 |
#' @return List of class `mmrm_tmb`, see [h_mmrm_tmb_fit()] for details. |
|
491 |
#' In addition, it contains elements `call` and `optimizer`. |
|
492 |
#' |
|
493 |
#' @details |
|
494 |
#' The `formula` typically looks like: |
|
495 |
#' |
|
496 |
#' `FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID)` |
|
497 |
#' |
|
498 |
#' which specifies response and covariates as usual, and exactly one special term |
|
499 |
#' defines which covariance structure is used and what are the visit and |
|
500 |
#' subject variables. |
|
501 |
#' |
|
502 |
#' Only the first optimizer is used if multiple optimizers are provided. |
|
503 |
#' |
|
504 |
#' @export |
|
505 |
#' |
|
506 |
#' @examples |
|
507 |
#' formula <- FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID) |
|
508 |
#' data <- fev_data |
|
509 |
#' system.time(result <- fit_mmrm(formula, data, rep(1, nrow(fev_data)))) |
|
510 |
fit_mmrm <- function(formula, |
|
511 |
data, |
|
512 |
weights, |
|
513 |
reml = TRUE, |
|
514 |
covariance = NULL, |
|
515 |
tmb_data, |
|
516 |
formula_parts, |
|
517 |
control = mmrm_control()) { |
|
518 | 252x |
if (missing(formula_parts) || missing(tmb_data)) { |
519 | 67x |
covariance <- h_reconcile_cov_struct(formula, covariance) |
520 | 65x |
formula_parts <- h_mmrm_tmb_formula_parts(formula, covariance) |
521 | ||
522 | 65x |
if (!formula_parts$is_spatial && !is.factor(data[[formula_parts$visit_var]])) { |
523 | 1x |
stop("Time variable must be a factor for non-spatial covariance structures") |
524 |
} |
|
525 | ||
526 | 64x |
assert_class(control, "mmrm_control") |
527 | 64x |
assert_list(control$optimizers, min.len = 1) |
528 | 64x |
assert_numeric(weights, any.missing = FALSE) |
529 | 64x |
assert_true(all(weights > 0)) |
530 | 64x |
tmb_data <- h_mmrm_tmb_data( |
531 | 64x |
formula_parts, data, weights, reml, |
532 | 64x |
singular = if (control$accept_singular) "drop" else "error", drop_visit_levels = control$drop_visit_levels |
533 |
) |
|
534 |
} else { |
|
535 | 185x |
assert_class(tmb_data, "mmrm_tmb_data") |
536 | 185x |
assert_class(formula_parts, "mmrm_tmb_formula_parts") |
537 |
} |
|
538 | 249x |
tmb_parameters <- h_mmrm_tmb_parameters(formula_parts, tmb_data, start = control$start, n_groups = tmb_data$n_groups) |
539 | ||
540 | 246x |
tmb_object <- TMB::MakeADFun( |
541 | 246x |
data = tmb_data, |
542 | 246x |
parameters = tmb_parameters, |
543 | 246x |
hessian = TRUE, |
544 | 246x |
DLL = "mmrm", |
545 | 246x |
silent = TRUE |
546 |
) |
|
547 | 246x |
h_mmrm_tmb_assert_start(tmb_object) |
548 | 246x |
used_optimizer <- control$optimizers[[1L]] |
549 | 246x |
used_optimizer_name <- names(control$optimizers)[1L] |
550 | 246x |
args <- with( |
551 | 246x |
tmb_object, |
552 | 246x |
c( |
553 | 246x |
list(par, fn, gr), |
554 | 246x |
attr(used_optimizer, "args") |
555 |
) |
|
556 |
) |
|
557 | 246x |
if (identical(attr(used_optimizer, "use_hessian"), TRUE)) { |
558 | 8x |
args$hessian <- tmb_object$he |
559 |
} |
|
560 | 246x |
tmb_opt <- do.call( |
561 | 246x |
what = used_optimizer, |
562 | 246x |
args = args |
563 |
) |
|
564 |
# Ensure negative log likelihood is stored in `objective` element of list. |
|
565 | 237x |
if ("value" %in% names(tmb_opt)) { |
566 | 227x |
tmb_opt$objective <- tmb_opt$value |
567 | 227x |
tmb_opt$value <- NULL |
568 |
} |
|
569 | 237x |
fit <- h_mmrm_tmb_fit(tmb_object, tmb_opt, formula_parts, tmb_data) |
570 | 237x |
h_mmrm_tmb_check_conv(tmb_opt, fit) |
571 | 237x |
fit$call <- match.call() |
572 | 237x |
fit$call$formula <- formula_parts$formula |
573 | 237x |
fit$optimizer <- used_optimizer_name |
574 | 237x |
fit |
575 |
} |
1 |
#' Dynamic Registration for Package Interoperability |
|
2 |
#' |
|
3 |
#' @seealso See `vignette("xtending", package = "emmeans")` for background. |
|
4 |
#' @keywords internal |
|
5 |
#' @noRd |
|
6 |
.onLoad <- function(libname, pkgname) { # nolint |
|
7 | ! |
if (utils::packageVersion("TMB") < "1.9.15") { |
8 | ! |
warning("TMB version 1.9.15 or higher is required for reproducible model fits", call. = FALSE) |
9 |
} |
|
10 | ||
11 | ! |
register_on_load( |
12 | ! |
"emmeans", c("1.6", NA), |
13 | ! |
callback = function() emmeans::.emm_register("mmrm", pkgname), |
14 | ! |
message = "mmrm() registered as emmeans extension" |
15 |
) |
|
16 | ||
17 | ! |
register_on_load( |
18 | ! |
"parsnip", c("1.1.0", NA), |
19 | ! |
callback = parsnip_add_mmrm, |
20 | ! |
message = emit_tidymodels_register_msg |
21 |
) |
|
22 | ! |
register_on_load( |
23 | ! |
"car", c("3.1.2", NA), |
24 | ! |
callback = car_add_mmrm, |
25 | ! |
message = "mmrm() registered as car::Anova extension" |
26 |
) |
|
27 |
} |
|
28 | ||
29 |
#' Helper Function for Registering Functionality With Suggests Packages |
|
30 |
#' |
|
31 |
#' @inheritParams check_package_version |
|
32 |
#' |
|
33 |
#' @param callback (`function(...) ANY`)\cr a callback to execute upon package |
|
34 |
#' load. Note that no arguments are passed to this function. Any necessary |
|
35 |
#' data must be provided upon construction. |
|
36 |
#' |
|
37 |
#' @param message (`NULL` or `string`)\cr an optional message to print after |
|
38 |
#' the callback is executed upon successful registration. |
|
39 |
#' |
|
40 |
#' @return A logical (invisibly) indicating whether registration was successful. |
|
41 |
#' If not, an `onLoad` hook is set so that registration is attempted again the next time the package is loaded. |
|
42 |
#' |
|
43 |
#' @keywords internal |
|
44 |
register_on_load <- function(pkg, |
|
45 |
ver = c(NA_character_, NA_character_), |
|
46 |
callback, |
|
47 |
message = NULL) { |
|
48 | 4x |
if (isNamespaceLoaded(pkg) && check_package_version(pkg, ver)) { |
49 | 3x |
callback() |
50 | 2x |
if (is.character(message)) packageStartupMessage(message) |
51 | 1x |
if (is.function(message)) packageStartupMessage(message()) |
52 | 3x |
return(invisible(TRUE)) |
53 |
} |
|
54 | ||
55 | 1x |
setHook( |
56 | 1x |
packageEvent(pkg, event = "onLoad"), |
57 | 1x |
action = "append", |
58 | 1x |
function(...) { |
59 | ! |
register_on_load( |
60 | ! |
pkg = pkg, |
61 | ! |
ver = ver, |
62 | ! |
callback = callback, |
63 | ! |
message = message |
64 |
) |
|
65 |
} |
|
66 |
) |
|
67 | ||
68 | 1x |
invisible(FALSE) |
69 |
} |
|
70 | ||
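# A minimal usage sketch (not part of the package source): how a new optional
# integration could be wired up via register_on_load(). The package name
# "somepkg" and the callback body are hypothetical placeholders.
if (FALSE) {
  register_on_load(
    pkg = "somepkg",
    ver = c("1.0.0", NA),
    callback = function() message("somepkg hooks installed"),
    message = "mmrm registered for use with somepkg"
  )
}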
71 |
#' Check Suggested Dependency Against Version Requirements |
|
72 |
#' |
|
73 |
#' @param pkg (`string`)\cr package name. |
|
74 |
#' @param ver (`character`)\cr of length 2 whose elements can be provided to |
|
75 |
#' [numeric_version()], representing a minimum and maximum (inclusive) version |
|
76 |
#' requirement for interoperability. When `NA`, no version requirement is |
|
77 |
#' imposed. Defaults to no version requirement. |
|
78 |
#' |
|
79 |
#' @return A logical (invisibly) indicating whether the loaded package meets |
|
80 |
#' the version requirements. A warning is emitted otherwise. |
|
81 |
#' |
|
82 |
#' @keywords internal |
|
83 |
check_package_version <- function(pkg, ver = c(NA_character_, NA_character_)) { |
|
84 | 7x |
assert_character(ver, len = 2L) |
85 | 6x |
pkg_ver <- utils::packageVersion(pkg) |
86 | 6x |
ver <- numeric_version(ver, strict = FALSE) |
87 | ||
88 | 6x |
warn_version <- function(pkg, pkg_ver, ver) { |
89 | 2x |
ver_na <- is.na(ver) |
90 | 2x |
warning(sprintf( |
91 | 2x |
"Cannot register mmrm for use with %s (v%s). Version %s required.", |
92 | 2x |
pkg, pkg_ver, |
93 | 2x |
if (!any(ver_na)) { |
94 | ! |
sprintf("%s to %s", ver[1], ver[2]) |
95 | 2x |
} else if (ver_na[2]) { |
96 | 1x |
paste0(">= ", ver[1]) |
97 | 2x |
} else if (ver_na[1]) { |
98 | 1x |
paste0("<= ", ver[2]) |
99 |
} |
|
100 |
)) |
|
101 |
} |
|
102 | ||
103 | 6x |
if (identical(pkg_ver < ver[1], TRUE) || identical(pkg_ver > ver[2], TRUE)) { |
104 | 2x |
warn_version(pkg, pkg_ver, ver) |
105 | 2x |
return(invisible(FALSE)) |
106 |
} |
|
107 | ||
108 | 4x |
invisible(TRUE) |
109 |
} |
|
110 | ||
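# A hedged sketch of how check_package_version() interprets `ver`: the first
# element is the minimum and the second the maximum accepted version, with NA
# meaning "no bound". Results depend on the locally installed package version.
if (FALSE) {
  check_package_version("emmeans", c("1.6", NA)) # TRUE if installed emmeans is >= 1.6
  check_package_version("emmeans", c(NA, "1.0")) # FALSE plus a warning if installed emmeans is > 1.0
}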
111 |
#' Format a Message to Emit When Tidymodels is Loaded |
|
112 |
#' |
|
113 |
#' @return A character message to emit. Either an ANSI-formatted cli output if |
|
114 |
#' package 'cli' is available or a plain-text message otherwise. |
|
115 |
#' |
|
116 |
#' @keywords internal |
|
117 |
emit_tidymodels_register_msg <- function() { |
|
118 | 1x |
pkg <- utils::packageName() |
119 | 1x |
ver <- utils::packageVersion(pkg) |
120 | ||
121 | 1x |
if (isTRUE(getOption("tidymodels.quiet"))) { |
122 | ! |
return() |
123 |
} |
|
124 | ||
125 |
# if tidymodels is attached, the cli package comes along as a dependency |
|
126 | 1x |
has_cli <- requireNamespace("cli", quietly = TRUE) |
127 | 1x |
if (has_cli) { |
128 |
# unfortunately, cli does not expose many formatting tools for emitting |
|
129 |
# messages (only via conditions to stderr) which can't be suppressed using |
|
130 |
# suppressPackageStartupMessages(), so formatting must be done ad hoc, |
|
131 |
# similar to how it's done in {tidymodels} R/attach.R |
|
132 | 1x |
paste0( |
133 | 1x |
cli::rule( |
134 | 1x |
left = cli::style_bold("Model Registration"), |
135 | 1x |
right = paste(pkg, ver) |
136 |
), |
|
137 | 1x |
"\n", |
138 | 1x |
cli::col_green(cli::symbol$tick), " ", |
139 | 1x |
cli::col_blue("mmrm"), "::", cli::col_green("mmrm()") |
140 |
) |
|
141 |
} else { |
|
142 | ! |
paste0(pkg, "::mmrm() registered for use with tidymodels") |
143 |
} |
|
144 |
} |
1 |
#' Register `mmrm` For Use With `car::Anova` |
|
2 |
#' |
|
3 |
#' @inheritParams base::requireNamespace |
|
4 |
#' @return A logical value indicating whether registration was successful. |
|
5 |
#' |
|
6 |
#' @keywords internal |
|
7 |
car_add_mmrm <- function(quietly = FALSE) { |
|
8 | 1x |
if (!requireNamespace("car", quietly = quietly)) { |
9 | ! |
return(FALSE) |
10 |
} |
|
11 | 1x |
envir <- asNamespace("mmrm") |
12 | 1x |
h_register_s3("car", "Anova", "mmrm", envir) |
13 | 1x |
TRUE |
14 |
} |
|
15 | ||
16 | ||
17 |
#' Obtain Contrast for Specified Effect |
|
18 |
#' |
|
19 |
#' This is a support function to obtain the contrast matrix for type II/III testing. |
|
20 |
#' |
|
21 |
#' @param object (`mmrm`)\cr the fitted MMRM. |
|
22 |
#' @param effect (`string`)\cr the name of the effect. |
|
23 |
#' @param type (`string`)\cr the type of test, one of "II", "III", "2" or "3". |
|
24 |
#' @param tol (`numeric`)\cr threshold below which values are treated as 0. |
|
25 |
#' |
|
26 |
#' @return A `matrix` of the contrast. |
|
27 |
#' |
|
28 |
#' @keywords internal |
|
29 |
h_get_contrast <- function(object, effect, type = c("II", "III", "2", "3"), tol = sqrt(.Machine$double.eps)) { |
|
30 | 45x |
assert_class(object, "mmrm") |
31 | 45x |
assert_string(effect) |
32 | 45x |
assert_double(tol, finite = TRUE, len = 1L) |
33 | 45x |
type <- match.arg(type) |
34 | 45x |
mx <- component(object, "x_matrix") |
35 | 45x |
asg <- attr(mx, "assign") |
36 | 45x |
formula <- object$formula_parts$model_formula |
37 | 45x |
tms <- terms(formula) |
38 | 45x |
fcts <- attr(tms, "factors")[-1L, , drop = FALSE] # Discard the response. |
39 | 45x |
ods <- attr(tms, "order") |
40 | 45x |
assert_subset(effect, colnames(fcts)) |
41 | 45x |
idx <- which(effect == colnames(fcts)) |
42 | 45x |
cols <- which(asg == idx) |
43 | 45x |
xlev <- component(object, "xlev") |
44 | 45x |
contains_intercept <- (!0 %in% asg) && h_first_contain_categorical(effect, fcts, names(xlev)) |
45 | 45x |
coef_rows <- length(cols) - as.integer(contains_intercept) |
46 | 45x |
l_mx <- matrix(0, nrow = coef_rows, ncol = length(asg)) |
47 | 45x |
if (coef_rows == 0L) { |
48 | 1x |
return(l_mx) |
49 |
} |
|
50 | 44x |
if (contains_intercept) { |
51 | 4x |
l_mx[, cols] <- cbind(-1, diag(rep(1, coef_rows))) |
52 |
} else { |
|
53 | 40x |
l_mx[, cols] <- diag(rep(1, coef_rows)) |
54 |
} |
|
55 | 44x |
for (i in setdiff(seq_len(ncol(fcts)), idx)) { |
56 | 120x |
additional_vars <- names(which(fcts[, i] > fcts[, idx])) |
57 | 120x |
additional_numeric <- any(!additional_vars %in% names(xlev)) |
58 | 120x |
current_col <- which(asg == i) |
59 | 120x |
if (ods[i] >= ods[idx] && all(fcts[, i] >= fcts[, idx]) && !additional_numeric) { |
60 | 24x |
sub_mat <- switch(type, |
61 | 24x |
"2" = , |
62 | 24x |
"II" = { |
63 | 8x |
x1 <- mx[, cols, drop = FALSE] |
64 | 8x |
x0 <- mx[, -c(cols, current_col), drop = FALSE] |
65 | 8x |
x2 <- mx[, current_col, drop = FALSE] |
66 | 8x |
m <- diag(rep(1, nrow(x0))) - x0 %*% solve(t(x0) %*% x0) %*% t(x0) |
67 | 8x |
ret <- solve(t(x1) %*% m %*% x1) %*% t(x1) %*% m %*% x2 |
68 | 8x |
if (contains_intercept) { |
69 | 1x |
ret[-1, ] - ret[1, ] |
70 |
} else { |
|
71 | 7x |
ret |
72 |
} |
|
73 |
}, |
|
74 | 24x |
"3" = , |
75 | 24x |
"III" = { |
76 | 16x |
lvls <- h_obtain_lvls(effect, additional_vars, xlev) |
77 | 16x |
t_levels <- lvls$total |
78 | 16x |
nms_base <- colnames(mx)[cols] |
79 | 16x |
nms <- colnames(mx)[current_col] |
80 | 16x |
nms_base_split <- strsplit(nms_base, ":") |
81 | 16x |
nms_split <- strsplit(nms, ":") |
82 | 16x |
base_idx <- h_get_index(nms_split, nms_base_split) |
83 | 16x |
mt <- l_mx[, cols, drop = FALSE] / t_levels |
84 | 16x |
ret <- mt[, base_idx, drop = FALSE] |
85 |
# if there are extra levels, replace them with -1 / t_levels |
|
86 | 16x |
ret[is.na(ret)] <- -1 / t_levels |
87 | 16x |
ret |
88 |
} |
|
89 |
) |
|
90 | 24x |
l_mx[, current_col] <- sub_mat |
91 |
} |
|
92 |
} |
|
93 | 44x |
l_mx[abs(l_mx) < tol] <- 0 |
94 | 44x |
l_mx |
95 |
} |
|
96 | ||
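# A hedged sketch of how the contrast matrix from h_get_contrast() feeds into
# df_md(), mirroring what Anova.mmrm() below does per effect. Uses the
# fev_data example data set shipped with the package.
if (FALSE) {
  fit <- mmrm(FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), data = fev_data)
  l_armcd <- h_get_contrast(fit, "ARMCD", type = "III")
  df_md(fit, l_armcd) # num_df, denom_df, f_stat and p_val for the ARMCD effect
}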
97 |
#' Conduct type II/III hypothesis testing on the MMRM fit results. |
|
98 |
#' |
|
99 |
#' @param mod (`mmrm`)\cr the fitted MMRM. |
|
100 |
#' @param ... not used. |
|
101 |
#' @inheritParams h_get_contrast |
|
102 |
#' |
|
103 |
#' @details |
|
104 |
#' `Anova` will return an `anova` object with one row per variable and columns |
|
105 |
#' `Num Df` (numerator degrees of freedom), `Denom Df` (denominator degrees of freedom), |
|
106 |
#' `F Statistic` and `Pr(>=F)`. |
|
107 |
#' |
|
108 |
#' @keywords internal |
|
109 |
# Please do not load `car` and then create the documentation. The Rd file will be different. |
|
110 |
Anova.mmrm <- function(mod, type = c("II", "III", "2", "3"), tol = sqrt(.Machine$double.eps), ...) { # nolint |
|
111 | 9x |
assert_double(tol, finite = TRUE, len = 1L) |
112 | 9x |
type <- match.arg(type) |
113 | 9x |
vars <- colnames(attr(terms(mod$formula_parts$model_formula), "factors")) |
114 | 9x |
ret <- lapply( |
115 | 9x |
vars, |
116 | 9x |
function(x) df_md(mod, h_get_contrast(mod, x, type, tol)) |
117 |
) |
|
118 | 9x |
ret_df <- do.call(rbind.data.frame, ret) |
119 | 9x |
row.names(ret_df) <- vars |
120 | 9x |
colnames(ret_df) <- c("Num Df", "Denom Df", "F Statistic", "Pr(>=F)") |
121 | 9x |
class(ret_df) <- c("anova", "data.frame") |
122 | 9x |
attr(ret_df, "heading") <- sprintf( |
123 | 9x |
"Analysis of Fixed Effect Table (Type %s F tests)", |
124 | 9x |
switch(type, |
125 | 9x |
"2" = , |
126 | 9x |
"II" = "II", |
127 | 9x |
"3" = , |
128 | 9x |
"III" = "III" |
129 |
) |
|
130 |
) |
|
131 | 9x |
ret_df |
132 |
} |
|
133 | ||
134 | ||
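# A hedged usage sketch: Anova.mmrm() is registered as an S3 method for
# car::Anova() via car_add_mmrm() above, so with the car package installed a
# type III table can be requested directly from a fit (fev_data assumed).
if (FALSE) {
  fit <- mmrm(FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), data = fev_data)
  car::Anova(fit, type = "III")
}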
135 |
#' Obtain Levels Prior and Posterior |
|
136 |
#' @param var (`string`) name of the effect. |
|
137 |
#' @param additional_vars (`character`) names of additional variables. |
|
138 |
#' @param xlev (`list`) named list of character levels. |
|
139 |
#' @param factors (`matrix`) the factor matrix. |
|
140 |
#' @keywords internal |
|
141 |
h_obtain_lvls <- function(var, additional_vars, xlev, factors) { |
|
142 | 18x |
assert_string(var) |
143 | 18x |
assert_character(additional_vars) |
144 | 18x |
assert_list(xlev, types = "character") |
145 | 18x |
nms <- names(xlev) |
146 | 18x |
assert_subset(additional_vars, nms) |
147 | 18x |
if (var %in% nms) { |
148 | 14x |
prior_vars <- intersect(nms[seq_len(match(var, nms) - 1)], additional_vars) |
149 | 14x |
prior_lvls <- vapply(xlev[prior_vars], length, FUN.VALUE = 1L) |
150 | 14x |
post_vars <- intersect(nms[seq(match(var, nms) + 1, length(nms))], additional_vars) |
151 | 14x |
post_lvls <- vapply(xlev[post_vars], length, FUN.VALUE = 1L) |
152 | 14x |
total_lvls <- prod(prior_lvls) * prod(post_lvls) |
153 |
} else { |
|
154 | 4x |
prior_lvls <- vapply(xlev[additional_vars], length, FUN.VALUE = 1L) |
155 | 4x |
post_lvls <- 2L |
156 | 4x |
total_lvls <- prod(prior_lvls) |
157 |
} |
|
158 | 18x |
list( |
159 | 18x |
prior = prior_lvls, |
160 | 18x |
post = post_lvls, |
161 | 18x |
total = total_lvls |
162 |
) |
|
163 |
} |
|
164 | ||
165 |
#' Check if the Effect is the First Categorical Effect |
|
166 |
#' @param effect (`string`) name of the effect. |
|
167 |
#' @param categorical (`character`) names of the categorical variables. |
|
168 |
#' @param factors (`matrix`) the factor matrix. |
|
169 |
#' @keywords internal |
|
170 |
h_first_contain_categorical <- function(effect, factors, categorical) { |
|
171 | 9x |
assert_string(effect) |
172 | 9x |
assert_matrix(factors) |
173 | 9x |
assert_character(categorical) |
174 | 9x |
mt <- match(effect, colnames(factors)) |
175 | 9x |
varnms <- row.names(factors) |
176 |
# if the effect does not involve any categorical variable, return FALSE |
|
177 | 9x |
if (!any(varnms[factors[, mt] > 0] %in% categorical)) { |
178 | 2x |
return(FALSE) |
179 |
} |
|
180 |
# keep only the categorical rows that come before the current factor |
|
181 | 7x |
factors <- factors[row.names(factors) %in% categorical, seq_len(mt - 1L), drop = FALSE] |
182 |
# if previous cols are all numerical, return TRUE |
|
183 | 7x |
if (ncol(factors) < 1L) { |
184 | 4x |
return(TRUE) |
185 |
} |
|
186 | 3x |
col_ind <- apply(factors, 2, prod) |
187 |
# if any of the previous cols are categorical, return FALSE |
|
188 | 3x |
return(!any(col_ind > 0)) |
189 |
} |
|
190 | ||
191 |
#' Test if the First Vector is Subset of the Second Vector |
|
192 |
#' @param x (`list`) the first list. |
|
193 |
#' @param y (`list`) the second list. |
|
194 |
#' @keywords internal |
|
195 |
h_get_index <- function(x, y) { |
|
196 | 18x |
assert_list(x) |
197 | 18x |
assert_list(y) |
198 | 18x |
vapply( |
199 | 18x |
x, |
200 | 18x |
\(i) { |
201 | 68x |
r <- vapply(y, \(j) test_subset(j, i), FUN.VALUE = TRUE) |
202 | 68x |
if (sum(r) == 1L) { |
203 | 65x |
which(r) |
204 |
} else { |
|
205 | 18x |
NA_integer_ |
206 |
} |
|
207 |
}, |
|
208 | 18x |
FUN.VALUE = 1L |
209 |
) |
|
210 |
} |
1 |
#' Obtain Kenward-Roger Adjustment Components |
|
2 |
#' |
|
3 |
#' @description Obtains the components needed downstream for the computation of Kenward-Roger degrees of freedom. |
|
4 |
#' Used in [mmrm()] fitting if method is "Kenward-Roger". |
|
5 |
#' |
|
6 |
#' @param tmb_data (`mmrm_tmb_data`)\cr produced by [h_mmrm_tmb_data()]. |
|
7 |
#' @param theta (`numeric`)\cr theta estimate. |
|
8 |
#' |
|
9 |
#' @details the function returns a named list with elements \eqn{P}, \eqn{Q} and \eqn{R}, which correspond to the |
|
10 |
#' notation in Kenward and Roger (1997). The matrices are stacked in columns so that \eqn{P}, \eqn{Q} and \eqn{R} have the same |
|
11 |
#' number of columns (the number of beta parameters). The number of rows depends on |
|
12 |
#' the total number of theta parameters and, if the fit is a grouped MMRM, the number of groups. |
|
13 |
#' The \eqn{P} matrix is stacked sequentially. The \eqn{Q} and \eqn{R} matrices are stacked so |
|
14 |
#' that \eqn{Q_{ij}} and \eqn{R_{ij}} are stacked over \eqn{j} first and then over \eqn{i}, i.e. \eqn{R_{i1}}, \eqn{R_{i2}}, etc. |
|
15 |
#' \eqn{Q} and \eqn{R} only contain intra-group results; inter-group results would all be zero matrices, |
|
16 |
#' so they are not stacked in the result. |
|
17 |
#' |
|
18 |
#' @return Named list with elements: |
|
19 |
#' - `P`: `matrix` of \eqn{P} component. |
|
20 |
#' - `Q`: `matrix` of \eqn{Q} component. |
|
21 |
#' - `R`: `matrix` of \eqn{R} component. |
|
22 |
#' |
|
23 |
#' @keywords internal |
|
24 |
h_get_kr_comp <- function(tmb_data, theta) { |
|
25 | 47x |
assert_class(tmb_data, "mmrm_tmb_data") |
26 | 47x |
assert_class(theta, "numeric") |
27 | 47x |
.Call(`_mmrm_get_pqr`, PACKAGE = "mmrm", tmb_data, theta) |
28 |
} |
|
29 | ||
30 |
#' Calculation of Kenward-Roger Degrees of Freedom for Multi-Dimensional Contrast |
|
31 |
#' |
|
32 |
#' @description Used in [df_md()] if method is "Kenward-Roger" or "Kenward-Roger-Linear". |
|
33 |
#' |
|
34 |
#' @inheritParams h_df_md_sat |
|
35 |
#' @inherit h_df_md_sat return |
|
36 |
#' @keywords internal |
|
37 |
h_df_md_kr <- function(object, contrast) { |
|
38 | 6x |
assert_class(object, "mmrm") |
39 | 6x |
assert_matrix(contrast, mode = "numeric", any.missing = FALSE, ncols = length(component(object, "beta_est"))) |
40 | 6x |
if (component(object, "reml") != 1) { |
41 | ! |
stop("Kenward-Roger is only for REML") |
42 |
} |
|
43 | 6x |
kr_comp <- object$kr_comp |
44 | 6x |
w <- component(object, "theta_vcov") |
45 | 6x |
v_adj <- object$beta_vcov_adj |
46 | 6x |
df <- h_kr_df(v0 = object$beta_vcov, l = contrast, w = w, p = kr_comp$P) |
47 | ||
48 | 6x |
h_test_md(object, contrast, df = df$m, f_stat_factor = df$lambda) |
49 |
} |
|
50 | ||
51 |
#' Calculation of Kenward-Roger Degrees of Freedom for One-Dimensional Contrast |
|
52 |
#' |
|
53 |
#' @description Used in [df_1d()] if method is |
|
54 |
#' "Kenward-Roger" or "Kenward-Roger-Linear". |
|
55 |
#' |
|
56 |
#' @inheritParams h_df_1d_sat |
|
57 |
#' @inherit h_df_1d_sat return |
|
58 |
#' @keywords internal |
|
59 |
h_df_1d_kr <- function(object, contrast) { |
|
60 | 21x |
assert_class(object, "mmrm") |
61 | 21x |
assert_numeric(contrast, len = length(component(object, "beta_est"))) |
62 | 21x |
if (component(object, "reml") != 1) { |
63 | ! |
stop("Kenward-Roger is only for REML!") |
64 |
} |
|
65 | ||
66 | 21x |
df <- h_kr_df( |
67 | 21x |
v0 = object$beta_vcov, |
68 | 21x |
l = matrix(contrast, nrow = 1), |
69 | 21x |
w = component(object, "theta_vcov"), |
70 | 21x |
p = object$kr_comp$P |
71 |
) |
|
72 | ||
73 | 21x |
h_test_1d(object, contrast, df$m) |
74 |
} |
|
75 | ||
76 |
#' Obtain the Adjusted Kenward-Roger Degrees of Freedom |
|
77 |
#' |
|
78 |
#' @description Obtains the adjusted Kenward-Roger degrees of freedom and F statistic scale parameter. |
|
79 |
#' Used in [h_df_md_kr()] or [h_df_1d_kr()]. |
|
80 |
#' |
|
81 |
#' @param v0 (`matrix`)\cr unadjusted covariance matrix. |
|
82 |
#' @param l (`matrix`)\cr linear combination matrix. |
|
83 |
#' @param w (`matrix`)\cr hessian matrix. |
|
84 |
#' @param p (`matrix`)\cr P matrix from [h_get_kr_comp()]. |
|
85 |
#' |
|
86 |
#' @return Named list with elements: |
|
87 |
#' - `m`: `numeric` degrees of freedom. |
|
88 |
#' - `lambda`: `numeric` F statistic scale parameter. |
|
89 |
#' |
|
90 |
#' @keywords internal |
|
91 |
h_kr_df <- function(v0, l, w, p) { |
|
92 | 28x |
n_beta <- ncol(v0) |
93 | 28x |
assert_matrix(v0, ncols = n_beta, nrows = n_beta) |
94 | 28x |
assert_matrix(l, ncols = n_beta) |
95 | 28x |
n_theta <- ncol(w) |
96 | 28x |
assert_matrix(w, ncols = n_theta, nrows = n_theta) |
97 | 28x |
n_visits <- ncol(p) |
98 | 28x |
assert_matrix(p, nrows = n_visits * n_theta) |
99 |
# see vignettes/kenward.Rmd#279 |
|
100 | 28x |
slvol <- solve(h_quad_form_mat(l, v0)) |
101 | 28x |
m <- h_quad_form_mat(t(l), slvol) |
102 | 28x |
nl <- nrow(l) |
103 | 28x |
mv0 <- m %*% v0 |
104 | 28x |
pl <- lapply(seq_len(nrow(p) / ncol(p)), function(x) { |
105 | 108x |
ii <- (x - 1) * ncol(p) + 1 |
106 | 108x |
jj <- x * ncol(p) |
107 | 108x |
p[ii:jj, ] |
108 |
}) |
|
109 | 28x |
mv0pv0 <- lapply(pl, function(x) { |
110 | 108x |
mv0 %*% x %*% v0 |
111 |
}) |
|
112 | 28x |
a1 <- 0 |
113 | 28x |
a2 <- 0 |
114 |
# see vignettes/kenward.Rmd#283 |
|
115 | 28x |
for (i in seq_len(length(pl))) { |
116 | 108x |
for (j in seq_len(length(pl))) { |
117 | 592x |
a1 <- a1 + w[i, j] * h_tr(mv0pv0[[i]]) * h_tr(mv0pv0[[j]]) |
118 | 592x |
a2 <- a2 + w[i, j] * h_tr(mv0pv0[[i]] %*% mv0pv0[[j]]) |
119 |
} |
|
120 |
} |
|
121 | 28x |
b <- 1 / (2 * nl) * (a1 + 6 * a2) |
122 | 28x |
e <- 1 + a2 / nl |
123 | 28x |
e_star <- 1 / (1 - a2 / nl) |
124 | 28x |
g <- ((nl + 1) * a1 - (nl + 4) * a2) / ((nl + 2) * a2) |
125 | 28x |
denom <- (3 * nl + 2 - 2 * g) |
126 | 28x |
c1 <- g / denom |
127 | 28x |
c2 <- (nl - g) / denom |
128 | 28x |
c3 <- (nl + 2 - g) / denom |
129 | 28x |
v_star <- 2 / nl * (1 + c1 * b) / (1 - c2 * b)^2 / (1 - c3 * b) |
130 | 28x |
rho <- v_star / (2 * e_star^2) |
131 | 28x |
m <- 4 + (nl + 2) / (nl * rho - 1) |
132 | 28x |
lambda <- m / (e_star * (m - 2)) |
133 | 28x |
list(m = m, lambda = lambda) |
134 |
} |
|
135 | ||
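# A hedged sketch of the inputs of h_kr_df(), assembled the same way as in
# h_df_1d_kr() above, for a model fitted with method = "Kenward-Roger".
if (FALSE) {
  fit <- mmrm(
    FEV1 ~ RACE + ARMCD * AVISIT + us(AVISIT | USUBJID),
    data = fev_data,
    method = "Kenward-Roger"
  )
  contrast <- numeric(length(component(fit, "beta_est")))
  contrast[2] <- 1
  h_kr_df(
    v0 = fit$beta_vcov,               # unadjusted covariance matrix of the coefficients
    l = matrix(contrast, nrow = 1),   # one-dimensional contrast as a row vector
    w = component(fit, "theta_vcov"), # covariance matrix of the variance parameters
    p = fit$kr_comp$P                 # P component from h_get_kr_comp()
  ) # returns list(m = ..., lambda = ...)
}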
136 |
#' Obtain the Adjusted Covariance Matrix |
|
137 |
#' |
|
138 |
#' @description Obtains the Kenward-Roger adjusted covariance matrix for the |
|
139 |
#' coefficient estimates. |
|
140 |
#' Used in [mmrm()] fitting if method is "Kenward-Roger" or "Kenward-Roger-Linear". |
|
141 |
#' |
|
142 |
#' @param v (`matrix`)\cr unadjusted covariance matrix. |
|
143 |
#' @param w (`matrix`)\cr hessian matrix. |
|
144 |
#' @param p (`matrix`)\cr P matrix from [h_get_kr_comp()]. |
|
145 |
#' @param q (`matrix`)\cr Q matrix from [h_get_kr_comp()]. |
|
146 |
#' @param r (`matrix`)\cr R matrix from [h_get_kr_comp()]. |
|
147 |
#' @param linear (`flag`)\cr whether to use linear Kenward-Roger approximation. |
|
148 |
#' |
|
149 |
#' @return The adjusted covariance matrix. |
|
150 |
#' |
|
151 |
#' @keywords internal |
|
152 |
h_var_adj <- function(v, w, p, q, r, linear = FALSE) { |
|
153 | 49x |
assert_flag(linear) |
154 | 49x |
n_beta <- ncol(v) |
155 | 49x |
assert_matrix(v, nrows = n_beta) |
156 | 49x |
n_theta <- ncol(w) |
157 | 49x |
assert_matrix(w, nrows = n_theta) |
158 | 49x |
n_visits <- ncol(p) |
159 | 49x |
theta_per_group <- nrow(q) / nrow(p) |
160 | 49x |
n_groups <- n_theta / theta_per_group |
161 | 49x |
assert_matrix(p, nrows = n_theta * n_visits) |
162 | 49x |
assert_matrix(q, nrows = theta_per_group^2 * n_groups * n_visits, ncols = n_visits) |
163 | 49x |
assert_matrix(r, nrows = theta_per_group^2 * n_groups * n_visits, ncols = n_visits) |
164 | 49x |
if (linear) { |
165 | 13x |
r <- matrix(0, nrow = nrow(r), ncol = ncol(r)) |
166 |
} |
|
167 | ||
168 |
# see vignettes/kenward.Rmd#131 |
|
169 | 49x |
ret <- v |
170 | 49x |
for (i in seq_len(n_theta)) { |
171 | 264x |
for (j in seq_len(n_theta)) { |
172 | 2164x |
gi <- ceiling(i / theta_per_group) |
173 | 2164x |
gj <- ceiling(j / theta_per_group) |
174 | 2164x |
iid <- (i - 1) * n_beta + 1 |
175 | 2164x |
jid <- (j - 1) * n_beta + 1 |
176 | 2164x |
ii <- i - (gi - 1) * theta_per_group |
177 | 2164x |
jj <- j - (gi - 1) * theta_per_group |
178 | 2164x |
ijid <- ((ii - 1) * theta_per_group + jj - 1) * n_beta + (gi - 1) * n_beta * theta_per_group^2 + 1 |
179 | 2164x |
if (gi != gj) { |
180 | 592x |
ret <- ret + 2 * w[i, j] * v %*% (-p[iid:(iid + n_beta - 1), ] %*% v %*% p[jid:(jid + n_beta - 1), ]) %*% v |
181 |
} else { |
|
182 | 1572x |
ret <- ret + 2 * w[i, j] * v %*% ( |
183 | 1572x |
q[ijid:(ijid + n_beta - 1), ] - |
184 | 1572x |
p[iid:(iid + n_beta - 1), ] %*% v %*% p[jid:(jid + n_beta - 1), ] - |
185 | 1572x |
1 / 4 * r[ijid:(ijid + n_beta - 1), ] |
186 | 1572x |
) %*% v |
187 |
} |
|
188 |
} |
|
189 |
} |
|
190 | 49x |
ret |
191 |
} |
1 |
#' Calculation of Degrees of Freedom for One-Dimensional Contrast |
|
2 |
#' |
|
3 |
#' @description `r lifecycle::badge("stable")` |
|
4 |
#' Calculates the estimate, adjusted standard error, degrees of freedom, |
|
5 |
#' t statistic and p-value for one-dimensional contrast. |
|
6 |
#' |
|
7 |
#' @param object (`mmrm`)\cr the MMRM fit. |
|
8 |
#' @param contrast (`numeric`)\cr contrast vector. Note that this should not include |
|
9 |
#' elements for singular coefficient estimates, i.e. only refer to the |
|
10 |
#' actually estimated coefficients. |
|
11 |
#' @return List with `est`, `se`, `df`, `t_stat` and `p_val`. |
|
12 |
#' @export |
|
13 |
#' |
|
14 |
#' @examples |
|
15 |
#' object <- mmrm( |
|
16 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
17 |
#' data = fev_data |
|
18 |
#' ) |
|
19 |
#' contrast <- numeric(length(object$beta_est)) |
|
20 |
#' contrast[3] <- 1 |
|
21 |
#' df_1d(object, contrast) |
|
22 |
df_1d <- function(object, contrast) { |
|
23 | 338x |
assert_class(object, "mmrm") |
24 | 338x |
assert_numeric(contrast, len = length(component(object, "beta_est")), any.missing = FALSE) |
25 | 338x |
contrast <- as.vector(contrast) |
26 | 338x |
switch(object$method, |
27 | 318x |
"Satterthwaite" = h_df_1d_sat(object, contrast), |
28 | 19x |
"Kenward-Roger" = h_df_1d_kr(object, contrast), |
29 | ! |
"Residual" = h_df_1d_res(object, contrast), |
30 | 1x |
"Between-Within" = h_df_1d_bw(object, contrast), |
31 | ! |
stop("Unrecognized degrees of freedom method: ", object$method) |
32 |
) |
|
33 |
} |
|
34 | ||
35 | ||
36 |
#' Calculation of Degrees of Freedom for Multi-Dimensional Contrast |
|
37 |
#' |
|
38 |
#' @description `r lifecycle::badge("stable")` |
|
39 |
#' Calculates the numerator and denominator degrees of freedom, |
|
40 |
#' the F statistic and the p-value for a multi-dimensional contrast, depending on the method |
|
41 |
#' used in [mmrm()]. |
|
42 |
#' |
|
43 |
#' @param object (`mmrm`)\cr the MMRM fit. |
|
44 |
#' @param contrast (`matrix`)\cr numeric contrast matrix, if given a `numeric` |
|
45 |
#' then this is coerced to a row vector. Note that this should not include |
|
46 |
#' elements for singular coefficient estimates, i.e. only refer to the |
|
47 |
#' actually estimated coefficients. |
|
48 |
#' |
|
49 |
#' @return List with `num_df`, `denom_df`, `f_stat` and `p_val` (2-sided p-value). |
|
50 |
#' @export |
|
51 |
#' |
|
52 |
#' @examples |
|
53 |
#' object <- mmrm( |
|
54 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
55 |
#' data = fev_data |
|
56 |
#' ) |
|
57 |
#' contrast <- matrix(data = 0, nrow = 2, ncol = length(object$beta_est)) |
|
58 |
#' contrast[1, 2] <- contrast[2, 3] <- 1 |
|
59 |
#' df_md(object, contrast) |
|
60 |
df_md <- function(object, contrast) { |
|
61 | 150x |
assert_class(object, "mmrm") |
62 | 150x |
assert_numeric(contrast, any.missing = FALSE) |
63 | 150x |
if (!is.matrix(contrast)) { |
64 | 113x |
contrast <- matrix(contrast, ncol = length(contrast)) |
65 |
} |
|
66 | 150x |
assert_matrix(contrast, ncols = length(component(object, "beta_est"))) |
67 | 150x |
if (nrow(contrast) == 0) { |
68 | 1x |
return( |
69 | 1x |
list( |
70 | 1x |
num_df = 0, |
71 | 1x |
denom_df = NA_real_, |
72 | 1x |
f_stat = NA_real_, |
73 | 1x |
p_val = NA_real_ |
74 |
) |
|
75 |
) |
|
76 |
} |
|
77 | 149x |
switch(object$method, |
78 | 145x |
"Satterthwaite" = h_df_md_sat(object, contrast), |
79 | 3x |
"Kenward-Roger" = h_df_md_kr(object, contrast), |
80 | ! |
"Residual" = h_df_md_res(object, contrast), |
81 | 1x |
"Between-Within" = h_df_md_bw(object, contrast), |
82 | ! |
stop("Unrecognized degrees of freedom method: ", object$method) |
83 |
) |
|
84 |
} |
|
85 | ||
86 |
#' Creating T-Statistic Test Results For One-Dimensional Contrast |
|
87 |
#' |
|
88 |
#' @description Creates a list of results for one-dimensional contrasts using |
|
89 |
#' a t-test statistic and the given degrees of freedom. |
|
90 |
#' |
|
91 |
#' @inheritParams df_1d |
|
92 |
#' @param df (`number`)\cr degrees of freedom for the one-dimensional contrast. |
|
93 |
#' |
|
94 |
#' @return List with `est`, `se`, `df`, `t_stat` and `p_val` (2-sided p-value). |
|
95 |
#' |
|
96 |
#' @keywords internal |
|
97 |
h_test_1d <- function(object, |
|
98 |
contrast, |
|
99 |
df) { |
|
100 | 486x |
assert_class(object, "mmrm") |
101 | 486x |
assert_numeric(contrast, len = length(component(object, "beta_est"))) |
102 | 486x |
assert_number(df, lower = .Machine$double.xmin) |
103 | ||
104 | 486x |
est <- sum(contrast * component(object, "beta_est")) |
105 | 486x |
var <- h_quad_form_vec(contrast, component(object, "beta_vcov")) |
106 | 486x |
se <- sqrt(var) |
107 | 486x |
t_stat <- est / se |
108 | 486x |
p_val <- 2 * stats::pt(q = abs(t_stat), df = df, lower.tail = FALSE) |
109 | ||
110 | 486x |
list( |
111 | 486x |
est = est, |
112 | 486x |
se = se, |
113 | 486x |
df = df, |
114 | 486x |
t_stat = t_stat, |
115 | 486x |
p_val = p_val |
116 |
) |
|
117 |
} |
|
118 | ||
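# A hedged sketch spelling out the t statistic that h_test_1d() assembles,
# using the same components as the function body above (fev_data assumed).
if (FALSE) {
  fit <- mmrm(FEV1 ~ ARMCD * AVISIT + us(AVISIT | USUBJID), data = fev_data)
  contrast <- numeric(length(component(fit, "beta_est")))
  contrast[2] <- 1
  est <- sum(contrast * component(fit, "beta_est"))
  se <- sqrt(h_quad_form_vec(contrast, component(fit, "beta_vcov")))
  est / se # equals the t_stat element returned by h_test_1d(fit, contrast, df = <some df>)
}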
119 |
#' Creating F-Statistic Test Results For Multi-Dimensional Contrast |
|
120 |
#' |
|
121 |
#' @description Creates a list of results for multi-dimensional contrasts using |
|
122 |
#' an F-test statistic and the given degrees of freedom. |
|
123 |
#' |
|
124 |
#' @inheritParams df_md |
|
125 |
#' @param contrast (`matrix`)\cr numeric contrast matrix. |
|
126 |
#' @param df (`number`)\cr denominator degrees of freedom for the multi-dimensional contrast. |
|
127 |
#' @param f_stat_factor (`number`)\cr optional scaling factor on top of the standard F-statistic. |
|
128 |
#' |
|
129 |
#' @return List with `num_df`, `denom_df`, `f_stat` and `p_val` (2-sided p-value). |
|
130 |
#' |
|
131 |
#' @keywords internal |
|
132 |
h_test_md <- function(object, |
|
133 |
contrast, |
|
134 |
df, |
|
135 |
f_stat_factor = 1) { |
|
136 | 15x |
assert_class(object, "mmrm") |
137 | 15x |
assert_matrix(contrast, ncols = length(component(object, "beta_est"))) |
138 | 15x |
num_df <- nrow(contrast) |
139 | 15x |
assert_number(df, lower = .Machine$double.xmin) |
140 | 15x |
assert_number(f_stat_factor, lower = .Machine$double.xmin) |
141 | ||
142 | 15x |
prec_contrast <- solve(h_quad_form_mat(contrast, component(object, "beta_vcov"))) |
143 | 15x |
contrast_est <- component(object, "beta_est") %*% t(contrast) |
144 | 15x |
f_statistic <- as.numeric(f_stat_factor / num_df * h_quad_form_mat(contrast_est, prec_contrast)) |
145 | 15x |
p_val <- stats::pf( |
146 | 15x |
q = f_statistic, |
147 | 15x |
df1 = num_df, |
148 | 15x |
df2 = df, |
149 | 15x |
lower.tail = FALSE |
150 |
) |
|
151 | ||
152 | 15x |
list( |
153 | 15x |
num_df = num_df, |
154 | 15x |
denom_df = df, |
155 | 15x |
f_stat = f_statistic, |
156 | 15x |
p_val = p_val |
157 |
) |
|
158 |
} |
1 |
#' Covariance Type Database |
|
2 |
#' |
|
3 |
#' An internal constant for covariance type information. |
|
4 |
#' |
|
5 |
#' @format A data frame with 5 variables and one record per covariance type: |
|
6 |
#' |
|
7 |
#' \describe{ |
|
8 |
#' \item{name}{ |
|
9 |
#' The long-form name of the covariance structure type |
|
10 |
#' } |
|
11 |
#' \item{abbr}{ |
|
12 |
#' The abbreviated name of the covariance structure type |
|
13 |
#' } |
|
14 |
#' \item{habbr}{ |
|
15 |
#' The abbreviated name of the heterogeneous version of a covariance |
|
16 |
#' structure type (The abbreviated name (`abbr`) with a trailing `"h"` if |
|
17 |
#' the structure has a heterogeneous implementation or `NA` otherwise). |
|
18 |
#' } |
|
19 |
#' \item{heterogeneous}{ |
|
20 |
#' A logical value indicating whether the covariance structure has a |
|
21 |
#' heterogeneous counterpart. |
|
22 |
#' } |
|
23 |
#' \item{spatial}{ |
|
24 |
#' A logical value indicating whether the covariance structure is spatial. |
|
25 |
#' } |
|
26 |
#' } |
|
27 |
#' |
|
28 |
#' @keywords internal |
|
29 |
COV_TYPES <- local({ # nolint |
|
30 |
type <- function(name, abbr, habbr, heterogeneous, spatial) { |
|
31 |
args <- as.list(match.call()[-1]) |
|
32 |
do.call(data.frame, args) |
|
33 |
} |
|
34 | ||
35 |
as.data.frame( |
|
36 |
col.names = names(formals(type)), |
|
37 |
rbind( |
|
38 |
type("unstructured", "us", NA, FALSE, FALSE), |
|
39 |
type("Toeplitz", "toep", "toeph", TRUE, FALSE), |
|
40 |
type("auto-regressive order one", "ar1", "ar1h", TRUE, FALSE), |
|
41 |
type("ante-dependence", "ad", "adh", TRUE, FALSE), |
|
42 |
type("compound symmetry", "cs", "csh", TRUE, FALSE), |
|
43 |
type("spatial exponential", "sp_exp", NA, FALSE, TRUE) |
|
44 |
) |
|
45 |
) |
|
46 |
}) |
|
47 | ||
48 |
#' Covariance Types |
|
49 |
#' |
|
50 |
#' @description `r lifecycle::badge("stable")` |
|
51 |
#' |
|
52 |
#' @param form (`character`)\cr covariance structure type name form. One or |
|
53 |
#' more of `"name"`, `"abbr"` (abbreviation), or `"habbr"` (heterogeneous |
|
54 |
#' abbreviation). |
|
55 |
#' @param filter (`character`)\cr covariance structure type filter. One or |
|
56 |
#' more of `"heterogeneous"` or `"spatial"`. |
|
57 |
#' |
|
58 |
#' @return A character vector of accepted covariance structure type names and |
|
59 |
#' abbreviations. |
|
60 |
#' |
|
61 |
#' @section Abbreviations for Covariance Structures: |
|
62 |
#' |
|
63 |
#' ## Common Covariance Structures: |
|
64 |
#' |
|
65 |
#' \tabular{clll}{ |
|
66 |
#' |
|
67 |
#' \strong{Structure} |
|
68 |
#' \tab \strong{Description} |
|
69 |
#' \tab \strong{Parameters} |
|
70 |
#' \tab \strong{\eqn{(i, j)} element} |
|
71 |
#' \cr |
|
72 |
#' |
|
73 |
#' ad |
|
74 |
#' \tab Ante-dependence |
|
75 |
#' \tab \eqn{m} |
|
76 |
#' \tab \eqn{\sigma^{2}\prod_{k=i}^{j-1}\rho_{k}} |
|
77 |
#' \cr |
|
78 |
#' |
|
79 |
#' adh |
|
80 |
#' \tab Heterogeneous ante-dependence |
|
81 |
#' \tab \eqn{2m-1} |
|
82 |
#' \tab \eqn{\sigma_{i}\sigma_{j}\prod_{k=i}^{j-1}\rho_{k}} |
|
83 |
#' \cr |
|
84 |
#' |
|
85 |
#' ar1 |
|
86 |
#' \tab First-order auto-regressive |
|
87 |
#' \tab \eqn{2} |
|
88 |
#' \tab \eqn{\sigma^{2}\rho^{\left \vert {i-j} \right \vert}} |
|
89 |
#' \cr |
|
90 |
#' |
|
91 |
#' ar1h |
|
92 |
#' \tab Heterogeneous first-order auto-regressive |
|
93 |
#' \tab \eqn{m+1} |
|
94 |
#' \tab \eqn{\sigma_{i}\sigma_{j}\rho^{\left \vert {i-j} \right \vert}} |
|
95 |
#' \cr |
|
96 |
#' |
|
97 |
#' cs |
|
98 |
#' \tab Compound symmetry |
|
99 |
#' \tab \eqn{2} |
|
100 |
#' \tab \eqn{\sigma^{2}\left[ \rho I(i \neq j)+I(i=j) \right]} |
|
101 |
#' \cr |
|
102 |
#' |
|
103 |
#' csh |
|
104 |
#' \tab Heterogeneous compound symmetry |
|
105 |
#' \tab \eqn{m+1} |
|
106 |
#' \tab \eqn{\sigma_{i}\sigma_{j}\left[ \rho I(i \neq j)+I(i=j) \right]} |
|
107 |
#' \cr |
|
108 |
#' |
|
109 |
#' toep |
|
110 |
#' \tab Toeplitz |
|
111 |
#' \tab \eqn{m} |
|
112 |
#' \tab \eqn{\sigma_{\left \vert {i-j} \right \vert +1}} |
|
113 |
#' \cr |
|
114 |
#' |
|
115 |
#' toeph |
|
116 |
#' \tab Heterogeneous Toeplitz |
|
117 |
#' \tab \eqn{2m-1} |
|
118 |
#' \tab \eqn{\sigma_{i}\sigma_{j}\rho_{\left \vert {i-j} \right \vert}} |
|
119 |
#' \cr |
|
120 |
#' |
|
121 |
#' us |
|
122 |
#' \tab Unstructured |
|
123 |
#' \tab \eqn{m(m+1)/2} |
|
124 |
#' \tab \eqn{\sigma_{ij}} |
|
125 |
#' |
|
126 |
#' } |
|
127 |
#' |
|
128 |
#' where \eqn{i} and \eqn{j} denote \eqn{i}-th and \eqn{j}-th time points, |
|
129 |
#' respectively, out of total \eqn{m} time points, \eqn{1 \leq i, j \leq m}. |
|
130 |
#' |
|
131 |
#' @note The **ante-dependence** covariance structure in this package refers to |
|
132 |
#' homogeneous ante-dependence, while the ante-dependence covariance structure |
|
133 |
#' from SAS `PROC MIXED` refers to heterogeneous ante-dependence and the |
|
134 |
#' homogeneous version is not available in SAS. |
|
135 |
#' |
|
136 |
#' @note For all non-spatial covariance structures, the time variable must |
|
137 |
#' be coded as a factor. |
|
138 |
#' |
|
139 |
#' ## Spatial Covariance structures: |
|
140 |
#' |
|
141 |
#' \tabular{clll}{ |
|
142 |
#' |
|
143 |
#' \strong{Structure} |
|
144 |
#' \tab \strong{Description} |
|
145 |
#' \tab \strong{Parameters} |
|
146 |
#' \tab \strong{\eqn{(i, j)} element} |
|
147 |
#' \cr |
|
148 |
#' |
|
149 |
#' sp_exp |
|
150 |
#' \tab spatial exponential |
|
151 |
#' \tab \eqn{2} |
|
152 |
#' \tab \eqn{\sigma^{2}\rho^{-d_{ij}}} |
|
153 |
#' |
|
154 |
#' } |
|
155 |
#' |
|
156 |
#' where \eqn{d_{ij}} denotes the Euclidean distance between time points |
|
157 |
#' \eqn{i} and \eqn{j}. |
|
158 |
#' |
|
159 |
#' @family covariance types |
|
160 |
#' @name covariance_types |
|
161 |
#' @export |
|
162 |
cov_types <- function( |
|
163 |
form = c("name", "abbr", "habbr"), |
|
164 |
filter = c("heterogeneous", "spatial")) { |
|
165 | 1666x |
form <- match.arg(form, several.ok = TRUE) |
166 | 1666x |
filter <- if (missing(filter)) c() else match.arg(filter, several.ok = TRUE) |
167 | 1666x |
df <- COV_TYPES[form][rowSums(!COV_TYPES[filter]) == 0, ] |
168 | 1666x |
Filter(Negate(is.na), unlist(t(df), use.names = FALSE)) |
169 |
} |
|
170 | ||
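# A hedged illustration of cov_types(): the output is driven entirely by the
# COV_TYPES table above, so e.g. abbreviations or spatial types can be listed.
if (FALSE) {
  cov_types("abbr")                     # "us" "toep" "ar1" "ad" "cs" "sp_exp"
  cov_types("abbr", filter = "spatial") # "sp_exp"
  cov_types(c("abbr", "habbr"), filter = "heterogeneous") # heterogeneous structures and their abbreviations
}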
171 |
#' Retrieve Associated Abbreviated Covariance Structure Type Name |
|
172 |
#' |
|
173 |
#' @param type (`string`)\cr either a full name or abbreviated covariance |
|
174 |
#' structure type name to collapse into an abbreviated type. |
|
175 |
#' |
|
176 |
#' @return The corresponding abbreviated covariance type name. |
|
177 |
#' |
|
178 |
#' @keywords internal |
|
179 |
cov_type_abbr <- function(type) { |
|
180 | 299x |
row <- which(COV_TYPES == type, arr.ind = TRUE)[, 1] |
181 | 299x |
COV_TYPES$abbr[row] |
182 |
} |
|
183 | ||
184 |
#' Retrieve Associated Full Covariance Structure Type Name |
|
185 |
#' |
|
186 |
#' @param type (`string`)\cr either a full name or abbreviated covariance |
|
187 |
#' structure type name to convert to a long-form type. |
|
188 |
#' |
|
189 |
#' @return The corresponding long-form covariance type name. |
|
190 |
#' |
|
191 |
#' @keywords internal |
|
192 |
cov_type_name <- function(type) { |
|
193 | 6x |
row <- which(COV_TYPES == type, arr.ind = TRUE)[, 1] |
194 | 6x |
COV_TYPES$name[row] |
195 |
} |
|
196 | ||
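# A hedged illustration: the two lookup helpers above are complementary views
# of the COV_TYPES table.
if (FALSE) {
  cov_type_abbr("unstructured") # "us"
  cov_type_name("us")           # "unstructured"
}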
197 |
#' Produce A Covariance Identifier Passing to TMB |
|
198 |
#' |
|
199 |
#' @param cov (`cov_struct`)\cr a covariance structure object. |
|
200 |
#' |
|
201 |
#' @return A string used for method dispatch when passed to TMB. |
|
202 |
#' |
|
203 |
#' @keywords internal |
|
204 |
tmb_cov_type <- function(cov) { |
|
205 | 266x |
paste0(cov$type, if (cov$heterogeneous) "h") |
206 |
} |
|
207 | ||
208 |
#' Define a Covariance Structure |
|
209 |
#' |
|
210 |
#' @description `r lifecycle::badge("stable")` |
|
211 |
#' |
|
212 |
#' @param type (`string`)\cr the name of the covariance structure type to use. |
|
213 |
#' For available options, see `cov_types()`. If a type abbreviation is used |
|
214 |
#' that implies heterogeneity (e.g. `cph`) and no value is provided to |
|
215 |
#' `heterogeneous`, then the heterogeneity is derived from the type name. |
|
216 |
#' @param visits (`character`)\cr a vector of variable names to use for the |
|
217 |
#' longitudinal terms of the covariance structure. Multiple terms are only |
|
218 |
#' permitted for the `"spatial"` covariance type. |
|
219 |
#' @param subject (`string`)\cr the name of the variable that encodes a subject |
|
220 |
#' identifier. |
|
221 |
#' @param group (`string`)\cr optionally, the name of the variable that encodes |
|
222 |
#' a grouping variable for subjects. |
|
223 |
#' @param heterogeneous (`flag`)\cr |
|
224 |
#' |
|
225 |
#' @return A `cov_struct` object. |
|
226 |
#' |
|
227 |
#' @examples |
|
228 |
#' cov_struct("csh", "AVISITN", "USUBJID") |
|
229 |
#' cov_struct("spatial", c("VISITA", "VISITB"), group = "GRP", subject = "SBJ") |
|
230 |
#' |
|
231 |
#' @family covariance types |
|
232 |
#' @export |
|
233 |
cov_struct <- function( |
|
234 |
type = cov_types(), visits, subject, group = character(), |
|
235 |
heterogeneous = FALSE) { |
|
236 |
# if heterogeneous isn't provided, derive from provided type |
|
237 | 296x |
if (missing(heterogeneous)) { |
238 | 294x |
heterogeneous <- switch(type, |
239 | 294x |
toeph = , |
240 | 294x |
ar1h = , |
241 | 294x |
adh = , |
242 | 294x |
csh = TRUE, |
243 | 294x |
heterogeneous |
244 |
) |
|
245 |
} |
|
246 | ||
247 |
# coerce all type options into abbreviated form |
|
248 | 296x |
type <- match.arg(type) |
249 | 295x |
type <- cov_type_abbr(type) |
250 | ||
251 | 295x |
x <- structure( |
252 | 295x |
list( |
253 | 295x |
type = type, |
254 | 295x |
heterogeneous = heterogeneous, |
255 | 295x |
visits = visits, |
256 | 295x |
subject = subject, |
257 | 295x |
group = group |
258 |
), |
|
259 | 295x |
class = c("cov_struct", "mmrm_cov_struct", "list") |
260 |
) |
|
261 | ||
262 | 295x |
validate_cov_struct(x) |
263 |
} |
|
264 | ||
265 |
#' Reconcile Possible Covariance Structure Inputs |
|
266 |
#' |
|
267 |
#' @inheritParams mmrm |
|
268 |
#' |
|
269 |
#' @return The value `covariance` if it's provided or a covariance structure |
|
270 |
#' derived from the provided `formula` otherwise. An error is raised if both |
|
271 |
#' are provided. |
|
272 |
#' |
|
273 |
#' @keywords internal |
|
274 |
h_reconcile_cov_struct <- function(formula = NULL, covariance = NULL) { |
|
275 | 238x |
assert_multi_class(covariance, c("formula", "cov_struct"), null.ok = TRUE) |
276 | 238x |
assert_formula(formula, null.ok = FALSE) |
277 | 238x |
if (inherits(covariance, "formula")) { |
278 | 4x |
covariance <- as.cov_struct(covariance) |
279 |
} |
|
280 | 238x |
if (!is.null(covariance) && length(h_extract_covariance_terms(formula)) > 0) { |
281 | 2x |
stop(paste0( |
282 | 2x |
"Redundant covariance structure definition in `formula` and ", |
283 | 2x |
"`covariance` arguments" |
284 |
)) |
|
285 |
} |
|
286 | ||
287 | 236x |
if (!is.null(covariance)) { |
288 | 5x |
return(covariance) |
289 |
} |
|
290 | ||
291 | 231x |
as.cov_struct(formula, warn_partial = FALSE) |
292 |
} |
|
293 | ||
294 |
#' Validate Covariance Structure Data |
|
295 |
#' |
|
296 |
#' Run checks on the relational integrity of the covariance definition. |
|
297 |
#' |
|
298 |
#' @param x (`cov_struct`)\cr a covariance structure object. |
|
299 |
#' |
|
300 |
#' @return `x` if successful, or an error is thrown otherwise. |
|
301 |
#' |
|
302 |
#' @keywords internal |
|
303 |
validate_cov_struct <- function(x) { |
|
304 | 295x |
checks <- checkmate::makeAssertCollection() |
305 | ||
306 | 295x |
with(x, { |
307 | 295x |
assert_character(subject, len = 1, add = checks) |
308 | 295x |
assert_logical(heterogeneous, len = 1, add = checks) |
309 | ||
310 | 295x |
if (length(group) > 1 || length(visits) < 1) { |
311 | 4x |
checks$push( |
312 | 4x |
"Covariance structure must be of the form `time | (group /) subject`" |
313 |
) |
|
314 |
} |
|
315 | ||
316 | 295x |
if (!type %in% cov_types(filter = "spatial") && length(visits) > 1) { |
317 | 2x |
checks$push(paste( |
318 | 2x |
"Non-spatial covariance structures must have a single longitudinal", |
319 | 2x |
"variable" |
320 |
)) |
|
321 |
} |
|
322 |
}) |
|
323 | ||
324 | 295x |
reportAssertions(checks) |
325 | 289x |
x |
326 |
} |
|
327 | ||
328 |
#' Format Covariance Structure Object |
|
329 |
#' |
|
330 |
#' @param x (`cov_struct`)\cr a covariance structure object. |
|
331 |
#' @param ... Additional arguments unused. |
|
332 |
#' |
|
333 |
#' @return A formatted string for `x`. |
|
334 |
#' |
|
335 |
#' @export |
|
336 |
format.cov_struct <- function(x, ...) { |
|
337 | 3x |
sprintf( |
338 | 3x |
"<covariance structure>\n%s%s:\n\n %s | %s%s\n", |
339 | 3x |
if (x$heterogeneous) "heterogeneous " else "", |
340 | 3x |
cov_type_name(x$type), |
341 | 3x |
format_symbols(x$visits), |
342 | 3x |
if (length(x$group) > 0) paste0(format_symbols(x$group), " / ") else "", |
343 | 3x |
format_symbols(x$subject) |
344 |
) |
|
345 |
} |
|
346 | ||
347 |
#' Print a Covariance Structure Object |
|
348 |
#' |
|
349 |
#' @param x (`cov_struct`)\cr a covariance structure object. |
|
350 |
#' @param ... Additional arguments unused. |
|
351 |
#' |
|
352 |
#' @return `x` invisibly. |
|
353 |
#' |
|
354 |
#' @export |
|
355 |
print.cov_struct <- function(x, ...) { |
|
356 | 3x |
cat(format(x, ...), "\n") |
357 | 3x |
invisible(x) |
358 |
} |
|
359 | ||
360 |
#' Coerce into a Covariance Structure Definition |
|
361 |
#' |
|
362 |
#' @description `r lifecycle::badge("stable")` |
|
363 |
#' |
|
364 |
#' @details |
|
365 |
#' A covariance structure can be parsed from a model definition formula or call. |
|
366 |
#' Generally, covariance structures defined using non-standard evaluation take |
|
367 |
#' the following form: |
|
368 |
#' |
|
369 |
#' ``` |
|
370 |
#' type( (visit, )* visit | (group /)? subject ) |
|
371 |
#' ``` |
|
372 |
#' |
|
373 |
#' For example, formulas may include terms such as |
|
374 |
#' |
|
375 |
#' ```r |
|
376 |
#' us(time | subject) |
|
377 |
#' cs(time | group / subject) |
|
378 |
#' sp_exp(coord1, coord2 | group / subject) |
|
379 |
#' ``` |
|
380 |
#' |
|
381 |
#' Note that only `sp_exp` (spatial) covariance structures may provide multiple |
|
382 |
#' coordinates, which identify the Euclidean distance between the time points. |
|
383 |
#' |
|
384 |
#' @param x an object from which to derive a covariance structure. See object |
|
385 |
#' specific sections for details. |
|
386 |
#' @param warn_partial (`flag`)\cr whether to emit a warning when parts of the |
|
387 |
#' formula are disregarded. |
|
388 |
#' @param ... additional arguments unused. |
|
389 |
#' |
|
390 |
#' @return A [cov_struct()] object. |
|
391 |
#' |
|
392 |
#' @examples |
|
393 |
#' # provide a covariance structure as a right-sided formula |
|
394 |
#' as.cov_struct(~ csh(visit | group / subject)) |
|
395 |
#' |
|
396 |
#' # when part of a full formula, suppress warnings using `warn_partial = FALSE` |
|
397 |
#' as.cov_struct(y ~ x + csh(visit | group / subject), warn_partial = FALSE) |
|
398 |
#' |
|
399 |
#' @family covariance types |
|
400 |
#' @export |
|
401 |
as.cov_struct <- function(x, ...) { # nolint |
|
402 | 278x |
UseMethod("as.cov_struct") |
403 |
} |
|
404 | ||
405 |
#' @export |
|
406 |
as.cov_struct.cov_struct <- function(x, ...) { |
|
407 | ! |
x |
408 |
} |
|
409 | ||
410 |
#' @describeIn as.cov_struct |
|
411 |
#' When provided a formula, any specialized functions are assumed to be |
|
412 |
#' covariance structure definitions and must follow the form: |
|
413 |
#' |
|
414 |
#' ``` |
|
415 |
#' y ~ xs + type( (visit, )* visit | (group /)? subject ) |
|
416 |
#' ``` |
|
417 |
#' |
|
418 |
#' Any component on the right hand side of a formula is considered when |
|
419 |
#' searching for a covariance definition. |
|
420 |
#' |
|
421 |
#' @export |
|
422 |
as.cov_struct.formula <- function(x, warn_partial = TRUE, ...) { |
|
423 | 278x |
x_calls <- h_extract_covariance_terms(x) |
424 | ||
425 | 278x |
if (length(x_calls) < 1) { |
426 | 4x |
stop( |
427 | 4x |
"Covariance structure must be specified in formula. ", |
428 | 4x |
"Possible covariance structures include: ", |
429 | 4x |
paste0(cov_types(c("abbr", "habbr")), collapse = ", ") |
430 |
) |
|
431 |
} |
|
432 | ||
433 | 274x |
if (length(x_calls) > 1) { |
434 | 1x |
cov_struct_types <- as.character(lapply(x_calls, `[[`, 1L)) |
435 | 1x |
stop( |
436 | 1x |
"Only one covariance structure can be specified. ", |
437 | 1x |
"Currently specified covariance structures are: ", |
438 | 1x |
paste0(cov_struct_types, collapse = ", ") |
439 |
) |
|
440 |
} |
|
441 | ||
442 |
# flatten into list of infix operators, calls and names/atomics |
|
443 | 273x |
x <- flatten_call(x_calls[[1]]) |
444 | 273x |
type <- as.character(x[[1]]) |
445 | 273x |
x <- drop_elements(x, 1) |
446 | ||
447 |
# take visits until "|" |
|
448 | 273x |
n <- position_symbol(x, "|", nomatch = 0) |
449 | 273x |
visits <- as.character(utils::head(x, max(n - 1, 0))) |
450 | 273x |
x <- drop_elements(x, n) |
451 | ||
452 |
# take group until "/" |
|
453 | 273x |
n <- position_symbol(x, "/", nomatch = 0) |
454 | 273x |
group <- as.character(utils::head(x, max(n - 1, 0))) |
455 | 273x |
x <- drop_elements(x, n) |
456 | ||
457 |
# remainder is subject |
|
458 | 273x |
subject <- as.character(x) |
459 | ||
460 | 273x |
cov_struct(type = type, visits = visits, group = group, subject = subject) |
461 |
} |
1 |
#' Support for `emmeans` |
|
2 |
#' |
|
3 |
#' @description `r lifecycle::badge("stable")` |
|
4 |
#' |
|
5 |
#' This package includes methods that allow `mmrm` objects to be used |
|
6 |
#' with the `emmeans` package. `emmeans` computes estimated marginal means |
|
7 |
#' (also called least-square means) for the coefficients of the MMRM. |
|
8 |
#' We can also e.g. obtain differences between groups by applying |
|
9 |
#' [`pairs()`][emmeans::pairs.emmGrid()] on the object returned |
|
10 |
#' by [emmeans::emmeans()]. |
|
11 |
#' |
|
12 |
#' @examples |
|
13 |
#' fit <- mmrm( |
|
14 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
15 |
#' data = fev_data |
|
16 |
#' ) |
|
17 |
#' if (require(emmeans)) { |
|
18 |
#' emmeans(fit, ~ ARMCD | AVISIT) |
|
19 |
#' pairs(emmeans(fit, ~ ARMCD | AVISIT), reverse = TRUE) |
|
20 |
#' } |
|
21 |
#' @name emmeans_support |
|
22 |
NULL |
|
23 | ||
24 |
#' Returns a `data.frame` for `emmeans` Purposes |
|
25 |
#' |
|
26 |
#' @seealso See [emmeans::recover_data()] for background. |
|
27 |
#' @keywords internal |
|
28 |
#' @noRd |
|
29 |
recover_data.mmrm <- function(object, ...) { # nolint |
|
30 | 13x |
fun_call <- stats::getCall(object) |
31 |
# subject_var is excluded because it should not be part of the fixed effects. |
|
32 |
# visit_var is not excluded because emmeans can provide marginal means |
|
33 |
# for each visit if visit_var is not spatial. |
|
34 | 13x |
model_frame <- stats::model.frame( |
35 | 13x |
object, |
36 | 13x |
include = c( |
37 | 13x |
if (!object$formula_parts$is_spatial) "visit_var" else NULL, |
38 | 13x |
"response_var", "group_var" |
39 |
) |
|
40 |
) |
|
41 | 13x |
model_terms <- stats::delete.response(stats::terms(model_frame)) |
42 | 13x |
emmeans::recover_data( |
43 | 13x |
fun_call, |
44 | 13x |
trms = model_terms, |
45 | 13x |
na.action = "na.omit", |
46 | 13x |
frame = model_frame, |
47 |
... |
|
48 |
) |
|
49 |
} |
|
50 | ||
51 |
#' Returns a List of Model Details for `emmeans` Purposes |
|
52 |
#' |
|
53 |
#' @seealso See [emmeans::emm_basis()] for background. |
|
54 |
#' @keywords internal |
|
55 |
#' @noRd |
|
56 |
emm_basis.mmrm <- function(object, # nolint |
|
57 |
trms, |
|
58 |
xlev, |
|
59 |
grid, |
|
60 |
...) { |
|
61 | 13x |
model_frame <- stats::model.frame(trms, grid, na.action = stats::na.pass, xlev = xlev) |
62 | 13x |
contrasts <- component(object, "contrasts") |
63 | 13x |
model_mat <- stats::model.matrix(trms, model_frame, contrasts.arg = contrasts) |
64 | 13x |
beta_hat <- component(object, "beta_est") |
65 | 13x |
nbasis <- if (length(beta_hat) < ncol(model_mat)) { |
66 | 6x |
kept <- match(names(beta_hat), colnames(model_mat)) |
67 | 6x |
beta_hat <- NA * model_mat[1L, ] |
68 | 6x |
beta_hat[kept] <- component(object, "beta_est") |
69 | 6x |
orig_model_mat <- stats::model.matrix( |
70 | 6x |
trms, |
71 | 6x |
stats::model.frame( |
72 | 6x |
object, |
73 | 6x |
include = c( |
74 | 6x |
if (!object$formula_parts$is_spatial) "visit_var" else NULL, |
75 | 6x |
"response_var", "group_var" |
76 |
) |
|
77 |
), |
|
78 | 6x |
contrasts.arg = contrasts |
79 |
) |
|
80 | 6x |
estimability::nonest.basis(orig_model_mat) |
81 |
} else { |
|
82 | 7x |
estimability::all.estble |
83 |
} |
|
84 | 13x |
dfargs <- list(object = object) |
85 | 13x |
dffun <- function(k, dfargs) { |
86 | 113x |
mmrm::df_md(dfargs$object, contrast = k)$denom_df |
87 |
} |
|
88 | 13x |
list( |
89 | 13x |
X = model_mat, |
90 | 13x |
bhat = beta_hat, |
91 | 13x |
nbasis = nbasis, |
92 | 13x |
V = component(object, "beta_vcov"), |
93 | 13x |
dffun = dffun, |
94 | 13x |
dfargs = dfargs |
95 |
) |
|
96 |
} |
1 |
#' Obtain List of Jacobian Matrix Entries for Covariance Matrix |
|
2 |
#' |
|
3 |
#' @description Obtain the Jacobian matrices given the covariance function and variance parameters. |
|
4 |
#' |
|
5 |
#' @param tmb_data (`mmrm_tmb_data`)\cr produced by [h_mmrm_tmb_data()]. |
|
6 |
#' @param theta_est (`numeric`)\cr variance parameters point estimate. |
|
7 |
#' @param beta_vcov (`matrix`)\cr variance-covariance matrix of the coefficients. |
|
8 |
#' |
|
9 |
#' @return List with one element per variance parameter containing a matrix |
|
10 |
#' of the same dimensions as the covariance matrix. The values are the derivatives |
|
11 |
#' with regards to this variance parameter. |
|
12 |
#' |
|
13 |
#' @keywords internal |
|
14 |
h_jac_list <- function(tmb_data, |
|
15 |
theta_est, |
|
16 |
beta_vcov) { |
|
17 | 82x |
assert_class(tmb_data, "mmrm_tmb_data") |
18 | 82x |
assert_numeric(theta_est) |
19 | 82x |
assert_matrix(beta_vcov) |
20 | 82x |
.Call(`_mmrm_get_jacobian`, PACKAGE = "mmrm", tmb_data, theta_est, beta_vcov) |
21 |
} |
|
22 | ||
23 |
#' Quadratic Form Calculations |
|
24 |
#' |
|
25 |
#' @description These helpers are mainly for easier readability and slightly better efficiency |
|
26 |
#' of the quadratic forms used in the Satterthwaite calculations. |
|
27 |
#' |
|
28 |
#' @param center (`matrix`)\cr square numeric matrix with the same dimensions as |
|
29 |
#' `x` as the center of the quadratic form. |
|
30 |
#' |
|
31 |
#' @name h_quad_form |
|
32 |
NULL |
|
33 | ||
34 |
#' @describeIn h_quad_form calculates the number `vec %*% center %*% t(vec)` |
|
35 |
#' as a numeric (not a matrix). |
|
36 |
#' |
|
37 |
#' @param vec (`numeric`)\cr interpreted as a row vector. |
|
38 |
#' |
|
39 |
#' @keywords internal |
|
40 |
h_quad_form_vec <- function(vec, center) { |
|
41 | 5607x |
vec <- as.vector(vec) |
42 | 5607x |
assert_numeric(vec, any.missing = FALSE) |
43 | 5607x |
assert_matrix( |
44 | 5607x |
center, |
45 | 5607x |
mode = "numeric", |
46 | 5607x |
any.missing = FALSE, |
47 | 5607x |
nrows = length(vec), |
48 | 5607x |
ncols = length(vec) |
49 |
) |
|
50 | ||
51 | 5607x |
sum(vec * (center %*% vec)) |
52 |
} |
|
53 | ||
54 |
#' @describeIn h_quad_form calculates the quadratic form `mat %*% center %*% t(mat)` |
|
55 |
#' as a matrix, the result is square and has dimensions identical to the number |
|
56 |
#' of rows in `mat`. |
|
57 |
#' |
|
58 |
#' @param mat (`matrix`)\cr numeric matrix to be multiplied left and right of |
|
59 |
#' `center`, therefore needs to have as many columns as there are rows and columns |
|
60 |
#' in `center`. |
|
61 |
#' |
|
62 |
#' @keywords internal |
|
63 |
h_quad_form_mat <- function(mat, center) { |
|
64 | 119x |
assert_matrix(mat, mode = "numeric", any.missing = FALSE, min.cols = 1L) |
65 | 119x |
assert_matrix( |
66 | 119x |
center, |
67 | 119x |
mode = "numeric", |
68 | 119x |
any.missing = FALSE, |
69 | 119x |
nrows = ncol(center), |
70 | 119x |
ncols = ncol(center) |
71 |
) |
|
72 | 119x |
mat %*% tcrossprod(center, mat) |
73 |
} |
|
74 | ||
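# A hedged sketch: the two helpers above are equivalent to the plain matrix
# products, just more readable and marginally cheaper.
if (FALSE) {
  center <- diag(2)
  v <- c(1, 2)
  m <- matrix(1:6, nrow = 3, ncol = 2)
  h_quad_form_vec(v, center) # same as drop(t(v) %*% center %*% v), here 5
  h_quad_form_mat(m, center) # same as m %*% center %*% t(m), a 3 x 3 matrix
}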
75 |
#' Computation of a Gradient Given Jacobian and Contrast Vector |
|
76 |
#' |
|
77 |
#' @description Computes the gradient of a linear combination of `beta` given the Jacobian matrix and |
|
78 |
#' variance parameters. |
|
79 |
#' |
|
80 |
#' @param jac_list (`list`)\cr Jacobian list produced e.g. by [h_jac_list()]. |
|
81 |
#' @param contrast (`numeric`)\cr contrast vector, which needs to have the |
|
82 |
#' same number of elements as there are rows and columns in each element of |
|
83 |
#' `jac_list`. |
|
84 |
#' |
|
85 |
#' @return Numeric vector which contains the quadratic forms of each element of |
|
86 |
#' `jac_list` with the `contrast` vector. |
|
87 |
#' |
|
88 |
#' @keywords internal |
|
89 |
h_gradient <- function(jac_list, contrast) { |
|
90 | 491x |
assert_list(jac_list) |
91 | 491x |
assert_numeric(contrast) |
92 | ||
93 | 491x |
vapply( |
94 | 491x |
jac_list, |
95 | 491x |
h_quad_form_vec, |
96 | 491x |
vec = contrast, |
97 | 491x |
numeric(1L) |
98 |
) |
|
99 |
} |
|
100 | ||
101 |
#' Calculation of Satterthwaite Degrees of Freedom for One-Dimensional Contrast |
|
102 |
#' |
|
103 |
#' @description Used in [df_1d()] if method is |
|
104 |
#' "Satterthwaite". |
|
105 |
#' |
|
106 |
#' @param object (`mmrm`)\cr the MMRM fit. |
|
107 |
#' @param contrast (`numeric`)\cr contrast vector. Note that this should not include |
|
108 |
#' elements for singular coefficient estimates, i.e. only refer to the |
|
109 |
#' actually estimated coefficients. |
|
110 |
#' |
|
111 |
#' @return List with `est`, `se`, `df`, `t_stat` and `p_val`. |
|
112 |
#' @keywords internal |
|
113 |
h_df_1d_sat <- function(object, contrast) { |
|
114 | 456x |
assert_class(object, "mmrm") |
115 | 456x |
contrast <- as.numeric(contrast) |
116 | 456x |
assert_numeric(contrast, len = length(component(object, "beta_est"))) |
117 | ||
118 | 456x |
df <- if (identical(object$vcov, "Asymptotic")) { |
119 | 444x |
grad <- h_gradient(component(object, "jac_list"), contrast) |
120 | 444x |
v_num <- 2 * h_quad_form_vec(contrast, component(object, "beta_vcov"))^2 |
121 | 444x |
v_denom <- h_quad_form_vec(grad, component(object, "theta_vcov")) |
122 | 444x |
v_num / v_denom |
123 | 456x |
} else if (object$vcov %in% c("Empirical", "Empirical-Jackknife", "Empirical-Bias-Reduced")) { |
124 | 12x |
contrast_matrix <- Matrix::.bdiag(rep(list(matrix(contrast, nrow = 1)), component(object, "n_subjects"))) |
125 | 12x |
contrast_matrix <- as.matrix(contrast_matrix) |
126 | 12x |
g_matrix <- h_quad_form_mat(contrast_matrix, object$empirical_df_mat) |
127 | 12x |
h_tr(g_matrix)^2 / sum(g_matrix^2) |
128 |
} |
|
129 | ||
130 | 456x |
h_test_1d(object, contrast, df) |
131 |
} |
|
132 | ||
133 |
#' Calculating Denominator Degrees of Freedom for the Multi-Dimensional Case |
|
134 |
#' |
|
135 |
#' @description Calculates the degrees of freedom for multi-dimensional contrast. |
|
136 |
#' |
|
137 |
#' @param t_stat_df (`numeric`)\cr `n` t-statistic derived degrees of freedom. |
|
138 |
#' |
|
139 |
#' @return Usually the calculation returns `2 * E / (E - n)` where |
|
140 |
#' `E` is the sum of `t / (t - 2)` over all `t_stat_df` values `t`. |
|
141 |
#' |
|
142 |
#' @note If the input values are too similar to each other then just the average |
|
143 |
#' of them is returned. If any of the inputs is not larger than 2 then 2 is |
|
144 |
#' returned. |
|
145 |
#' |
|
146 |
#' @keywords internal |
|
147 |
h_md_denom_df <- function(t_stat_df) { |
|
148 | 24x |
assert_numeric(t_stat_df, min.len = 1L, lower = .Machine$double.xmin, any.missing = FALSE) |
149 | ||
150 | 24x |
if (test_scalar(t_stat_df)) { |
151 | 1x |
t_stat_df |
152 | 23x |
} else if (all(abs(diff(t_stat_df)) < sqrt(.Machine$double.eps))) { |
153 | 1x |
mean(t_stat_df) |
154 | 22x |
} else if (any(t_stat_df <= 2)) { |
155 | 2x |
2 |
156 |
} else { |
|
157 | 20x |
e <- sum(t_stat_df / (t_stat_df - 2)) |
158 | 20x |
2 * e / (e - (length(t_stat_df))) |
159 |
} |
|
160 |
} |
|
161 | ||
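# A hedged numeric sketch of the formula quoted above: with all t-statistic
# degrees of freedom larger than 2, the result is 2 * E / (E - n) where
# E = sum(t / (t - 2)) and n is the number of inputs.
if (FALSE) {
  t_stat_df <- c(10, 20)
  e <- sum(t_stat_df / (t_stat_df - 2)) # 10/8 + 20/18
  2 * e / (e - length(t_stat_df))       # same value as h_md_denom_df(t_stat_df)
}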
162 |
#' Creating F-Statistic Results from One-Dimensional Contrast |
|
163 |
#' |
|
164 |
#' @description Creates multi-dimensional result from one-dimensional contrast from [df_1d()]. |
|
165 |
#' |
|
166 |
#' @param object (`mmrm`)\cr model fit. |
|
167 |
#' @param contrast (`numeric`)\cr one-dimensional contrast. |
|
168 |
#' |
|
169 |
#' @return The one-dimensional degrees of freedom are calculated and then |
|
170 |
#' the p-value is calculated based on them. |
|
171 |
#' |
|
172 |
#' @keywords internal |
|
173 |
h_df_md_from_1d <- function(object, contrast) { |
|
174 | 134x |
res_1d <- h_df_1d_sat(object, contrast) |
175 | 134x |
list( |
176 | 134x |
num_df = 1, |
177 | 134x |
denom_df = res_1d$df, |
178 | 134x |
f_stat = res_1d$t_stat^2, |
179 | 134x |
p_val = stats::pf(q = res_1d$t_stat^2, df1 = 1, df2 = res_1d$df, lower.tail = FALSE) |
180 |
) |
|
181 |
} |
|
182 | ||
183 |
#' Calculation of Satterthwaite Degrees of Freedom for Multi-Dimensional Contrast |
|
184 |
#' |
|
185 |
#' @description Used in [df_md()] if method is "Satterthwaite". |
|
186 |
#' |
|
187 |
#' @param object (`mmrm`)\cr the MMRM fit. |
|
188 |
#' @param contrast (`matrix`)\cr numeric contrast matrix, if given a `numeric` |
|
189 |
#' then this is coerced to a row vector. Note that this should not include |
|
190 |
#' elements for singular coefficient estimates, i.e. only refer to the |
|
191 |
#' actually estimated coefficients. |
|
192 |
#' |
|
193 |
#' @return List with `num_df`, `denom_df`, `f_stat` and `p_val` (2-sided p-value). |
|
194 |
#' @keywords internal |
|
195 |
h_df_md_sat <- function(object, contrast) { |
|
196 | 151x |
assert_class(object, "mmrm") |
197 | 151x |
assert_matrix(contrast, mode = "numeric", any.missing = FALSE, ncols = length(component(object, "beta_est"))) |
198 |
# Early return if we are in the one-dimensional case. |
|
199 | 151x |
if (identical(nrow(contrast), 1L)) { |
200 | 132x |
return(h_df_md_from_1d(object, contrast)) |
201 |
} |
|
202 | ||
203 | 19x |
contrast_cov <- h_quad_form_mat(contrast, component(object, "beta_vcov")) |
204 | 19x |
eigen_cont_cov <- eigen(contrast_cov) |
205 | 19x |
eigen_cont_cov_vctrs <- eigen_cont_cov$vectors |
206 | 19x |
eigen_cont_cov_vals <- eigen_cont_cov$values |
207 | ||
208 | 19x |
eps <- sqrt(.Machine$double.eps) |
209 | 19x |
tol <- max(eps * eigen_cont_cov_vals[1], 0) |
210 | 19x |
rank_cont_cov <- sum(eigen_cont_cov_vals > tol) |
211 | 19x |
assert_number(rank_cont_cov, lower = .Machine$double.xmin) |
212 | 19x |
rank_seq <- seq_len(rank_cont_cov) |
213 | 19x |
vctrs_cont_prod <- crossprod(eigen_cont_cov_vctrs, contrast)[rank_seq, , drop = FALSE] |
214 | ||
215 |
# Early return if rank 1. |
|
216 | 19x |
if (identical(rank_cont_cov, 1L)) { |
217 | 1x |
return(h_df_md_from_1d(object, vctrs_cont_prod)) |
218 |
} |
|
219 | ||
220 | 18x |
t_squared_nums <- drop(vctrs_cont_prod %*% object$beta_est)^2 |
221 | 18x |
t_squared_denoms <- eigen_cont_cov_vals[rank_seq] |
222 | 18x |
t_squared <- t_squared_nums / t_squared_denoms |
223 | 18x |
f_stat <- sum(t_squared) / rank_cont_cov |
224 | 18x |
t_stat_df_nums <- 2 * eigen_cont_cov_vals^2 |
225 | 18x |
t_stat_df <- if (identical(object$vcov, "Asymptotic")) { |
226 | 18x |
grads_vctrs_cont_prod <- lapply( |
227 | 18x |
rank_seq, |
228 | 18x |
function(m) h_gradient(component(object, "jac_list"), contrast = vctrs_cont_prod[m, ]) |
229 |
) |
|
230 | 18x |
t_stat_df_denoms <- vapply( |
231 | 18x |
grads_vctrs_cont_prod, |
232 | 18x |
h_quad_form_vec, |
233 | 18x |
center = component(object, "theta_vcov"), |
234 | 18x |
numeric(1) |
235 |
) |
|
236 | 18x |
t_stat_df_nums / t_stat_df_denoms |
237 |
} else { |
|
238 | ! |
vapply( |
239 | ! |
rank_seq, |
240 | ! |
function(m) { |
241 | ! |
contrast_matrix <- Matrix::.bdiag( |
242 | ! |
rep(list(vctrs_cont_prod[m, , drop = FALSE]), component(object, "n_subjects")) |
243 |
) |
|
244 | ! |
contrast_matrix <- as.matrix(contrast_matrix) |
245 | ! |
g_matrix <- h_quad_form_mat(contrast_matrix, object$empirical_df_mat) |
246 | ! |
h_tr(g_matrix)^2 / sum(g_matrix^2) |
247 |
}, |
|
248 | ! |
FUN.VALUE = 0 |
249 |
) |
|
250 |
} |
|
251 | 18x |
denom_df <- h_md_denom_df(t_stat_df) |
252 | ||
253 | 18x |
list( |
254 | 18x |
num_df = rank_cont_cov, |
255 | 18x |
denom_df = denom_df, |
256 | 18x |
f_stat = f_stat, |
257 | 18x |
p_val = stats::pf(q = f_stat, df1 = rank_cont_cov, df2 = denom_df, lower.tail = FALSE) |
258 |
) |
|
259 |
} |
1 |
#' Component Access for `mmrm_tmb` Objects |
|
2 |
#' |
|
3 |
#' @description `r lifecycle::badge("stable")` |
|
4 |
#' |
|
5 |
#' @param object (`mmrm_tmb`)\cr the fitted MMRM. |
|
6 |
#' @param name (`character`)\cr the component(s) to be retrieved. |
|
7 |
#' @return The corresponding component of the object, see details. |
|
8 |
#' |
|
9 |
#' @details Available `component()` names are as follows: |
|
10 |
#' - `call`: low-level function call which generated the model. |
|
11 |
#' - `formula`: model formula. |
|
12 |
#' - `dataset`: data set name. |
|
13 |
#' - `cov_type`: covariance structure type. |
|
14 |
#' - `n_theta`: number of variance parameters. |
|
15 |
#' - `n_subjects`: number of subjects. |
|
16 |
#' - `n_timepoints`: number of modeled time points. |
|
17 |
#' - `n_obs`: total number of observations. |
|
18 |
#' - `reml`: was REML used (ML was used if `FALSE`). |
|
19 |
#' - `neg_log_lik`: negative log likelihood. |
|
20 |
#' - `convergence`: convergence code from optimizer. |
|
21 |
#' - `conv_message`: message accompanying the convergence code. |
|
22 |
#' - `evaluations`: number of function evaluations for optimization. |
|
23 |
#' - `method`: Adjustment method which was used (for `mmrm` objects), |
|
24 |
#' otherwise `NULL` (for `mmrm_tmb` objects). |
|
25 |
#' - `beta_vcov`: estimated variance-covariance matrix of coefficients |
|
26 |
#' (excluding aliased coefficients). When a Kenward-Roger or Empirical adjusted |
|
27 |
#' covariance matrix of the coefficients is used, the adjusted covariance matrix is returned (to obtain the |
|
28 |
#' original asymptotic covariance matrix, use `object$beta_vcov`). |
|
29 |
#' - `beta_vcov_complete`: estimated variance-covariance matrix including |
|
30 |
#' aliased coefficients with entries set to `NA`. |
|
31 |
#' - `varcor`: estimated covariance matrix for residuals. If there are multiple |
|
32 |
#' groups, a named list of estimated covariance matrices for residuals will be |
|
33 |
#' returned. The names are the group levels. |
|
34 |
#' - `theta_est`: estimated variance parameters. |
|
35 |
#' - `beta_est`: estimated coefficients (excluding aliased coefficients). |
|
36 |
#' - `beta_est_complete`: estimated coefficients including aliased coefficients |
|
37 |
#' set to `NA`. |
|
38 |
#' - `beta_aliased`: whether each coefficient was aliased (i.e. cannot be estimated) |
|
39 |
#' or not. |
|
40 |
#' - `theta_vcov`: estimated variance-covariance matrix of variance parameters. |
|
41 |
#' - `x_matrix`: design matrix used (excluding aliased columns). |
|
42 |
#' - `xlev`: a named list of character vectors giving the full set of levels to be assumed for each factor. |
|
43 |
#' - `contrasts`: a list of contrasts used for each factor. |
|
44 |
#' - `y_vector`: response vector used. |
|
45 |
#' - `jac_list`: Jacobian, see [h_jac_list()] for details. |
|
46 |
#' - `full_frame`: `data.frame` with `n` rows containing all variables needed in the model. |
|
47 |
#' |
|
48 |
#' @seealso In the `lme4` package there is a similar function `getME()`. |
|
49 |
#' |
|
50 |
#' @examples |
|
51 |
#' fit <- mmrm( |
|
52 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
53 |
#' data = fev_data |
|
54 |
#' ) |
|
55 |
#' # Get all available components. |
|
56 |
#' component(fit) |
|
57 |
#' # Get convergence code and message. |
|
58 |
#' component(fit, c("convergence", "conv_message")) |
|
59 |
#' # Get modeled formula as a string. |
|
60 |
#' component(fit, c("formula")) |
|
61 |
#' |
|
62 |
#' @export |
|
63 |
component <- function(object, |
|
64 |
name = c( |
|
65 |
"cov_type", "subject_var", "n_theta", "n_subjects", "n_timepoints", |
|
66 |
"n_obs", "beta_vcov", "beta_vcov_complete", |
|
67 |
"varcor", "formula", "dataset", "n_groups", |
|
68 |
"reml", "convergence", "evaluations", "method", "optimizer", |
|
69 |
"conv_message", "call", "theta_est", |
|
70 |
"beta_est", "beta_est_complete", "beta_aliased", |
|
71 |
"x_matrix", "y_vector", "neg_log_lik", "jac_list", "theta_vcov", |
|
72 |
"full_frame", "xlev", "contrasts" |
|
73 |
)) { |
|
74 | 5115x |
assert_class(object, "mmrm_tmb") |
75 | 5115x |
name <- match.arg(name, several.ok = TRUE) |
76 | ||
77 | 5115x |
list_components <- sapply( |
78 | 5115x |
X = name, |
79 | 5115x |
FUN = switch, |
80 | 5115x |
"call" = object$call, |
81 |
# Strings. |
|
82 | 5115x |
"cov_type" = object$formula_parts$cov_type, |
83 | 5115x |
"subject_var" = object$formula_parts$subject_var, |
84 | 5115x |
"formula" = deparse(object$call$formula), |
85 | 5115x |
"dataset" = object$call$data, |
86 | 5115x |
"reml" = object$reml, |
87 | 5115x |
"conv_message" = object$opt_details$message, |
88 |
# Numeric of length 1. |
|
89 | 5115x |
"convergence" = object$opt_details$convergence, |
90 | 5115x |
"neg_log_lik" = object$neg_log_lik, |
91 | 5115x |
"n_theta" = length(object$theta_est), |
92 | 5115x |
"n_subjects" = object$tmb_data$n_subjects, |
93 | 5115x |
"n_timepoints" = object$tmb_data$n_visits, |
94 | 5115x |
"n_obs" = length(object$tmb_data$y_vector), |
95 | 5115x |
"n_groups" = ifelse(is.list(object$cov), length(object$cov), 1L), |
96 |
# Numeric of length > 1. |
|
97 | 5115x |
"evaluations" = unlist(ifelse(is.null(object$opt_details$evaluations), |
98 | 5115x |
list(object$opt_details$counts), |
99 | 5115x |
list(object$opt_details$evaluations) |
100 |
)), |
|
101 | 5115x |
"method" = object$method, |
102 | 5115x |
"optimizer" = object$optimizer, |
103 | 5115x |
"beta_est" = object$beta_est, |
104 | 5115x |
"beta_est_complete" = |
105 | 5115x |
if (any(object$tmb_data$x_cols_aliased)) { |
106 | 8x |
stats::setNames( |
107 | 8x |
object$beta_est[names(object$tmb_data$x_cols_aliased)], |
108 | 8x |
names(object$tmb_data$x_cols_aliased) |
109 |
) |
|
110 |
} else { |
|
111 | 54x |
object$beta_est |
112 |
}, |
|
113 | 5115x |
"beta_aliased" = object$tmb_data$x_cols_aliased, |
114 | 5115x |
"theta_est" = object$theta_est, |
115 | 5115x |
"y_vector" = object$tmb_data$y_vector, |
116 | 5115x |
"jac_list" = object$jac_list, |
117 |
# Matrices. |
|
118 | 5115x |
"beta_vcov" = |
119 | 5115x |
if (is.null(object$vcov) || identical(object$vcov, "Asymptotic")) { |
120 | 985x |
object$beta_vcov |
121 |
} else { |
|
122 | 66x |
object$beta_vcov_adj |
123 |
}, |
|
124 | 5115x |
"beta_vcov_complete" = |
125 | 5115x |
if (any(object$tmb_data$x_cols_aliased)) { |
126 | 2x |
stats::.vcov.aliased( |
127 | 2x |
aliased = object$tmb_data$x_cols_aliased, |
128 | 2x |
vc = component(object, "beta_vcov"), |
129 | 2x |
complete = TRUE |
130 |
) |
|
131 |
} else { |
|
132 | 4x |
object$beta_vcov |
133 |
}, |
|
134 | 5115x |
"varcor" = object$cov, |
135 | 5115x |
"x_matrix" = object$tmb_data$x_matrix, |
136 | 5115x |
"xlev" = stats::.getXlevels(terms(object), object$tmb_data$full_frame), |
137 | 5115x |
"contrasts" = attr(object$tmb_data$x_matrix, "contrasts"), |
138 | 5115x |
"theta_vcov" = object$theta_vcov, |
139 | 5115x |
"full_frame" = object$tmb_data$full_frame, |
140 |
# If not found. |
|
141 | 5115x |
"..foo.." = |
142 | 5115x |
stop(sprintf( |
143 | 5115x |
"component '%s' is not available", |
144 | 5115x |
name, paste0(class(object), collapse = ", ") |
145 |
)), |
|
146 | 5115x |
simplify = FALSE |
147 |
) |
|
148 | ||
149 | 23x |
if (length(name) == 1) list_components[[1]] else list_components |
150 |
} |
1 |
#' Determine Within or Between for each Design Matrix Column |
|
2 |
#' |
|
3 |
#' @description Used in [h_df_bw_calc()] to determine whether a variable |
|
4 |
#' differs only between subjects or also within subjects. |
|
5 |
#' |
|
6 |
#' @param x_matrix (`matrix`)\cr the design matrix with column names. |
|
7 |
#' @param subject_ids (`factor`)\cr the subject IDs. |
|
8 |
#' |
|
9 |
#' @return Character vector with "intercept", "within" or "between" for each |
|
10 |
#' design matrix column identified via the names of the vector. |
|
11 |
#' |
|
12 |
#' @keywords internal |
|
13 |
h_within_or_between <- function(x_matrix, subject_ids) { |
|
14 | 19x |
assert_matrix(x_matrix, col.names = "unique", min.cols = 1L) |
15 | 19x |
assert_factor(subject_ids, len = nrow(x_matrix)) |
16 | ||
17 | 19x |
n_subjects <- length(unique(subject_ids)) |
18 | 19x |
vapply( |
19 | 19x |
colnames(x_matrix), |
20 | 19x |
function(x) { |
21 | 112x |
if (x == "(Intercept)") { |
22 | 19x |
"intercept" |
23 |
} else { |
|
24 | 93x |
n_unique <- nrow(unique(cbind(x_matrix[, x], subject_ids))) |
25 | 43x |
if (n_unique > n_subjects) "within" else "between" |
26 |
} |
|
27 |
}, |
|
28 | 19x |
character(1L) |
29 |
) |
|
30 |
} |
|
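# Illustrative sketch (not package code): a column that is constant within each
# subject is classified as "between", otherwise as "within". With a tiny
# hypothetical design matrix:
local({
  x <- cbind("(Intercept)" = 1, trt = c(0, 0, 1, 1), visit = c(1, 2, 1, 2))
  ids <- factor(c("A", "A", "B", "B"))
  h_within_or_between(x, ids) # trt -> "between", visit -> "within"
})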
31 | ||
32 |
#' Calculation of Between-Within Degrees of Freedom |
|
33 |
#' |
|
34 |
#' @description Used in [h_df_1d_bw()] and [h_df_md_bw()]. |
|
35 |
#' |
|
36 |
#' @param object (`mmrm`)\cr the fitted MMRM. |
|
37 |
#' |
|
38 |
#' @return List with: |
|
39 |
#' - `coefs_between_within` calculated via [h_within_or_between()] |
|
40 |
#' - `ddf_between` |
|
41 |
#' - `ddf_within` |
|
42 |
#' |
|
43 |
#' @keywords internal |
|
44 |
h_df_bw_calc <- function(object) { |
|
45 | 18x |
assert_class(object, "mmrm") |
46 | ||
47 | 18x |
n_subjects <- component(object, "n_subjects") |
48 | 18x |
n_obs <- component(object, "n_obs") |
49 | 18x |
x_mat <- component(object, "x_matrix") |
50 | ||
51 | 18x |
subject_var <- component(object, "subject_var") |
52 | 18x |
full_frame <- component(object, "full_frame") |
53 | 18x |
subject_ids <- full_frame[[subject_var]] |
54 | ||
55 | 18x |
coefs_between_within <- h_within_or_between(x_mat, subject_ids) |
56 | 18x |
n_coefs_between <- sum(coefs_between_within == "between") |
57 | 18x |
n_intercept <- sum(coefs_between_within == "intercept") |
58 | 18x |
n_coefs_within <- sum(coefs_between_within == "within") |
59 | 18x |
ddf_between <- n_subjects - n_coefs_between - n_intercept |
60 | 18x |
ddf_within <- n_obs - n_subjects - n_coefs_within |
61 | ||
62 | 18x |
list( |
63 | 18x |
coefs_between_within = coefs_between_within, |
64 | 18x |
ddf_between = ddf_between, |
65 | 18x |
ddf_within = ddf_within |
66 |
) |
|
67 |
} |
|
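# Worked example (hypothetical counts, not package code): with 50 subjects,
# 200 observations, 1 intercept, 2 between-subject and 3 within-subject
# coefficients, the resulting degrees of freedom are
#   ddf_between = 50 - 2 - 1 = 47
#   ddf_within = 200 - 50 - 3 = 147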
68 | ||
69 |
#' Assign Minimum Degrees of Freedom Given Involved Coefficients |
|
70 |
#' |
|
71 |
#' @description Used in [h_df_1d_bw()] and [h_df_md_bw()]. |
|
72 |
#' |
|
73 |
#' @param bw_calc (`list`)\cr from [h_df_bw_calc()]. |
|
74 |
#' @param is_coef_involved (`logical`)\cr whether each coefficient is involved |
|
75 |
#' in the contrast. |
|
76 |
#' |
|
77 |
#' @return The minimum of the degrees of freedom assigned to each involved |
|
78 |
#' coefficient according to its between-within categorization. |
|
79 |
#' |
|
80 |
#' @keywords internal |
|
81 |
h_df_min_bw <- function(bw_calc, is_coef_involved) { |
|
82 | 17x |
assert_list(bw_calc) |
83 | 17x |
assert_names(names(bw_calc), identical.to = c("coefs_between_within", "ddf_between", "ddf_within")) |
84 | 17x |
assert_logical(is_coef_involved, len = length(bw_calc$coefs_between_within)) |
85 | 17x |
assert_true(sum(is_coef_involved) > 0) |
86 | ||
87 | 17x |
coef_categories <- bw_calc$coefs_between_within[is_coef_involved] |
88 | 17x |
coef_dfs <- vapply( |
89 | 17x |
X = coef_categories, |
90 | 17x |
FUN = switch, |
91 | 17x |
intercept = bw_calc$ddf_within, |
92 | 17x |
between = bw_calc$ddf_between, |
93 | 17x |
within = bw_calc$ddf_within, |
94 | 17x |
FUN.VALUE = integer(1) |
95 |
) |
|
96 | 17x |
min(coef_dfs) |
97 |
} |
|
98 | ||
99 |
#' Calculation of Between-Within Degrees of Freedom for One-Dimensional Contrast |
|
100 |
#' |
|
101 |
#' @description Used in [df_1d()] if method is "Between-Within". |
|
102 |
#' |
|
103 |
#' @inheritParams h_df_1d_sat |
|
104 |
#' @inherit h_df_1d_sat return |
|
105 |
#' @keywords internal |
|
106 |
h_df_1d_bw <- function(object, contrast) { |
|
107 | 7x |
assert_class(object, "mmrm") |
108 | 7x |
assert_numeric(contrast, len = length(component(object, "beta_est"))) |
109 | ||
110 | 7x |
bw_calc <- h_df_bw_calc(object) |
111 | 7x |
is_coef_involved <- contrast != 0 |
112 | 7x |
df <- h_df_min_bw(bw_calc, is_coef_involved) |
113 | 7x |
h_test_1d(object, contrast, df) |
114 |
} |
|
115 | ||
116 |
#' Calculation of Between-Within Degrees of Freedom for Multi-Dimensional Contrast |
|
117 |
#' |
|
118 |
#' @description Used in [df_md()] if method is "Between-Within". |
|
119 |
#' |
|
120 |
#' @inheritParams h_df_md_sat |
|
121 |
#' @inherit h_df_md_sat return |
|
122 |
#' @keywords internal |
|
123 |
h_df_md_bw <- function(object, contrast) { |
|
124 | 7x |
assert_class(object, "mmrm") |
125 | 7x |
assert_matrix(contrast, mode = "numeric", any.missing = FALSE, ncols = length(component(object, "beta_est"))) |
126 | ||
127 | 7x |
bw_calc <- h_df_bw_calc(object) |
128 | 7x |
is_coef_involved <- apply(X = contrast != 0, MARGIN = 2L, FUN = any) |
129 | 7x |
df <- h_df_min_bw(bw_calc, is_coef_involved) |
130 | 7x |
h_test_md(object, contrast, df) |
131 |
} |
1 |
#' Methods for `mmrm` Objects |
|
2 |
#' |
|
3 |
#' @description `r lifecycle::badge("stable")` |
|
4 |
#' |
|
5 |
#' @param object (`mmrm`)\cr the fitted MMRM including Jacobian and call etc. |
|
6 |
#' @param ... not used. |
|
7 |
#' @return Depends on the method, see Details and Functions. |
|
8 |
#' |
|
9 |
#' @details |
|
10 |
#' When printing the summary of an `mmrm` object, the following will be displayed: |
|
11 |
#' 1. Formula. The formula used in the model. |
|
12 |
#' 2. Data. The data used for analysis, including number of subjects, number of valid observations. |
|
13 |
#' 3. Covariance. The covariance structure and number of variance parameters. |
|
14 |
#' 4. Method. Restricted maximum likelihood (REML) or maximum likelihood (ML). |
|
15 |
#' 5. Model selection criteria. AIC, BIC, log likelihood and deviance. |
|
16 |
#' 6. Coefficients. Coefficients of the covariates. |
|
17 |
#' 7. Covariance estimate. The covariance estimate (for each group). |
|
18 |
#' 1. If the covariance structure is non-spatial, the covariance matrix of all categorical time points available |
|
19 |
#' in the data will be displayed. |
|
20 |
#' 2. If the covariance structure is spatial, the covariance matrix of two time points with unit distance |
|
21 |
#' will be displayed. |
|
22 |
#' |
|
23 |
#' `confint` is used to obtain the confidence intervals for the coefficients. |
|
24 |
#' Please note that these are different from the confidence intervals of the differences |
|
25 |
#' of least squares means from `emmeans`. |
|
26 |
#' |
|
27 |
#' @name mmrm_methods |
|
28 |
#' |
|
29 |
#' @seealso [`mmrm_tmb_methods`], [`mmrm_tidiers`] for additional methods. |
|
30 |
#' |
|
31 |
#' @examples |
|
32 |
#' formula <- FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID) |
|
33 |
#' object <- mmrm(formula, fev_data) |
|
34 |
NULL |
|
35 | ||
36 |
#' Coefficients Table for MMRM Fit |
|
37 |
#' |
|
38 |
#' This is used by [summary.mmrm()] to obtain the coefficients table. |
|
39 |
#' |
|
40 |
#' @param object (`mmrm`)\cr model fit. |
|
41 |
#' |
|
42 |
#' @return Matrix with one row per coefficient and columns |
|
43 |
#' `Estimate`, `Std. Error`, `df`, `t value` and `Pr(>|t|)`. |
|
44 |
#' |
|
45 |
#' @keywords internal |
|
46 |
h_coef_table <- function(object) { |
|
47 | 40x |
assert_class(object, "mmrm") |
48 | ||
49 | 40x |
coef_est <- component(object, "beta_est") |
50 | 40x |
coef_contrasts <- diag(x = rep(1, length(coef_est))) |
51 | 40x |
rownames(coef_contrasts) <- names(coef_est) |
52 | 40x |
coef_table <- t(apply( |
53 | 40x |
coef_contrasts, |
54 | 40x |
MARGIN = 1L, |
55 | 40x |
FUN = function(contrast) unlist(df_1d(object, contrast)) |
56 |
)) |
|
57 | 40x |
assert_names( |
58 | 40x |
colnames(coef_table), |
59 | 40x |
identical.to = c("est", "se", "df", "t_stat", "p_val") |
60 |
) |
|
61 | 40x |
colnames(coef_table) <- c("Estimate", "Std. Error", "df", "t value", "Pr(>|t|)") |
62 | ||
63 | 40x |
coef_aliased <- component(object, "beta_aliased") |
64 | 40x |
if (any(coef_aliased)) { |
65 | 2x |
names_coef_na <- names(which(coef_aliased)) |
66 | 2x |
coef_na_table <- matrix( |
67 | 2x |
data = NA, |
68 | 2x |
nrow = length(names_coef_na), |
69 | 2x |
ncol = ncol(coef_table), |
70 | 2x |
dimnames = list(names_coef_na, colnames(coef_table)) |
71 |
) |
|
72 | 2x |
coef_table <- rbind(coef_table, coef_na_table)[names(coef_aliased), ] |
73 |
} |
|
74 | ||
75 | 40x |
coef_table |
76 |
} |
|
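# Illustrative sketch (not package code), assuming `fit` is an mmrm model fit as
# in the examples above: each row is the one-dimensional test of the
# corresponding unit contrast, e.g. for the first coefficient
#   contrast <- as.numeric(seq_along(coef(fit)) == 1)
#   unlist(df_1d(fit, contrast)) # matches h_coef_table(fit)[1, ] up to column names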
77 | ||
78 |
#' @describeIn mmrm_methods summarizes the MMRM fit results. |
|
79 |
#' @exportS3Method |
|
80 |
#' @examples |
|
81 |
#' # Summary: |
|
82 |
#' summary(object) |
|
83 |
summary.mmrm <- function(object, ...) { |
|
84 | 20x |
aic_list <- list( |
85 | 20x |
AIC = AIC(object), |
86 | 20x |
BIC = BIC(object), |
87 | 20x |
logLik = logLik(object), |
88 | 20x |
deviance = deviance(object) |
89 |
) |
|
90 | 20x |
coefficients <- h_coef_table(object) |
91 | 20x |
call <- stats::getCall(object) |
92 | 20x |
components <- component(object, c( |
93 | 20x |
"cov_type", "reml", "n_groups", "n_theta", |
94 | 20x |
"n_subjects", "n_timepoints", "n_obs", |
95 | 20x |
"beta_vcov", "varcor" |
96 |
)) |
|
97 | 20x |
components$method <- object$method |
98 | 20x |
components$vcov <- object$vcov |
99 | 20x |
structure( |
100 | 20x |
c( |
101 | 20x |
components, |
102 | 20x |
list( |
103 | 20x |
coefficients = coefficients, |
104 | 20x |
n_singular_coefs = sum(component(object, "beta_aliased")), |
105 | 20x |
aic_list = aic_list, |
106 | 20x |
call = call |
107 |
) |
|
108 |
), |
|
109 | 20x |
class = "summary.mmrm" |
110 |
) |
|
111 |
} |
|
112 | ||
113 |
#' Printing MMRM Function Call |
|
114 |
#' |
|
115 |
#' This is used in [print.summary.mmrm()]. |
|
116 |
#' |
|
117 |
#' @param call (`call`)\cr original [mmrm()] function call. |
|
118 |
#' @param n_obs (`int`)\cr number of observations. |
|
119 |
#' @param n_subjects (`int`)\cr number of subjects. |
|
120 |
#' @param n_timepoints (`int`)\cr number of timepoints. |
|
121 |
#' |
|
122 |
#' @keywords internal |
|
123 |
h_print_call <- function(call, n_obs, n_subjects, n_timepoints) { |
|
124 | 9x |
pass <- 0 |
125 | 9x |
if (!is.null(tmp <- call$formula)) { |
126 | 9x |
cat("Formula: ", deparse(tmp), fill = TRUE) |
127 | 9x |
rhs <- tmp[[2]] |
128 | 9x |
pass <- nchar(deparse(rhs)) |
129 |
} |
|
130 | 9x |
if (!is.null(call$data)) { |
131 | 9x |
cat( |
132 | 9x |
"Data: ", deparse(call$data), "(used", n_obs, "observations from", |
133 | 9x |
n_subjects, "subjects with maximum", n_timepoints, "timepoints)", |
134 | 9x |
fill = TRUE |
135 |
) |
|
136 |
} |
|
137 |
# Display the expression of weights |
|
138 | 9x |
if (!is.null(call$weights)) { |
139 | 4x |
cat("Weights: ", deparse(call$weights), fill = TRUE) |
140 |
} |
|
141 |
} |
|
142 | ||
143 |
#' Printing MMRM Covariance Type |
|
144 |
#' |
|
145 |
#' This is used in [print.summary.mmrm()]. |
|
146 |
#' |
|
147 |
#' @param cov_type (`string`)\cr covariance structure abbreviation. |
|
148 |
#' @param n_theta (`count`)\cr number of variance parameters. |
|
149 |
#' @param n_groups (`count`)\cr number of groups. |
|
150 |
#' @keywords internal |
|
151 |
h_print_cov <- function(cov_type, n_theta, n_groups) { |
|
152 | 9x |
assert_string(cov_type) |
153 | 9x |
assert_count(n_theta, positive = TRUE) |
154 | 9x |
assert_count(n_groups, positive = TRUE) |
155 | 9x |
cov_definition <- switch(cov_type, |
156 | 9x |
us = "unstructured", |
157 | 9x |
toep = "Toeplitz", |
158 | 9x |
toeph = "heterogeneous Toeplitz", |
159 | 9x |
ar1 = "auto-regressive order one", |
160 | 9x |
ar1h = "heterogeneous auto-regressive order one", |
161 | 9x |
ad = "ante-dependence", |
162 | 9x |
adh = "heterogeneous ante-dependence", |
163 | 9x |
cs = "compound symmetry", |
164 | 9x |
csh = "heterogeneous compound symmetry", |
165 | 9x |
sp_exp = "spatial exponential" |
166 |
) |
|
167 | ||
168 | 9x |
catstr <- sprintf( |
169 | 9x |
"Covariance: %s (%d variance parameters%s)\n", |
170 | 9x |
cov_definition, |
171 | 9x |
n_theta, |
172 | 9x |
ifelse(n_groups == 1L, "", sprintf(" of %d groups", n_groups)) |
173 |
) |
|
174 | 9x |
cat(catstr) |
175 |
} |
|
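# Illustrative sketch (not package code):
#   h_print_cov("us", 10, 1L) # describes an unstructured covariance with 10 variance parameters
#   h_print_cov("cs", 4, 2L) # compound symmetry, mentioning the 2 groups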
176 | ||
177 |
#' Printing AIC and other Model Fit Criteria |
|
178 |
#' |
|
179 |
#' This is used in [print.summary.mmrm()]. |
|
180 |
#' |
|
181 |
#' @param aic_list (`list`)\cr list of model fit criteria, part of the result from [summary.mmrm()]. |
|
182 |
#' @param digits (`number`)\cr number of decimal places used with [round()]. |
|
183 |
#' |
|
184 |
#' @keywords internal |
|
185 |
h_print_aic_list <- function(aic_list, |
|
186 |
digits = 1) { |
|
187 | 6x |
diag_vals <- round(unlist(aic_list), digits) |
188 | 6x |
diag_vals <- format(diag_vals) |
189 | 6x |
print(diag_vals, quote = FALSE) |
190 |
} |
|
191 | ||
192 |
#' @describeIn mmrm_methods prints the MMRM fit summary. |
|
193 |
#' @exportS3Method |
|
194 |
#' @keywords internal |
|
195 |
print.summary.mmrm <- function(x, |
|
196 |
digits = max(3, getOption("digits") - 3), |
|
197 |
signif.stars = getOption("show.signif.stars"), # nolint |
|
198 |
...) { |
|
199 | 5x |
cat("mmrm fit\n\n") |
200 | 5x |
h_print_call(x$call, x$n_obs, x$n_subjects, x$n_timepoints) |
201 | 5x |
h_print_cov(x$cov_type, x$n_theta, x$n_groups) |
202 | 5x |
cat("Method: ", x$method, "\n", sep = "") |
203 | 5x |
cat("Vcov Method: ", x$vcov, "\n", sep = "") |
204 | 5x |
cat("Inference: ") |
205 | 5x |
cat(ifelse(x$reml, "REML", "ML")) |
206 | 5x |
cat("\n\n") |
207 | 5x |
cat("Model selection criteria:\n") |
208 | 5x |
h_print_aic_list(x$aic_list) |
209 | 5x |
cat("\n") |
210 | 5x |
cat("Coefficients: ") |
211 | 5x |
if (x$n_singular_coefs > 0) { |
212 | 1x |
cat("(", x$n_singular_coefs, " not defined because of singularities)", sep = "") |
213 |
} |
|
214 | 5x |
cat("\n") |
215 | 5x |
stats::printCoefmat( |
216 | 5x |
x$coefficients, |
217 | 5x |
zap.ind = 3, |
218 | 5x |
digits = digits, |
219 | 5x |
signif.stars = signif.stars |
220 |
) |
|
221 | 5x |
cat("\n") |
222 | 5x |
cat("Covariance estimate:\n") |
223 | 5x |
if (is.list(x$varcor)) { |
224 | 1x |
for (v in names(x$varcor)) { |
225 | 2x |
cat(sprintf("Group: %s\n", v)) |
226 | 2x |
print(round(x$varcor[[v]], digits = digits)) |
227 |
} |
|
228 |
} else { |
|
229 | 4x |
print(round(x$varcor, digits = digits)) |
230 |
} |
|
231 | 5x |
cat("\n") |
232 | 5x |
invisible(x) |
233 |
} |
|
234 | ||
235 | ||
236 |
#' @describeIn mmrm_methods obtain the confidence intervals for the coefficients. |
|
237 |
#' @exportS3Method |
|
238 |
#' @examples |
|
239 |
#' # Confidence Interval: |
|
240 |
#' confint(object) |
|
241 |
confint.mmrm <- function(object, parm, level = 0.95, ...) { |
|
242 | 20x |
cf <- coef(object) |
243 | 20x |
pnames <- names(cf) |
244 | 20x |
if (missing(parm)) { |
245 | 15x |
parm <- pnames |
246 |
} |
|
247 | 20x |
assert( |
248 | 20x |
check_subset(parm, pnames), |
249 | 20x |
check_integerish(parm, lower = 1L, upper = length(cf)) |
250 |
) |
|
251 | 2x |
if (is.numeric(parm)) parm <- pnames[parm] |
252 | 18x |
assert_number(level, lower = 0, upper = 1) |
253 | 18x |
a <- (1 - level) / 2 |
254 | 18x |
pct <- paste(format(100 * c(a, 1 - a), trim = TRUE, scientific = FALSE, digits = 3), "%") |
255 | 18x |
coef_table <- h_coef_table(object) |
256 | 18x |
df <- coef_table[parm, "df"] |
257 | 18x |
ses <- coef_table[parm, "Std. Error"] |
258 | 18x |
fac <- stats::qt(a, df = df) |
259 | 18x |
ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, pct)) |
260 | 18x |
sefac <- ses * fac |
261 | 18x |
ci[] <- cf[parm] + c(sefac, -sefac) |
262 | 18x |
ci |
263 |
} |
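# Illustrative sketch (not package code), assuming `object` is an mmrm fit as in
# the examples above: for the default level of 0.95 the interval of a single
# coefficient is equivalent to
#   tbl <- h_coef_table(object)
#   coef(object)[1] + c(-1, 1) * stats::qt(0.975, df = tbl[1, "df"]) * tbl[1, "Std. Error"]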
1 |
#' Tidying Methods for `mmrm` Objects |
|
2 |
#' |
|
3 |
#' @description `r lifecycle::badge("stable")` |
|
4 |
#' |
|
5 |
#' These methods tidy the estimates from an `mmrm` object into a |
|
6 |
#' summary. |
|
7 |
#' |
|
8 |
#' @param x (`mmrm`)\cr fitted model. |
|
9 |
#' @param conf.int (`flag`)\cr if `TRUE`, columns for the lower (`conf.low`) and upper bounds |
|
10 |
#' (`conf.high`) of coefficient estimates are included. |
|
11 |
#' @param conf.level (`number`)\cr defines the range of the optional confidence interval. |
|
12 |
#' @param newdata (`data.frame` or `NULL`)\cr optional new data frame. |
|
13 |
#' @param se_fit (`flag`)\cr whether to return standard errors of fit. |
|
14 |
#' @param interval (`string`)\cr type of interval calculation. |
|
15 |
#' @param type.residuals (`string`)\cr passed on to [residuals.mmrm_tmb()]. |
|
16 |
#' @param ... only used by `augment()` to pass arguments to the [predict.mmrm_tmb()] method. |
|
17 |
#' |
|
18 |
#' @name mmrm_tidiers |
|
19 |
#' @aliases mmrm_tidiers |
|
20 |
#' |
|
21 |
#' @seealso [`mmrm_methods`], [`mmrm_tmb_methods`] for additional methods. |
|
22 |
#' |
|
23 |
#' @examples |
|
24 |
#' fit <- mmrm( |
|
25 |
#' formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), |
|
26 |
#' data = fev_data |
|
27 |
#' ) |
|
28 |
NULL |
|
29 | ||
30 |
#' @describeIn mmrm_tidiers derives tidy `tibble` from an `mmrm` object. |
|
31 |
#' @exportS3Method |
|
32 |
#' @examples |
|
33 |
#' # Applying tidy method to return summary table of covariate estimates. |
|
34 |
#' fit |> tidy() |
|
35 |
#' fit |> tidy(conf.int = TRUE, conf.level = 0.9) |
|
36 |
tidy.mmrm <- function(x, # nolint |
|
37 |
conf.int = FALSE, # nolint |
|
38 |
conf.level = 0.95, # nolint |
|
39 |
...) { |
|
40 | 5x |
assert_flag(conf.int) |
41 | 5x |
assert_number(conf.level, lower = 0, upper = 1) |
42 | 5x |
tbl <- tibble::as_tibble(summary(x)$coefficients, rownames = "term") |
43 | 5x |
colnames(tbl) <- c("term", "estimate", "std.error", "df", "statistic", "p.value") |
44 | 5x |
coefs <- coef(x) |
45 | 5x |
if (length(coefs) != nrow(tbl)) { |
46 | ! |
coefs <- tibble::enframe(coefs, name = "term", value = "estimate") |
47 | ! |
tbl <- merge(coefs, tbl, by = c("term", "estimate")) |
48 |
} |
|
49 | 5x |
if (conf.int) { |
50 | 4x |
ci <- h_tbl_confint_terms(x, level = conf.level) |
51 | 4x |
tbl <- tibble::as_tibble(merge(tbl, ci, by = "term")) |
52 |
} |
|
53 | 5x |
tbl |
54 |
} |
|
55 | ||
56 |
#' @describeIn mmrm_tidiers derives `glance` `tibble` from an `mmrm` object. |
|
57 |
#' @exportS3Method |
|
58 |
#' @examples |
|
59 |
#' # Applying glance method to return summary table of goodness of fit statistics. |
|
60 |
#' fit |> glance() |
|
61 |
glance.mmrm <- function(x, ...) { # nolint |
|
62 | 1x |
tibble::as_tibble(summary(x)$aic_list) |
63 |
} |
|
64 | ||
65 |
#' @describeIn mmrm_tidiers derives `augment` `tibble` from an `mmrm` object. |
|
66 |
#' @exportS3Method |
|
67 |
#' @examples |
|
68 |
#' # Applying augment method to return merged `tibble` of model data, fitted and residuals. |
|
69 |
#' fit |> augment() |
|
70 |
#' fit |> augment(interval = "confidence") |
|
71 |
#' fit |> augment(type.residuals = "pearson") |
|
72 |
augment.mmrm <- function(x, # nolint |
|
73 |
newdata = NULL, |
|
74 |
interval = c("none", "confidence", "prediction"), |
|
75 |
se_fit = (interval != "none"), |
|
76 |
type.residuals = c("response", "pearson", "normalized"), # nolint |
|
77 |
...) { |
|
78 | 9x |
type.residuals <- match.arg(type.residuals) # nolint |
79 | 9x |
resid_df <- NULL |
80 | 9x |
if (is.null(newdata)) { |
81 | 4x |
newdata <- stats::get_all_vars(x, data = stats::na.omit(x$data)) |
82 | 4x |
resid_df <- data.frame( |
83 | 4x |
.rownames = rownames(newdata), |
84 | 4x |
.resid = unname(residuals(x, type = type.residuals)) |
85 |
) |
|
86 |
} |
|
87 | 9x |
interval <- match.arg(interval) |
88 | ||
89 | 9x |
tbl <- h_newdata_add_pred( |
90 | 9x |
x, |
91 | 9x |
newdata = newdata, |
92 | 9x |
se_fit = se_fit, |
93 | 9x |
interval = interval, |
94 |
... |
|
95 |
) |
|
96 | 9x |
if (!is.null(resid_df)) { |
97 | 4x |
tbl <- merge(tbl, resid_df, by = ".rownames") |
98 | 4x |
tbl$.rownames <- as.numeric(tbl$.rownames) |
99 | 4x |
tbl <- tbl[order(tbl$.rownames), , drop = FALSE] |
100 |
} |
|
101 | 9x |
tibble::as_tibble(tbl) |
102 |
} |
|
103 | ||
104 |
#' Extract `tibble` with Confidence Intervals and Term Names |
|
105 |
#' |
|
106 |
#' This is used in [tidy.mmrm()]. |
|
107 |
#' |
|
108 |
#' @param x (`mmrm`)\cr fit object. |
|
109 |
#' @param ... passed to [stats::confint()], hence not used at the moment. |
|
110 |
#' |
|
111 |
#' @return A `tibble` with `term`, `conf.low`, `conf.high` columns. |
|
112 |
#' |
|
113 |
#' @keywords internal |
|
114 |
h_tbl_confint_terms <- function(x, ...) { |
|
115 | 8x |
df <- stats::confint(x, ...) |
116 | 8x |
tbl <- tibble::as_tibble(df, rownames = "term", .name_repair = "minimal") |
117 | 8x |
names(tbl) <- c("term", "conf.low", "conf.high") |
118 | 8x |
tbl |
119 |
} |
|
120 | ||
121 |
#' Add Prediction Results to New Data |
|
122 |
#' |
|
123 |
#' This is used in [augment.mmrm()]. |
|
124 |
#' |
|
125 |
#' @param x (`mmrm`)\cr fit. |
|
126 |
#' @param newdata (`data.frame`)\cr data to predict. |
|
127 |
#' @param se_fit (`flag`)\cr whether to return standard error of prediction, |
|
128 |
#' can only be used when `interval` is not "none". |
|
129 |
#' @param interval (`string`)\cr type of interval. |
|
130 |
#' @param ... passed to [predict.mmrm_tmb()]. |
|
131 |
#' |
|
132 |
#' @return The `newdata` as a `tibble` with additional columns `.fitted`, |
|
133 |
#' `.lower`, `.upper` (if interval is not `none`) and `.se.fit` (if `se_fit` |
|
134 |
#' requested). |
|
135 |
#' |
|
136 |
#' @keywords internal |
|
137 |
h_newdata_add_pred <- function(x, |
|
138 |
newdata, |
|
139 |
se_fit, |
|
140 |
interval, |
|
141 |
...) { |
|
142 | 13x |
assert_class(x, "mmrm") |
143 | 13x |
assert_data_frame(newdata) |
144 | 13x |
assert_flag(se_fit) |
145 | 13x |
assert_string(interval) |
146 | 13x |
if (interval == "none") { |
147 | 7x |
assert_false(se_fit) |
148 |
} |
|
149 | ||
150 | 12x |
tbl <- h_df_to_tibble(newdata) |
151 | 12x |
pred_results <- predict( |
152 | 12x |
x, |
153 | 12x |
newdata = newdata, |
154 | 12x |
na.action = stats::na.pass, |
155 | 12x |
se.fit = se_fit, |
156 | 12x |
interval = interval, |
157 |
... |
|
158 |
) |
|
159 | 12x |
if (interval == "none") { |
160 | 6x |
assert_numeric(pred_results) |
161 | 6x |
tbl$.fitted <- unname(pred_results) |
162 |
} else { |
|
163 | 6x |
assert_matrix(pred_results) |
164 | 6x |
tbl$.fitted <- unname(pred_results[, "fit"]) |
165 | 6x |
tbl$.lower <- unname(pred_results[, "lwr"]) |
166 | 6x |
tbl$.upper <- unname(pred_results[, "upr"]) |
167 |
} |
|
168 | 12x |
if (se_fit) { |
169 | 5x |
tbl$.se.fit <- unname(pred_results[, "se"]) |
170 |
} |
|
171 | 12x |
tbl |
172 |
} |
|
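# Illustrative sketch (not package code), assuming `fit` is an mmrm model fit as
# in the examples above:
#   h_newdata_add_pred(fit, newdata = fev_data[1:5, ], se_fit = TRUE, interval = "prediction")
# returns those five rows with additional `.fitted`, `.lower`, `.upper` and
# `.se.fit` columns.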
173 | ||
174 |
#' Coerce a Data Frame to a `tibble` |
|
175 |
#' |
|
176 |
#' This is used in [h_newdata_add_pred()]. |
|
177 |
#' |
|
178 |
#' @details This is only a thin wrapper around [tibble::as_tibble()], except |
|
179 |
#' that it gives a useful error message and checks for `rownames`, adding them |
|
180 |
#' as a new column `.rownames` if they are not just a numeric sequence, as |
|
181 |
#' per the [tibble::has_rownames()] decision. |
|
182 |
#' |
|
183 |
#' @param data (`data.frame`)\cr what to coerce. |
|
184 |
#' |
|
185 |
#' @return The `data` as a `tibble`, potentially with a `.rownames` column. |
|
186 |
#' |
|
187 |
#' @keywords internal |
|
188 |
h_df_to_tibble <- function(data) { |
|
189 | 15x |
tryCatch(tbl <- tibble::as_tibble(data), error = function(cnd) { |
190 | 1x |
stop("Could not coerce data to `tibble`. Try explicitly passing a", |
191 | 1x |
"dataset to either the `data` or `newdata` argument.", |
192 | 1x |
call. = FALSE |
193 |
) |
|
194 |
}) |
|
195 | 14x |
if (tibble::has_rownames(data)) { |
196 | 5x |
tbl <- tibble::add_column(tbl, .rownames = rownames(data), .before = TRUE) |
197 |
} |
|
198 | 14x |
tbl |
199 |
} |
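# Illustrative sketch (not package code): a data frame with non-default row
# names gains a `.rownames` column.
#   h_df_to_tibble(data.frame(x = 1:2, row.names = c("a", "b")))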
1 |
#' Calculation of Residual Degrees of Freedom for One-Dimensional Contrast |
|
2 |
#' |
|
3 |
#' @description Used in [df_1d()] if method is |
|
4 |
#' "Residual". |
|
5 |
#' |
|
6 |
#' @inheritParams h_df_1d_sat |
|
7 |
#' @inherit h_df_1d_sat return |
|
8 |
#' @keywords internal |
|
9 |
h_df_1d_res <- function(object, contrast) { |
|
10 | 1x |
assert_class(object, "mmrm") |
11 | 1x |
assert_numeric(contrast, len = length(component(object, "beta_est"))) |
12 | ||
13 | 1x |
df <- component(object, "n_obs") - length(component(object, "beta_est")) |
14 | ||
15 | 1x |
h_test_1d(object, contrast, df) |
16 |
} |
|
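# Worked example (hypothetical counts, not package code): with 200 observations
# and 6 estimated coefficients the residual degrees of freedom are 200 - 6 = 194.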
17 | ||
18 |
#' Calculation of Residual Degrees of Freedom for Multi-Dimensional Contrast |
|
19 |
#' |
|
20 |
#' @description Used in [df_md()] if method is "Residual". |
|
21 |
#' |
|
22 |
#' @inheritParams h_df_md_sat |
|
23 |
#' @inherit h_df_md_sat return |
|
24 |
#' @keywords internal |
|
25 |
h_df_md_res <- function(object, contrast) { |
|
26 | 1x |
assert_class(object, "mmrm") |
27 | 1x |
assert_matrix(contrast, mode = "numeric", any.missing = FALSE, ncols = length(component(object, "beta_est"))) |
28 | ||
29 | 1x |
df <- component(object, "n_obs") - length(component(object, "beta_est")) |
30 | ||
31 | 1x |
h_test_md(object, contrast, df) |
32 |
} |
1 |
#' Register `mmrm` For Use With `tidymodels` |
|
2 |
#' |
|
3 |
#' @inheritParams base::requireNamespace |
|
4 |
#' @return A logical value indicating whether registration was successful. |
|
5 |
#' |
|
6 |
#' @details We can use `parsnip::show_model_info("linear_reg")` to check the |
|
7 |
#' registration with `parsnip` and thus the wider `tidymodels` ecosystem. |
|
8 |
#' |
|
9 |
#' @keywords internal |
|
10 |
parsnip_add_mmrm <- function(quietly = FALSE) { |
|
11 | 1x |
if (!requireNamespace("parsnip", quietly = quietly)) { |
12 | ! |
return(FALSE) |
13 |
} |
|
14 | ||
15 | 1x |
parsnip::set_model_engine( |
16 | 1x |
model = "linear_reg", |
17 | 1x |
eng = "mmrm", |
18 | 1x |
mode = "regression" |
19 |
) |
|
20 | ||
21 | 1x |
parsnip::set_dependency( |
22 | 1x |
pkg = "mmrm", |
23 | 1x |
model = "linear_reg", |
24 | 1x |
eng = "mmrm", |
25 | 1x |
mode = "regression" |
26 |
) |
|
27 | ||
28 | 1x |
parsnip::set_encoding( |
29 | 1x |
model = "linear_reg", |
30 | 1x |
eng = "mmrm", |
31 | 1x |
mode = "regression", |
32 | 1x |
options = list( |
33 | 1x |
predictor_indicators = "none", |
34 | 1x |
compute_intercept = FALSE, |
35 | 1x |
remove_intercept = FALSE, |
36 | 1x |
allow_sparse_x = TRUE |
37 |
) |
|
38 |
) |
|
39 | ||
40 | 1x |
parsnip::set_fit( |
41 | 1x |
model = "linear_reg", |
42 | 1x |
eng = "mmrm", |
43 | 1x |
mode = "regression", |
44 | 1x |
value = list( |
45 | 1x |
interface = "formula", |
46 | 1x |
protect = c("formula", "data", "weights"), |
47 | 1x |
data = c(formula = "formula", data = "data", weights = "weights"), |
48 | 1x |
func = c(pkg = "mmrm", fun = "mmrm"), |
49 | 1x |
defaults = list() |
50 |
) |
|
51 |
) |
|
52 | ||
53 | 1x |
parsnip::set_pred( |
54 | 1x |
model = "linear_reg", |
55 | 1x |
eng = "mmrm", |
56 | 1x |
mode = "regression", |
57 | 1x |
type = "numeric", |
58 | 1x |
value = parsnip::pred_value_template( |
59 |
# This is boilerplate. |
|
60 | 1x |
func = c(fun = "predict"), |
61 | 1x |
object = quote(object$fit), |
62 | 1x |
newdata = quote(new_data) |
63 |
) |
|
64 |
) |
|
65 | ||
66 | 1x |
parsnip::set_pred( |
67 | 1x |
model = "linear_reg", |
68 | 1x |
eng = "mmrm", |
69 | 1x |
mode = "regression", |
70 |
# This type allows to pass arguments via `opts` to `parsnip::predict.model_fit`. |
|
71 | 1x |
type = "raw", |
72 | 1x |
value = parsnip::pred_value_template( |
73 |
# This is boilerplate. |
|
74 | 1x |
func = c(fun = "predict"), |
75 | 1x |
object = quote(object$fit), |
76 | 1x |
newdata = quote(new_data) |
77 |
# We don't specify additional argument defaults here since otherwise |
|
78 |
# the user is not able to change them (they will be fixed). |
|
79 |
) |
|
80 |
) |
|
81 | ||
82 | 1x |
TRUE |
83 |
} |
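# Illustrative sketch (not package code, assuming the parsnip package is
# installed and the registration above has run): an MMRM can then be fitted
# through the parsnip interface, e.g.
#   parsnip_add_mmrm()
#   spec <- parsnip::set_engine(parsnip::linear_reg(), "mmrm")
#   parsnip::fit(spec, FEV1 ~ RACE + us(AVISIT | USUBJID), data = fev_data)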
1 |
#' Search For the Position of a Symbol |
|
2 |
#' |
|
3 |
#' A thin wrapper around [base::Position()] to search through a list of language |
|
4 |
#' objects, as produced by [flatten_call()] or [flatten_expr()], for the |
|
5 |
#' presence of a specific symbol. |
|
6 |
#' |
|
7 |
#' @param x (`list` of `language`)\cr a list of language objects in which to |
|
8 |
#' search for a specific symbol. |
|
9 |
#' @param sym (`name` or `symbol` or `character`)\cr a symbol to search for in |
|
10 |
#' `x`. |
|
11 |
#' @param ... Additional arguments passed to `Position()`. |
|
12 |
#' |
|
13 |
#' @return The position of the symbol if found, or the `nomatch` value |
|
14 |
#' otherwise. |
|
15 |
#' |
|
16 |
#' @keywords internal |
|
17 |
position_symbol <- function(x, sym, ...) { |
|
18 | 550x |
Position(function(i) identical(i, as.symbol(sym)), x, ...) |
19 |
} |
|
20 | ||
21 |
#' Flatten Expressions for Non-standard Evaluation |
|
22 |
#' |
|
23 |
#' Used primarily to support the parsing of covariance structure definitions |
|
24 |
#' from formulas, these functions flatten the syntax tree into a hierarchy-less |
|
25 |
#' grammar, allowing for parsing that doesn't abide by R's native operator |
|
26 |
#' precedence. |
|
27 |
#' |
|
28 |
#' Where \code{1 + 2 | 3} in R's syntax tree is \code{(|, (+, 1, 2), 3)}, |
|
29 |
#' flattening it into its visual order produces \code{(1, +, 2, |, 3)}, which |
|
30 |
#' makes for more fluent interpretation of non-standard grammar rules used in |
|
31 |
#' formulas. |
|
32 |
#' |
|
33 |
#' @param call,expr (`language`)\cr a language object to flatten. |
|
34 |
#' |
|
35 |
#' @return A list of atomic values, symbols, infix operator names and |
|
36 |
#' subexpressions. |
|
37 |
#' |
|
38 |
#' @name flat_expr |
|
39 |
#' @keywords internal |
|
40 |
NULL |
|
41 | ||
42 |
#' @describeIn flat_expr |
|
43 |
#' Flatten a call into a list of names and argument expressions. |
|
44 |
#' |
|
45 |
#' The call name and all arguments are flattened into the same list, meaning a |
|
46 |
#' call of the form \code{sp_exp(a, b, c | d / e)} produces a list of the form |
|
47 |
#' \code{(sp_exp, a, b, c, |, d, /, e)}. |
|
48 |
#' |
|
49 |
#' ``` |
|
50 |
#' flatten_call(quote(sp_exp(a, b, c | d / e))) |
|
51 |
#' ``` |
|
52 |
#' |
|
53 |
#' @keywords internal |
|
54 |
flatten_call <- function(call) { |
|
55 | 275x |
flattened_args <- unlist(lapply(call[-1], flatten_expr)) |
56 | 275x |
c(flatten_expr(call[[1]]), flattened_args) |
57 |
} |
|
58 | ||
59 |
#' @describeIn flat_expr |
|
60 |
#' Flatten nested expressions. |
|
61 |
#' |
|
62 |
#' ``` |
|
63 |
#' flatten_expr(quote(1 + 2 + 3 | 4)) |
|
64 |
#' ``` |
|
65 |
#' |
|
66 |
#' @keywords internal |
|
67 |
flatten_expr <- function(expr) { |
|
68 | 1235x |
if (length(expr) > 1 && is_infix(expr[[1]])) { |
69 | 332x |
op <- list(expr[[1]]) |
70 | 332x |
lhs <- flatten_expr(expr[[2]]) |
71 | 332x |
rhs <- flatten_expr(expr[[3]]) |
72 | 332x |
c(lhs, op, rhs) |
73 |
} else { |
|
74 | 903x |
list(expr) |
75 |
} |
|
76 |
} |
|
77 | ||
78 |
#' Extract Right-Hand-Side (rhs) from Formula |
|
79 |
#' |
|
80 |
#' @param f (`formula`)\cr a formula. |
|
81 |
#' |
|
82 |
#' @return A formula without a response, derived from the right-hand-side of the |
|
83 |
#' formula, `f`. |
|
84 |
#' |
|
85 |
#' ``` |
|
86 |
#' formula_rhs(a ~ b + c) |
|
87 |
#' formula_rhs(~ b + c) |
|
88 |
#' ``` |
|
89 |
#' |
|
90 |
#' @keywords internal |
|
91 |
formula_rhs <- function(f) { |
|
92 | 294x |
if (length(f) == 2) { |
93 | 9x |
f |
94 |
} else { |
|
95 | 285x |
f[-2] |
96 |
} |
|
97 |
} |
|
98 | ||
99 |
#' Test Whether a Symbol is an Infix Operator |
|
100 |
#' |
|
101 |
#' @param name (`symbol` or `name` or `string`)\cr a possible reference to an |
|
102 |
#' infix operator to check. |
|
103 |
#' |
|
104 |
#' @return A logical indicating whether the name is the name of an infix |
|
105 |
#' operator. |
|
106 |
#' |
|
107 |
#' ``` |
|
108 |
#' is_infix(as.name("|")) |
|
109 |
#' is_infix("|") |
|
110 |
#' is_infix("c") |
|
111 |
#' ``` |
|
112 |
#' |
|
113 |
#' @keywords internal |
|
114 |
is_infix <- function(name) { |
|
115 | 339x |
"Ops" %in% methods::getGroup(as.character(name), recursive = TRUE) |
116 |
} |
|
117 | ||
118 |
#' Format Symbol Objects |
|
119 |
#' |
|
120 |
#' For printing, variable names are converted to symbols and deparsed to use R's |
|
121 |
#' built-in formatting of variables that may contain spaces or quote characters. |
|
122 |
#' |
|
123 |
#' @param x (`character`)\cr a vector of variable names. |
|
124 |
#' |
|
125 |
#' @return A formatted string of comma-separated variables. |
|
126 |
#' |
|
127 |
#' @keywords internal |
|
128 |
format_symbols <- function(x) { |
|
129 | 12x |
paste0(collapse = ", ", lapply(x, function(i) { |
130 | 16x |
utils::capture.output(as.symbol(i)) |
131 |
})) |
|
132 |
} |
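# Illustrative sketch (not package code): names that need quoting are rendered
# with backticks.
#   format_symbols(c("treatment arm", "AVISIT")) # "`treatment arm`, AVISIT"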
1 |
#' Obtain Empirical/Jackknife/Bias-Reduced Covariance |
|
2 |
#' |
|
3 |
#' @description Obtain the empirical, Jackknife or bias-reduced covariance for \eqn{\beta}. |
|
4 |
#' Used in `mmrm` fitting if method is "Empirical", "Empirical-Jackknife" or |
|
5 |
#' "Empirical-Bias-Reduced". |
|
6 |
#' |
|
7 |
#' @param tmb_data (`mmrm_tmb_data`)\cr produced by [h_mmrm_tmb_data()]. |
|
8 |
#' @param theta (`numeric`)\cr theta estimate. |
|
9 |
#' @param beta (`numeric`)\cr beta estimate. |
|
10 |
#' @param beta_vcov (`matrix`)\cr covariance of beta estimate. |
|
11 |
#' @param type (`string`)\cr type of empirical method, including "Empirical", "Empirical-Jackknife" |
|
12 |
#' and "Empirical-Bias-Reduced". |
|
13 |
#' |
|
14 |
#' @return Named list with elements: |
|
15 |
#' - `cov`: `matrix` empirical covariance. |
|
16 |
#' - `df_mat`: `matrix` to calculate the Satterthwaite degrees of freedom. |
|
17 |
#' |
|
18 |
#' @keywords internal |
|
19 |
h_get_empirical <- function(tmb_data, theta, beta, beta_vcov, type) { |
|
20 | 34x |
assert_class(tmb_data, "mmrm_tmb_data") |
21 | 34x |
assert_numeric(theta) |
22 | 34x |
n_beta <- ncol(tmb_data$x_matrix) |
23 | 34x |
assert_numeric(beta, finite = TRUE, any.missing = FALSE, len = n_beta) |
24 | 34x |
assert_matrix(beta_vcov, mode = "numeric", any.missing = FALSE, nrows = n_beta, ncols = n_beta) |
25 | 34x |
assert_subset(type, c("Empirical", "Empirical-Jackknife", "Empirical-Bias-Reduced")) |
26 | 34x |
.Call(`_mmrm_get_empirical`, PACKAGE = "mmrm", tmb_data, theta, beta, beta_vcov, type) |
27 |
} |
1 |
# Internal functions used for skipping tests or examples. |
|
2 | ||
3 |
# Predicate whether currently running R version is under development. |
|
4 |
is_r_devel <- function() { |
|
5 | 21x |
grepl("devel", R.version$status) |
6 |
} |
|
7 | ||
8 |
# Predicate whether currently running on a Linux operating system. |
|
9 |
is_linux <- function() { |
|
10 | 1x |
tolower(Sys.info()[["sysname"]]) == "linux" |
11 |
} |
|
12 | ||
13 |
# Get the compiler information. Workaround for older R versions |
|
14 |
# where R_compiled_by() is not available. |
|
15 |
get_compiler <- function() { |
|
16 | 3x |
r_cmd <- file.path(R.home("bin"), "R") |
17 | 3x |
system2(r_cmd, args = "CMD config CC", stdout = TRUE) |
18 |
} |
|
19 | ||
20 |
# Predicate whether currently using a clang compiler. |
|
21 |
is_using_clang <- function() { |
|
22 | 2x |
grepl("clang", get_compiler()) |
23 |
} |
|
24 | ||
25 |
# Predicate whether an R-devel version is running on Linux Fedora or |
|
26 |
# Debian with a clang compiler. |
|
27 |
is_r_devel_linux_clang <- function() { |
|
28 | 20x |
is_r_devel() && |
29 | 20x |
is_linux() && |
30 | 20x |
is_using_clang() |
31 |
} |
1 |
#ifndef CHOL_CACHE_INCLUDED_ |
|
2 |
#define CHOL_CACHE_INCLUDED_ |
|
3 | ||
4 |
#include "covariance.h" |
|
5 |
#include "utils.h" |
|
6 | ||
7 |
// Base class of spatial and non-spatial Cholesky. |
|
8 |
template <class Type> |
|
9 |
struct lower_chol_base { |
|
10 | 10629x |
virtual ~lower_chol_base() {} |
11 |
virtual matrix<Type> get_chol(std::vector<int> visits, matrix<Type> dist) = 0; |
|
12 |
virtual matrix<Type> get_sigma(std::vector<int> visits, matrix<Type> dist) = 0; |
|
13 |
virtual matrix<Type> get_sigma_inverse(std::vector<int> visits, matrix<Type> dist) = 0; |
|
14 |
}; |
|
15 |
// Struct to obtain Cholesky for non-spatial. |
|
16 |
template <class Type> |
|
17 |
struct lower_chol_nonspatial: virtual lower_chol_base<Type> { |
|
18 |
std::map<std::vector<int>, matrix<Type>> chols; |
|
19 |
std::map<std::vector<int>, matrix<Type>> sigmas; |
|
20 |
std::map<std::vector<int>, matrix<Type>> sigmas_inv; |
|
21 |
std::string cov_type; |
|
22 |
int n_visits; |
|
23 |
std::vector<int> full_visit; |
|
24 |
int n_theta; |
|
25 |
vector<Type> theta; |
|
26 |
matrix<Type> chol_full; |
|
27 |
matrix<Type> sigma_full; |
|
28 |
lower_chol_nonspatial() { |
|
29 |
// This default constructor is needed because of the use of `[]` on the map. |
|
30 |
} |
|
31 |
// Constructor from theta, n_visits and cov_type; also caches the full-visit values. |
|
32 | 10720x |
lower_chol_nonspatial(vector<Type> theta, int n_visits, std::string cov_type): cov_type(cov_type), n_visits(n_visits), full_visit(std::vector<int>(n_visits)) { |
33 | 10720x |
this->theta = theta; |
34 | 10720x |
std::iota(std::begin(this->full_visit), std::end(this->full_visit), 0); |
35 | 10720x |
this->n_theta = theta.size(); |
36 | 10720x |
this->chol_full = get_covariance_lower_chol(this->theta, this->n_visits, this->cov_type); |
37 | 10716x |
this->chols[full_visit] = this->chol_full; |
38 | 10716x |
this->sigma_full = tcrossprod(this->chol_full, true); |
39 |
} |
|
40 | 1163543x |
matrix<Type> get_chol(std::vector<int> visits, matrix<Type> dist) { |
41 | 1163543x |
auto target = this->chols.find(visits); |
42 | 1163543x |
if (target != this->chols.end()) { |
43 | 1074642x |
return target->second; |
44 |
} else { |
|
45 | 177802x |
matrix<Type> cov_i = this->get_sigma(visits, dist); |
46 | 88901x |
Eigen::LLT<Eigen::Matrix<Type,Eigen::Dynamic,Eigen::Dynamic> > cov_i_chol(cov_i); |
47 | 88901x |
matrix<Type> Li = cov_i_chol.matrixL(); |
48 | 88901x |
this->chols[visits] = Li; |
49 | 88901x |
return Li; |
50 |
} |
|
51 |
} |
|
52 | 609761x |
matrix<Type> get_sigma(std::vector<int> visits, matrix<Type> dist) { |
53 | 609761x |
auto target = this->sigmas.find(visits); |
54 | 609761x |
if (target != this->sigmas.end()) { |
55 | 484366x |
return target->second; |
56 |
} else { |
|
57 | 250790x |
matrix<Type> ret = subset_matrix<matrix<Type>, vector<int>>(sigma_full, visits, visits); |
58 | 125395x |
this->sigmas[visits] = ret; |
59 | 125395x |
return ret; |
60 |
} |
|
61 |
} |
|
62 | 208732x |
matrix<Type> get_sigma_inverse(std::vector<int> visits, matrix<Type> dist) { |
63 | 208732x |
auto target = this->sigmas_inv.find(visits); |
64 | 208732x |
if (target != this->sigmas_inv.end()) { |
65 | 182476x |
return target->second; |
66 |
} else { |
|
67 | 52512x |
matrix<Type> ret = this->get_sigma(visits, dist).inverse(); |
68 | 26256x |
this->sigmas_inv[visits] = ret; |
69 | 26256x |
return ret; |
70 |
} |
|
71 |
} |
|
72 |
}; |
|
73 | ||
74 | ||
75 |
// Struct to obtain Cholesky for spatial exponential. |
|
76 |
template <class Type> |
|
77 |
struct lower_chol_spatial: virtual lower_chol_base<Type> { |
|
78 |
vector<Type> theta; |
|
79 |
std::string cov_type; |
|
80 |
lower_chol_spatial() { |
|
81 |
// This default constructor is needed because of the use of `[]` on the map. |
|
82 |
} |
|
83 |
// Constructor from theta. For now the Cholesky factor does not need to be cached. |
|
84 | 200x |
lower_chol_spatial(vector<Type> theta, std::string cov_type): theta(theta), cov_type(cov_type) { |
85 |
} |
|
86 | 44897x |
matrix<Type> get_chol(std::vector<int> visits, matrix<Type> dist) { |
87 | 44897x |
return get_spatial_covariance_lower_chol(this->theta, dist, this->cov_type); |
88 |
} |
|
89 | 15780x |
matrix<Type> get_sigma(std::vector<int> visits, matrix<Type> dist) { |
90 | 15780x |
return tcrossprod(this->get_chol(visits, dist), true); |
91 |
} |
|
92 | 5912x |
matrix<Type> get_sigma_inverse(std::vector<int> visits, matrix<Type> dist) { |
93 | 5912x |
return this->get_sigma(visits, dist).inverse(); |
94 |
} |
|
95 |
}; |
|
96 | ||
97 |
template <class T, class Base, class D1, class D2> |
|
98 |
struct cache_obj { |
|
99 |
std::map<int, std::shared_ptr<Base>> cache; |
|
100 |
int n_groups; |
|
101 |
bool is_spatial; |
|
102 |
int n_visits; |
|
103 | 10024x |
cache_obj(vector<T> theta, int n_groups, bool is_spatial, std::string cov_type, int n_visits): n_groups(n_groups), is_spatial(is_spatial), n_visits(n_visits) { |
104 |
// Get number of variance parameters for one group. |
|
105 | 10024x |
int theta_one_group_size = theta.size() / n_groups; |
106 | 20934x |
for (int r = 0; r < n_groups; r++) { |
107 |
// Use shared pointers here to better manage resources. |
|
108 | 10914x |
if (is_spatial) { |
109 | 198x |
this->cache[r] = std::make_shared<D1>(theta.segment(r * theta_one_group_size, theta_one_group_size), cov_type); |
110 |
} else { |
|
111 | 10716x |
this->cache[r] = std::make_shared<D2>(theta.segment(r * theta_one_group_size, theta_one_group_size), n_visits, cov_type); |
112 |
} |
|
113 |
} |
|
114 |
} |
|
115 |
}; |
|
116 | ||
117 |
template <class Type> |
|
118 |
struct chol_cache_groups: cache_obj<Type, lower_chol_base<Type>, lower_chol_spatial<Type>, lower_chol_nonspatial<Type>> { |
|
119 | 9698x |
chol_cache_groups(vector<Type> theta, int n_groups, bool is_spatial, std::string cov_type, int n_visits): cache_obj<Type, lower_chol_base<Type>, lower_chol_spatial<Type>, lower_chol_nonspatial<Type>>(theta, n_groups, is_spatial, cov_type, n_visits) { |
120 | ||
121 |
} |
|
122 |
// Return covariance lower Cholesky factor from lower_chol_base objects. |
|
123 |
// For non-spatial, return the factor for the full set of visits; for spatial, return it for two points at distance 1. |
|
124 | 6300x |
matrix<Type> get_default_chol() { |
125 | 6300x |
std::vector<int> visit(this->n_visits); |
126 | 6300x |
std::iota(std::begin(visit), std::end(visit), 0); |
127 | 6300x |
matrix<Type> dist(2, 2); |
128 | 6300x |
dist << 0, 1, 1, 0; |
129 | 6300x |
int dim = this->is_spatial?2:this->n_visits; |
130 | 6300x |
matrix<Type> covariance_lower_chol = matrix<Type>::Zero(dim * this->n_groups, dim); |
131 | 13248x |
for (int r = 0; r < this->n_groups; r++) { |
132 | 6948x |
covariance_lower_chol.block(r * dim, 0, dim, dim) = this->cache[r]->get_chol(visit, dist); |
133 |
} |
|
134 | 12600x |
return covariance_lower_chol; |
135 |
} |
|
136 |
}; |
|
137 | ||
138 |
#endif |
1 |
#ifndef COV_INCLUDED_ |
|
2 |
#define COV_INCLUDED_ |
|
3 | ||
4 |
#include "utils.h" |
|
5 | ||
6 |
// Unstructured covariance: |
|
7 |
// Cholesky factor. |
|
8 |
template <class T> |
|
9 | 17320x |
matrix<T> get_unstructured(const vector<T>& theta, int n_visits) { |
10 | 17320x |
vector<T> sd_values = exp(theta.head(n_visits)); |
11 | 17320x |
vector<T> lower_tri_chol_values = theta.tail(theta.size() - n_visits); |
12 | 17320x |
matrix<T> covariance_lower_chol = matrix<T>::Zero(n_visits, n_visits); |
13 | 17320x |
int k = 0; |
14 | 86568x |
for(int i = 0; i < n_visits; i++) { |
15 | 69248x |
covariance_lower_chol(i, i) = sd_values(i); |
16 | 173184x |
for(int j = 0; j < i; j++){ |
17 | 103936x |
covariance_lower_chol(i, j) = sd_values(i) * lower_tri_chol_values(k++); |
18 |
} |
|
19 |
} |
|
20 | 34640x |
return covariance_lower_chol; |
21 |
} |
|
22 | ||
23 |
// Ante-dependence: |
|
24 | ||
25 |
// Correlation function. |
|
26 |
template <class T> |
|
27 |
struct corr_fun_ante_dependence : generic_corr_fun<T> { |
|
28 |
using generic_corr_fun<T>::generic_corr_fun; |
|
29 | 4452x |
const T operator() (int i, int j) const { |
30 | 4452x |
return this->corr_values.segment(j, i - j).prod(); |
31 |
} |
|
32 |
}; |
|
33 |
// Homogeneous Ante-dependence Cholesky factor. |
|
34 |
template <class T> |
|
35 | 316x |
matrix<T> get_ante_dependence(const vector<T>& theta, int n_visits) { |
36 | 316x |
T const_sd = exp(theta(0)); |
37 | 316x |
corr_fun_ante_dependence<T> fun(theta.tail(n_visits - 1)); |
38 | 316x |
matrix<T> ad_cor_mat_chol = get_corr_mat_chol(n_visits, fun); |
39 | 632x |
return const_sd * ad_cor_mat_chol; |
40 |
} |
|
41 |
// Heterogeneous Ante-dependence Cholesky factor. |
|
42 |
template <class T> |
|
43 | 476x |
matrix<T> get_ante_dependence_heterogeneous(const vector<T>& theta, int n_visits) { |
44 | 476x |
vector<T> sd_values = exp(theta.head(n_visits)); |
45 | 476x |
corr_fun_ante_dependence<T> fun(theta.tail(n_visits - 1)); |
46 | 952x |
return get_heterogeneous_cov(sd_values, fun); |
47 |
} |
|
48 | ||
49 |
// Toeplitz: |
|
50 | ||
51 |
// Correlation function. |
|
52 |
template <class T> |
|
53 |
struct corr_fun_toeplitz : generic_corr_fun<T> { |
|
54 |
using generic_corr_fun<T>::generic_corr_fun; |
|
55 | 5076x |
const T operator() (int i, int j) const { |
56 | 5076x |
int index = (i - j) - 1; // Note: We need to start at 0. |
57 | 5076x |
return this->corr_values(index); |
58 |
} |
|
59 |
}; |
|
60 |
// Homogeneous Toeplitz Cholesky factor. |
|
61 |
template <class T> |
|
62 | 416x |
matrix<T> get_toeplitz(const vector<T>& theta, int n_visits) { |
63 | 416x |
T const_sd = exp(theta(0)); |
64 | 416x |
corr_fun_toeplitz<T> fun(theta.tail(n_visits - 1)); |
65 | 416x |
matrix<T> toep_cor_mat_chol = get_corr_mat_chol(n_visits, fun); |
66 | 832x |
return const_sd * toep_cor_mat_chol; |
67 |
} |
|
68 |
// Heterogeneous Toeplitz Cholesky factor. |
|
69 |
template <class T> |
|
70 | 428x |
matrix<T> get_toeplitz_heterogeneous(const vector<T>& theta, int n_visits) { |
71 | 428x |
vector<T> sd_values = exp(theta.head(n_visits)); |
72 | 428x |
corr_fun_toeplitz<T> fun(theta.tail(n_visits - 1)); |
73 | 856x |
return get_heterogeneous_cov(sd_values, fun); |
74 |
} |
|
75 | ||
76 |
// Autoregressive: |
|
77 | ||
78 |
// Correlation function. |
|
79 |
template <class T> |
|
80 |
struct corr_fun_autoregressive : generic_corr_fun<T> { |
|
81 |
using generic_corr_fun<T>::generic_corr_fun; |
|
82 | 20600x |
const T operator() (int i, int j) const { |
83 | 20600x |
T diff = T((i - j) * 1.0); |
84 | 26336x |
return pow(this->corr_values(0), diff); // rho^{|i-j|} |
85 |
} |
|
86 |
}; |
|
87 |
// Homogeneous autoregressive Cholesky factor. |
|
88 |
template <class T> |
|
89 | 2996x |
matrix<T> get_auto_regressive(const vector<T>& theta, int n_visits) { |
90 | 2996x |
T const_sd = exp(theta(0)); |
91 | 2996x |
corr_fun_autoregressive<T> fun(theta.tail(1)); |
92 | 2996x |
matrix<T> ar1_cor_mat_chol = get_corr_mat_chol(n_visits, fun); |
93 | 5992x |
return const_sd * ar1_cor_mat_chol; |
94 |
} |
|
95 |
// Heterogeneous autoregressive Cholesky factor. |
|
96 |
template <class T> |
|
97 | 428x |
matrix<T> get_auto_regressive_heterogeneous(const vector<T>& theta, int n_visits) { |
98 | 428x |
vector<T> sd_values = exp(theta.head(n_visits)); |
99 | 428x |
corr_fun_autoregressive<T> fun(theta.tail(1)); |
100 | 856x |
return get_heterogeneous_cov(sd_values, fun); |
101 |
} |
|
102 | ||
103 |
// Compound symmetry: |
|
104 | ||
105 |
// Correlation function. |
|
106 |
template <class T> |
|
107 |
struct corr_fun_compound_symmetry : generic_corr_fun<T> { |
|
108 |
using generic_corr_fun<T>::generic_corr_fun; |
|
109 | 6876x |
const T operator() (int i, int j) const { |
110 | 6876x |
return this->corr_values(0); // rho (constant) |
111 |
} |
|
112 |
}; |
|
113 |
// Homogeneous compound symmetry Cholesky factor. |
|
114 |
template <class T> |
|
115 | 620x |
matrix<T> get_compound_symmetry(const vector<T>& theta, int n_visits) { |
116 | 620x |
T const_sd = exp(theta(0)); |
117 | 620x |
corr_fun_compound_symmetry<T> fun(theta.tail(1)); |
118 | 620x |
matrix<T> cs_cor_mat_chol = get_corr_mat_chol(n_visits, fun); |
119 | 1240x |
return const_sd * cs_cor_mat_chol; |
120 |
} |
|
121 |
// Heterogeneous compound symmetry Cholesky factor. |
|
122 |
template <class T> |
|
123 | 524x |
matrix<T> get_compound_symmetry_heterogeneous(const vector<T>& theta, int n_visits) { |
124 | 524x |
vector<T> sd_values = exp(theta.head(n_visits)); |
125 | 524x |
corr_fun_compound_symmetry<T> fun(theta.tail(1)); |
126 | 1048x |
return get_heterogeneous_cov(sd_values, fun); |
127 |
} |
|
128 | ||
129 |
// Spatial Exponential Cholesky factor. |
|
130 |
template <class T> |
|
131 | 44897x |
matrix<T> get_spatial_exponential(const vector<T>& theta, const matrix<T>& distance) { |
132 | 44897x |
T const_sd = exp(theta(0)); |
133 | 44897x |
T rho = invlogit(theta(1)); |
134 | 44897x |
matrix<T> expdist = exp(distance.array() * log(rho)); |
135 | 44897x |
matrix<T> result = expdist * const_sd; |
136 | 44897x |
Eigen::LLT<Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> > cov_i_chol(result); |
137 | 89794x |
return cov_i_chol.matrixL(); |
138 |
} |
|
139 | ||
140 |
// Dispatches on the covariance type name to build the covariance lower Cholesky factor from theta. |
|
141 |
template <class T> |
|
142 | 23492x |
matrix<T> get_covariance_lower_chol(const vector<T>& theta, int n_visits, std::string cov_type) { |
143 | 23492x |
matrix<T> result; |
144 | ||
145 | 23492x |
if (cov_type == "us") { |
146 | 17312x |
result = get_unstructured<T>(theta, n_visits); |
147 | 6180x |
} else if (cov_type == "toep") { |
148 | 412x |
result = get_toeplitz<T>(theta, n_visits); |
149 | 5768x |
} else if (cov_type == "toeph") { |
150 | 424x |
result = get_toeplitz_heterogeneous<T>(theta, n_visits); |
151 | 5344x |
} else if (cov_type == "ar1") { |
152 | 2992x |
result = get_auto_regressive<T>(theta, n_visits); |
153 | 2352x |
} else if (cov_type == "ar1h") { |
154 | 424x |
result = get_auto_regressive_heterogeneous<T>(theta, n_visits); |
155 | 1928x |
} else if (cov_type == "ad") { |
156 | 312x |
result = get_ante_dependence<T>(theta, n_visits); |
157 | 1616x |
} else if (cov_type == "adh") { |
158 | 472x |
result = get_ante_dependence_heterogeneous<T>(theta, n_visits); |
159 | 1144x |
} else if (cov_type == "cs") { |
160 | 616x |
result = get_compound_symmetry<T>(theta, n_visits); |
161 | 528x |
} else if (cov_type == "csh") { |
162 | 520x |
result = get_compound_symmetry_heterogeneous<T>(theta, n_visits); |
163 |
} else { |
|
164 | 4x |
Rf_error("%s", ("Unknown covariance type '" + cov_type + "'.").c_str()); |
165 |
} |
|
166 | ||
167 | 23484x |
return result; |
168 |
} |
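// For reference, the expected length of theta per covariance type follows directly from the |
// functions above (writing n = n_visits): |
//   us: n * (n + 1) / 2,  toep: n,        toeph: 2 * n - 1, |
//   ar1: 2,               ar1h: n + 1,    ad: n, |
//   adh: 2 * n - 1,       cs: 2,          csh: n + 1. |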
|
169 | ||
170 |
// Dispatches on the covariance type name to build the spatial covariance lower Cholesky factor. |
|
171 |
template <class T> |
|
172 | 44897x |
matrix<T> get_spatial_covariance_lower_chol(const vector<T>& theta, const matrix<T>& distance, std::string cov_type) { |
173 | 44897x |
matrix<T> result; |
174 | 44897x |
if (cov_type == "sp_exp") { |
175 | 44897x |
result = get_spatial_exponential<T>(theta, distance); |
176 |
} else { |
|
177 | ! |
Rf_error("%s", ("Unknown spatial covariance type '" + cov_type + "'.").c_str()); |
178 |
} |
|
179 | 44897x |
return result; |
180 |
} |
|
181 | ||
182 |
#endif |
1 |
#ifndef DERIVATIVE_INCLUDED_ |
|
2 |
#define DERIVATIVE_INCLUDED_ |
|
3 | ||
4 |
#include "chol_cache.h" |
|
5 | ||
6 |
using namespace Rcpp; |
|
7 |
using std::string; |
|
8 |
// Struct chol to obtain the Cholesky factor given theta. |
|
9 |
// The reason to have it is that we need a functor that needs only theta to |
|
10 |
// obtain the derivatives from autodiff. |
|
11 |
// Only non-spatial covariance structure here. |
|
12 |
struct chol { |
|
13 |
int dim_cov_mat; |
|
14 |
string cov_type; |
|
15 | 684x |
chol(int dim, string cov): dim_cov_mat(dim), cov_type(cov) {}; |
16 |
template <class T> |
|
17 | 2048x |
vector<T> operator() (vector<T> &theta) { |
18 | 2048x |
return get_covariance_lower_chol(theta, this->dim_cov_mat, this->cov_type).vec(); |
19 |
} |
|
20 |
}; |
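// Illustrative use (a sketch mirroring the call sites in derivatives() below): |
//   chol chol_obj(n_visits, cov_type); |
//   matrix<Type> l = chol_obj(theta).matrix();               // flattened Cholesky factor |
//   matrix<Type> jac = autodiff::jacobian(chol_obj, theta);  // d vec(L) / d theta |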
|
21 |
// Struct chol_jacobian that has the Jacobian of the Cholesky factor given theta. |
|
22 |
// The reason to have it is that we need the Hessian, so we apply the Jacobian twice. |
|
23 |
struct chol_jacobian { |
|
24 |
int dim_cov_mat; |
|
25 |
string cov_type; |
|
26 |
chol mychol; |
|
27 | 344x |
chol_jacobian(int dim, string cov): dim_cov_mat(dim), cov_type(cov), mychol(dim, cov) {}; |
28 |
template<class T> |
|
29 | 346x |
vector<T> operator() (vector<T> &theta) { |
30 | 346x |
return autodiff::jacobian(this->mychol, theta).vec(); |
31 |
} |
|
32 |
}; |
|
33 | ||
34 |
// Template function to obtain derivatives from n_visits, cov_type and theta. |
|
35 |
// Basically this computes the derivatives of sigma |
|
36 |
// from the derivatives of its Cholesky factor. |
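// Writing Sigma = L * L^T with lower Cholesky factor L = L(theta), the loop below uses |
//   d Sigma / d theta_i = L_i' * L^T + L * L_i'^T, |
//   d^2 Sigma / (d theta_i d theta_j) = L_ij'' * L^T + L * L_ij''^T + L_i' * L_j'^T + L_j' * L_i'^T, |
// where L_i' and L_ij'' are the first and second derivatives of L obtained via autodiff. |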
|
37 |
template <class Type> |
|
38 | 340x |
std::map<std::string, matrix<Type>> derivatives(int n_visits, std::string cov_type, vector<Type> theta) { |
39 | 340x |
std::map<std::string, matrix<Type>> ret; |
40 | 340x |
chol chol_obj(n_visits, cov_type); |
41 | 340x |
chol_jacobian chol_jac_obj(n_visits, cov_type); |
42 | 340x |
matrix<Type> l = chol_obj(theta).matrix(); |
43 | 340x |
l.resize(n_visits, n_visits); |
44 | 680x |
vector<Type> chol_d1_vec = autodiff::jacobian(chol_obj, theta).vec(); // chol_d1_vec is (dim * dim * l_theta) |
45 | 680x |
vector<Type> chol_d2_vec = autodiff::jacobian(chol_jac_obj, theta).vec(); // chol_d2_vec is (dim * dim * l_theta * l_theta) |
46 | 340x |
matrix<Type> ret_d1 = matrix<Type>(n_visits * theta.size(), n_visits); |
47 | 340x |
matrix<Type> ret_d2 = matrix<Type>(n_visits * theta.size() * theta.size(), n_visits); |
48 | 340x |
int n_visits_sq = n_visits * n_visits; |
49 | 2174x |
for (int i = 0; i < theta.size(); i++) { |
50 | 1834x |
matrix<Type> ld1 = chol_d1_vec.segment(i * n_visits_sq, n_visits_sq).matrix(); |
51 | 1834x |
ld1.resize(n_visits, n_visits); |
52 | 1834x |
matrix<Type> ld1_lt = ld1 * l.transpose(); |
53 | 1834x |
auto sigma_d1_i = ld1_lt + ld1_lt.transpose(); |
54 | 1834x |
ret_d1.block(i * n_visits, 0, n_visits, n_visits) = sigma_d1_i; |
55 | 15992x |
for (int j = 0; j < theta.size(); j++) { |
56 | 14158x |
matrix<Type> ld2 = chol_d2_vec.segment( (j * theta.size() + i) * n_visits_sq, n_visits_sq).matrix(); |
57 | 14158x |
matrix<Type> ld1_j = chol_d1_vec.segment(j * n_visits_sq, n_visits_sq).matrix(); |
58 | 14158x |
ld2.resize(n_visits, n_visits); |
59 | 14158x |
ld1_j.resize(n_visits, n_visits); |
60 | 14158x |
auto ld2_lt = ld2 * l.transpose(); |
61 | 14158x |
auto ld1_ld1j = ld1 * ld1_j.transpose(); |
62 | 14158x |
auto sigma_d2_ij = ld2_lt + ld2_lt.transpose() + ld1_ld1j + ld1_ld1j.transpose(); |
63 | 14158x |
ret_d2.block((i * theta.size() + j) * n_visits, 0, n_visits, n_visits) = sigma_d2_ij; |
64 |
} |
|
65 |
} |
|
66 | 340x |
ret["derivative1"] = ret_d1; |
67 | 340x |
ret["derivative2"] = ret_d2; |
68 | 680x |
return ret; |
69 |
} |
|
70 |
// Base class of spatial and non-spatial derivatives. |
|
71 |
template <class Type> |
|
72 |
struct derivatives_base: virtual lower_chol_base<Type> { |
|
73 |
virtual matrix<Type> get_inverse_chol(std::vector<int> visits, matrix<Type> dist) = 0; |
|
74 |
virtual matrix<Type> get_sigma_derivative1(std::vector<int> visits, matrix<Type> dist) = 0; |
|
75 |
virtual matrix<Type> get_sigma_derivative2(std::vector<int> visits, matrix<Type> dist) = 0; |
|
76 |
virtual matrix<Type> get_inverse_derivative(std::vector<int> visits, matrix<Type> dist) = 0; |
|
77 |
// Virtual destructor to ensure derived class destructors are called when deleting through a base class pointer. |
|
78 | 364x |
virtual ~derivatives_base() {}; |
79 |
}; |
|
80 | ||
81 |
// Struct derivatives_nonspatial is created to get the derivatives with cache. |
|
82 |
// The main reason to have it is that we nearly always have duplicated visits |
|
83 |
// and inverting a matrix is computationally expensive. In addition, we can save |
|
84 |
// the resources needed for the select matrix calculations. |
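// The cache key is the vector of observed visit indices for a subject, e.g. {0, 2, 3} for a |
// subject seen at the first, third and fourth visits, so subjects sharing the same |
// missingness pattern reuse the already subsetted matrices. |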
|
85 |
template <class Type> |
|
86 |
struct derivatives_nonspatial: public lower_chol_nonspatial<Type>, virtual derivatives_base<Type> { |
|
87 |
std::map<std::vector<int>, matrix<Type>> inverse_chol_cache; |
|
88 |
std::map<std::vector<int>, matrix<Type>> sigmad1_cache; |
|
89 |
std::map<std::vector<int>, matrix<Type>> sigmad2_cache; |
|
90 |
std::map<std::vector<int>, matrix<Type>> sigma_inverse_d1_cache; |
|
91 |
derivatives_nonspatial() { |
|
92 |
// This default constructor is needed because of the use of `[]` on the map. |
|
93 |
} |
|
94 |
// Constructor from theta, n_visits and cov_type; caches the full_visit values. |
|
95 | 340x |
derivatives_nonspatial(vector<Type> theta, int n_visits, std::string cov_type): lower_chol_nonspatial<Type>(theta, n_visits, cov_type) { |
96 | 680x |
std::map<std::string, tmbutils::matrix<Type>> allret = derivatives<Type>(this->n_visits, this->cov_type, this->theta); |
97 | 680x |
matrix<Type> sigma_d1 = allret["derivative1"]; |
98 | 680x |
matrix<Type> sigma_d2 = allret["derivative2"]; |
99 | 340x |
this->sigmad1_cache[this->full_visit] = sigma_d1; |
100 | 340x |
this->sigmad2_cache[this->full_visit] = sigma_d2; |
101 |
} |
|
102 |
// Cache and return the first order derivatives using select matrix. |
|
103 | 20600x |
matrix<Type> get_sigma_derivative1(std::vector<int> visits, matrix<Type> dist) override { |
104 | 20600x |
auto target = this->sigmad1_cache.find(visits); |
105 | 20600x |
if (target != this->sigmad1_cache.end()) { |
106 | 16822x |
return target->second; |
107 |
} else { |
|
108 | 3778x |
int n_visits_i = visits.size(); |
109 | 3778x |
matrix<Type> ret = matrix<Type>(this->n_theta * n_visits_i, n_visits_i); |
110 | 26068x |
for (int i = 0; i < this->n_theta; i++) { |
111 | 22290x |
ret.block(i * n_visits_i, 0, n_visits_i, n_visits_i) = subset_matrix<matrix<Type>, vector<int>>(this->sigmad1_cache[this->full_visit].block(i * this->n_visits, 0, this->n_visits, this->n_visits), visits, visits); |
112 |
} |
|
113 | 3778x |
this->sigmad1_cache[visits] = ret; |
114 | 3778x |
return ret; |
115 |
} |
|
116 |
} |
|
117 |
// Cache and return the second order derivatives using select matrix. |
|
118 | 16550x |
matrix<Type> get_sigma_derivative2(std::vector<int> visits, matrix<Type> dist) override { |
119 | 16550x |
auto target = this->sigmad2_cache.find(visits); |
120 | 16550x |
if (target != this->sigmad2_cache.end()) { |
121 | 15092x |
return target->second; |
122 |
} else { |
|
123 | 1458x |
int n_visits_i = visits.size(); |
124 | 1458x |
int theta_sq = this->n_theta * this->n_theta; |
125 | 1458x |
matrix<Type> ret = matrix<Type>(theta_sq * n_visits_i, n_visits_i); |
126 | 44586x |
for (int i = 0; i < theta_sq; i++) { |
127 | 43128x |
ret.block(i * n_visits_i, 0, n_visits_i, n_visits_i) = subset_matrix<matrix<Type>, vector<int>>(this->sigmad2_cache[this->full_visit].block(i * this->n_visits, 0, this->n_visits, this->n_visits), visits, visits); |
128 |
} |
|
129 | 1458x |
this->sigmad2_cache[visits] = ret; |
130 | 1458x |
return ret; |
131 |
} |
|
132 |
} |
|
133 |
// Cache and return the lower cholesky factor of inverse of sigma using select matrix. |
|
134 | 12608x |
matrix<Type> get_inverse_chol(std::vector<int> visits, matrix<Type> dist) override { |
135 | 12608x |
auto target = this->inverse_chol_cache.find(visits); |
136 | 12608x |
if (target != this->inverse_chol_cache.end()) { |
137 | 11648x |
return target->second; |
138 |
} else { |
|
139 | 1920x |
matrix<Type> sigmainv = this->get_sigma_inverse(visits, dist); |
140 | 960x |
Eigen::LLT<Eigen::Matrix<Type,Eigen::Dynamic,Eigen::Dynamic> > sigma_inv_chol(sigmainv); |
141 | 960x |
matrix<Type> Li = sigma_inv_chol.matrixL(); |
142 | 960x |
this->inverse_chol_cache[visits] = Li; |
143 | 960x |
return Li; |
144 |
} |
|
145 |
} |
|
146 |
// Cache and return the first order derivatives of inverse of sigma using select matrix. |
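// This uses the identity d(Sigma^{-1}) / d theta_r = - Sigma^{-1} * (d Sigma / d theta_r) * Sigma^{-1}. |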
|
147 | 47282x |
matrix<Type> get_inverse_derivative(std::vector<int> visits, matrix<Type> dist) override { |
148 | 47282x |
auto target = this->sigma_inverse_d1_cache.find(visits); |
149 | 47282x |
if (target != this->sigma_inverse_d1_cache.end()) { |
150 | 43232x |
return target->second; |
151 |
} else { |
|
152 | 8100x |
auto sigma_d1 = this->get_sigma_derivative1(visits, dist); |
153 | 4050x |
matrix<Type> sigma_inv_d1(sigma_d1.rows(), sigma_d1.cols()); |
154 | 4050x |
int n_visits_i = visits.size(); |
155 | 8100x |
auto sigma_inv = this->get_sigma_inverse(visits, dist); |
156 | 27934x |
for (int r = 0; r < this->n_theta; r++) { |
157 | 23884x |
sigma_inv_d1.block(r * n_visits_i, 0, n_visits_i, n_visits_i) = - sigma_inv * sigma_d1.block(r * n_visits_i, 0, n_visits_i, n_visits_i) *sigma_inv; |
158 |
} |
|
159 | 4050x |
this->sigma_inverse_d1_cache[visits] = sigma_inv_d1; |
160 | 4050x |
return sigma_inv_d1; |
161 |
} |
|
162 |
} |
|
163 |
}; |
|
164 | ||
165 |
// derivatives_sp_exp struct is created to obtain the exact derivatives of spatial exponential |
|
166 |
// covariance structure, and its inverse. |
|
167 |
// No caching is used because the distances are hardly ever the same for spatial covariance |
|
168 |
// structures. |
|
169 |
template <class Type> |
|
170 |
struct derivatives_sp_exp: public lower_chol_spatial<Type>, virtual derivatives_base<Type> { |
|
171 |
Type const_sd; |
|
172 |
Type rho; |
|
173 |
Type logrho; |
|
174 |
derivatives_sp_exp() { |
|
175 |
// This default constructor is needed because of the use of `[]` on maps. |
|
176 |
} |
|
177 |
// Initialize from theta; the reason to store theta is that for a given fit, theta |
|
178 |
// is the same for all subjects, while the distances between visits |
|
179 |
// can differ by subject. |
|
180 | 24x |
derivatives_sp_exp(vector<Type> theta, std::string cov_type): lower_chol_spatial<Type>(theta, cov_type) ,const_sd(exp(theta(0))), rho(invlogit(theta(1))) { |
181 | 24x |
this->logrho = log(this->rho); |
182 |
} |
|
183 |
// Obtain first order derivatives |
|
184 | 5124x |
matrix<Type> get_sigma_derivative1(std::vector<int> visits, matrix<Type> dist) override { |
185 | 5124x |
matrix<Type> ret(2 * dist.rows(), dist.cols()); |
186 |
// partial sigma / partial theta_1 = sigma. |
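// partial sigma / partial theta_2 = sigma * dist * (1 - rho), since rho = invlogit(theta_2) |
// and d rho / d theta_2 = rho * (1 - rho). |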
|
187 | 10248x |
auto sigma = this->get_sigma(visits, dist); |
188 | 5124x |
ret.block(0, 0, dist.rows(), dist.cols()) = sigma; |
189 | 5124x |
ret.block(dist.rows(), 0, dist.rows(), dist.cols()) = sigma.array() * dist.array() * (1 - this->rho); |
190 | 10248x |
return ret; |
191 |
} |
|
192 |
// Obtain second order derivatives. |
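// With sigma = const_sd * rho^dist element-wise, the blocks below are, in order: |
//   d^2 sigma / d theta_1^2 = sigma, |
//   d^2 sigma / (d theta_1 d theta_2) = sigma * dist * (1 - rho)   (both mixed blocks), |
//   d^2 sigma / d theta_2^2 = sigma * dist * (1 - rho) * (dist * (1 - rho) - rho). |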
|
193 | 1972x |
matrix<Type> get_sigma_derivative2(std::vector<int> visits, matrix<Type> dist) override { |
194 | 1972x |
matrix<Type> ret(4 * dist.rows(), dist.cols()); |
195 | 3944x |
auto sigma = this->get_sigma(visits, dist); |
196 | 1972x |
ret.block(0, 0, dist.rows(), dist.cols()) = sigma; |
197 | 1972x |
Type rho_r = 1 - this->rho; |
198 | 1972x |
auto dtheta1dtheta2 = sigma.array() * dist.array() * rho_r; |
199 | 1972x |
ret.block(dist.rows(), 0, dist.rows(), dist.cols()) = dtheta1dtheta2; |
200 | 1972x |
ret.block(dist.rows() * 2, 0, dist.rows(), dist.cols()) = dtheta1dtheta2; |
201 | 1972x |
matrix<Type> dtheta2s = dtheta1dtheta2 * (dist.array() * rho_r - this->rho); |
202 | 1972x |
ret.block(dist.rows() * 3, 0, dist.rows(), dist.cols()) = dtheta2s; |
203 | 3944x |
return ret; |
204 |
} |
|
205 |
// Obtain the lower Cholesky factor of the inverse of sigma. |
|
206 | 788x |
matrix<Type> get_inverse_chol(std::vector<int> visits, matrix<Type> dist) override { |
207 | 1576x |
auto sigmainv = this->get_sigma_inverse(visits, dist); |
208 | 788x |
Eigen::LLT<Eigen::Matrix<Type,Eigen::Dynamic,Eigen::Dynamic> > sigma_inv_chol(sigmainv); |
209 | 788x |
matrix<Type> Li = sigma_inv_chol.matrixL(); |
210 | 1576x |
return Li; |
211 |
} |
|
212 |
// Obtain first order derivatives for inverse of sigma. |
|
213 | 3152x |
matrix<Type> get_inverse_derivative(std::vector<int> visits, matrix<Type> dist) override { |
214 | 3152x |
matrix<Type> sigma_inv_d1 = matrix<Type>::Zero(2 * dist.rows(), dist.cols()); |
215 | 6304x |
auto sigma_inv = this->get_sigma_inverse(visits, dist); |
216 | 6304x |
auto sigma_d1 = this->get_sigma_derivative1(visits, dist); |
217 | 9456x |
for (int r = 0; r < 2; r++) { |
218 | 6304x |
sigma_inv_d1.block(r * dist.rows(), 0, dist.rows(), dist.cols()) = - sigma_inv * sigma_d1.block(r * dist.rows(), 0, dist.rows(), dist.cols()) *sigma_inv; |
219 |
} |
|
220 | 6304x |
return sigma_inv_d1; |
221 |
} |
|
222 |
}; |
|
223 | ||
224 |
#endif |
1 |
#include "derivatives.h" |
|
2 | ||
3 |
using namespace Rcpp; |
|
4 |
using std::string; |
|
5 |
// Obtain the empirical (sandwich) covariance matrix of the coefficient estimates given beta, beta_vcov, theta. |
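// This is the sandwich estimator beta_vcov * meat * beta_vcov, where the meat accumulates |
// z_i * z_i^T over subjects and z_i is built from the scaled residuals below. The `type` |
// argument selects the per-subject adjustment A_i: "Empirical" uses A_i = I, |
// "Empirical-Jackknife" uses (I - H_i)^{-1} and "Empirical-Bias-Reduced" uses (I - H_i)^{-1/2}; |
// these appear to correspond to the usual CR0 / CR3 / CR2 type corrections, respectively. |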
|
6 | 408x |
List get_empirical(List mmrm_data, NumericVector theta, NumericVector beta, NumericMatrix beta_vcov, string type) { |
7 | 816x |
NumericMatrix x = mmrm_data["x_matrix"]; |
8 | 408x |
matrix<double> x_matrix = as_num_matrix_tmb(x); |
9 | 816x |
NumericVector y = mmrm_data["y_vector"]; |
10 | 408x |
matrix<double> beta_vcov_matrix = as_num_matrix_tmb(beta_vcov); |
11 | 816x |
IntegerVector subject_zero_inds = mmrm_data["subject_zero_inds"]; |
12 | 408x |
int n_subjects = mmrm_data["n_subjects"]; |
13 | 408x |
int n_observations = x_matrix.rows(); |
14 | 816x |
IntegerVector subject_n_visits = mmrm_data["subject_n_visits"]; |
15 | 408x |
int n_visits = mmrm_data["n_visits"]; |
16 | 816x |
String cov_type = mmrm_data["cov_type"]; |
17 | 408x |
int is_spatial_int = mmrm_data["is_spatial_int"]; |
18 | 408x |
bool is_spatial = is_spatial_int == 1; |
19 | 408x |
int n_groups = mmrm_data["n_groups"]; |
20 | 816x |
IntegerVector subject_groups = mmrm_data["subject_groups"]; |
21 | 816x |
NumericVector weights_vector = mmrm_data["weights_vector"]; |
22 | 816x |
NumericMatrix coordinates = mmrm_data["coordinates"]; |
23 | 408x |
matrix<double> coords = as_num_matrix_tmb(coordinates); |
24 | 816x |
matrix<double> beta_m = as_num_vector_tmb(beta).matrix(); |
25 | 408x |
vector<double> theta_v = as_num_vector_tmb(theta); |
26 | 408x |
matrix<double> fitted = x_matrix * beta_m; |
27 | 816x |
matrix<double> y_matrix = as_num_vector_tmb(y).matrix(); |
28 | 408x |
matrix<double> residual = y_matrix - fitted; |
29 | 408x |
vector<double> G_sqrt = as_num_vector_tmb(sqrt(weights_vector)); |
30 | 408x |
int p = x.cols(); |
31 |
// Use a map to hold these base class pointers (which can also point to derived class objects). |
|
32 | 816x |
auto derivatives_by_group = cache_obj<double, derivatives_base<double>, derivatives_sp_exp<double>, derivatives_nonspatial<double>>(theta_v, n_groups, is_spatial, cov_type, n_visits); |
33 | 408x |
matrix<double> meat = matrix<double>::Zero(p, p); |
34 | 408x |
matrix<double> xt_g_simga_inv_chol = matrix<double>::Zero(p, n_observations); |
35 | 408x |
matrix<double> ax = matrix<double>::Zero(n_observations, p); |
36 | 80784x |
for (int i = 0; i < n_subjects; i++) { |
37 | 80376x |
int start_i = subject_zero_inds[i]; |
38 | 80376x |
int n_visits_i = subject_n_visits[i]; |
39 | 80376x |
std::vector<int> visit_i(n_visits_i); |
40 | 80376x |
matrix<double> dist_i(n_visits_i, n_visits_i); |
41 | 80376x |
if (!is_spatial) { |
42 | 281856x |
for (int j = 0; j < n_visits_i; j++) { |
43 | 206208x |
visit_i[j] = int(coordinates(j + start_i, 0)); |
44 |
} |
|
45 |
} else { |
|
46 | 4728x |
dist_i = euclidean(matrix<double>(coords.block(start_i, 0, n_visits_i, coordinates.cols()))); |
47 |
} |
|
48 | 80376x |
int subject_group_i = subject_groups[i] - 1; |
49 | 160752x |
matrix<double> sigma_inv_chol = derivatives_by_group.cache[subject_group_i]->get_inverse_chol(visit_i, dist_i); |
50 | 80376x |
matrix<double> Xi = x_matrix.block(start_i, 0, n_visits_i, x_matrix.cols()); |
51 | 80376x |
matrix<double> residual_i = residual.block(start_i, 0, n_visits_i, 1); |
52 | 80376x |
matrix<double> gi_sqrt_root = G_sqrt.segment(start_i, n_visits_i).matrix().asDiagonal(); |
53 | 80376x |
matrix<double> gi_simga_inv_chol = gi_sqrt_root * sigma_inv_chol; |
54 | 80376x |
matrix<double> xt_gi_simga_inv_chol = Xi.transpose() * gi_simga_inv_chol; |
55 | 80376x |
matrix<double> ai = matrix<double>::Identity(n_visits_i, n_visits_i); |
56 | 80376x |
if (type != "Empirical") { |
57 | 23640x |
ai = ai - xt_gi_simga_inv_chol.transpose() * beta_vcov_matrix * xt_gi_simga_inv_chol; |
58 |
} |
|
59 | 80376x |
if (type == "Empirical-Jackknife") { |
60 | 14184x |
ai = ai.inverse(); |
61 | 66192x |
} else if(type == "Empirical-Bias-Reduced") { |
62 | 9456x |
ai = pseudoInverseSqrt(ai); |
63 |
} |
|
64 | 80376x |
matrix<double> xta = xt_gi_simga_inv_chol * ai; |
65 | 80376x |
matrix<double> z = xta * gi_simga_inv_chol.transpose() * residual_i; |
66 | 80376x |
meat = meat + z * z.transpose(); |
67 | 80376x |
xt_g_simga_inv_chol.block(0, start_i, p, n_visits_i) = xt_gi_simga_inv_chol; |
68 | 80376x |
ax.block(start_i, 0, n_visits_i, p) = xta.transpose(); |
69 |
} |
|
70 | 408x |
matrix<double> h = xt_g_simga_inv_chol.transpose() * beta_vcov_matrix * xt_g_simga_inv_chol; |
71 | 408x |
matrix<double> imh = matrix<double>::Identity(n_observations, n_observations) - h; |
72 | 408x |
matrix<double> ax_xtx = ax * beta_vcov_matrix; |
73 | 408x |
matrix<double> g = matrix<double>::Zero(n_observations, p * n_subjects); |
74 | 80784x |
for (int i = 0; i < n_subjects; i++) { |
75 | 80376x |
int start_i = subject_zero_inds[i]; |
76 | 80376x |
int n_visits_i = subject_n_visits[i]; |
77 | 80376x |
g.block(0, i * p, n_observations, p) = imh.block(0, start_i, n_observations, n_visits_i) * ax_xtx.block(start_i, 0, n_visits_i, p); |
78 |
} |
|
79 | 408x |
matrix<double> gtvg = g.transpose() * g; |
80 |
// beta_vcov already takes gi into consideration. |
|
81 | 408x |
matrix<double> ret = beta_vcov_matrix * meat * beta_vcov_matrix; |
82 |
// Removed because this scale factor can be applied manually by the user; |
|
83 |
// it is not important here. |
|
84 |
//if (jackknife) { |
|
85 |
// ret = ret * (n_subjects - 1) / n_subjects; |
|
86 |
//} |
|
87 |
return List::create( |
|
88 | 816x |
Named("cov") = as_num_matrix_rcpp(ret), |
89 | 816x |
Named("df_mat") = as_num_matrix_rcpp(gtvg) |
90 |
); |
|
91 |
} |
1 |
#ifndef UTILS_INCLUDED_ |
|
2 |
#define UTILS_INCLUDED_ |
|
3 |
#include <Rcpp.h> |
|
4 |
#define INCLUDE_RCPP |
|
5 |
#include "tmb_includes.h" |
|
6 | ||
7 |
#define as_num_matrix_tmb as_matrix<matrix<double>, NumericMatrix> |
|
8 |
#define as_num_matrix_rcpp as_matrix<NumericMatrix, matrix<double>> |
|
9 |
#define as_num_vector_tmb as_vector<vector<double>, NumericVector> |
|
10 |
#define as_num_vector_rcpp as_vector<NumericVector, vector<double>> |
|
11 | ||
12 |
// Obtain a submatrix from index vectors. |
|
13 | ||
14 |
template <typename T1, typename T2> |
|
15 | 311443x |
T1 subset_matrix(T1 input, T2 index1, T2 index2) { |
16 |
#if EIGEN_VERSION_AT_LEAST(3,4,0) |
|
17 | 311443x |
T1 ret = input(index1, index2); |
18 |
#else |
|
19 |
T1 ret(index1.size(), index2.size()); |
|
20 |
for (decltype(index1.size()) i = 0; i < index1.size(); i++) { |
|
21 |
for (decltype(index2.size()) j = 0; j < index2.size(); j++) { |
|
22 |
ret(i, j) = input(index1[i], index2[j]); |
|
23 |
} |
|
24 |
} |
|
25 |
#endif |
|
26 | 311443x |
return ret; |
27 |
} |
|
28 | ||
29 |
template <typename T1, typename T2> |
|
30 | 239826x |
T1 subset_matrix(T1 input, T2 index1) { |
31 |
#if EIGEN_VERSION_AT_LEAST(3,4,0) |
|
32 | 239826x |
T1 ret = input(index1, Eigen::all); |
33 |
#else |
|
34 |
T1 ret(index1.size(), input.cols()); |
|
35 |
for (decltype(index1.size()) i = 0; i < index1.size(); i++) { |
|
36 |
for (int j = 0; j < input.cols(); j++) { |
|
37 |
ret(i, j) = input(index1[i], j); |
|
38 |
} |
|
39 |
} |
|
40 |
#endif |
|
41 | 239826x |
return ret; |
42 |
} |
|
43 | ||
44 | ||
45 |
// Conversion between Rcpp and Eigen/TMB vectors and matrices. |
|
46 |
template <typename T1, typename T2> |
|
47 | 607139x |
T1 as_vector(T2 input) { |
48 | 607139x |
T1 ret(input.size()); |
49 | 2006688x |
for (int i = 0; i < input.size(); i++) { |
50 | 1399549x |
ret(i) = input(i); |
51 |
} |
|
52 | 607139x |
return ret; |
53 |
} |
|
54 | ||
55 |
template <typename T1, typename T2> |
|
56 | 415296x |
T1 as_matrix(T2 input) { |
57 | 415296x |
T1 ret(input.rows(), input.cols()); |
58 | 5769824x |
for (int i = 0; i < input.rows(); i++) { |
59 | 50241356x |
for (int j = 0; j < input.cols(); j++) { |
60 | 44886828x |
ret(i,j) = input(i,j); |
61 |
} |
|
62 |
} |
|
63 | 415296x |
return ret; |
64 |
} |
|
65 | ||
66 |
template <typename T> |
|
67 | 719474x |
T segment(T input, int start, int n) { |
68 | 719474x |
T ret(n); |
69 | 3594556x |
for (int i = 0, j = start; i < n; i++, j++) { |
70 | 2875082x |
ret(i) = input(j); |
71 |
} |
|
72 | 719474x |
return ret; |
73 |
} |
|
74 | ||
75 |
// Calculate tcrossprod(lower_chol) = lower_chol * t(lower_chol). |
|
76 |
// If complete, then adds the upper triangular part to the result as well. |
|
77 |
// By default only the lower triangular part is populated, as this should be |
|
78 |
// sufficient for downstream use of the result in most cases. |
|
79 |
template <class Type> |
|
80 | 1212218x |
matrix<Type> tcrossprod(const matrix<Type>& lower_chol, bool complete = false) { |
81 | 1212218x |
int n = lower_chol.rows(); |
82 | 1212218x |
matrix<Type> result = matrix<Type>::Zero(n, n); |
83 | 1212218x |
result.template selfadjointView<Eigen::Lower>().rankUpdate(lower_chol); |
84 | 1212218x |
if (complete) { |
85 | 26225x |
result.template triangularView<Eigen::Upper>() = result.transpose(); |
86 |
} |
|
87 | 1212218x |
return result; |
88 |
} |
|
89 | ||
90 |
// Calculate crossprod(x) = t(x) * x. |
|
91 |
// Only the lower triangular part is populated, as this should be |
|
92 |
// sufficient for downstream use of the result in most cases. |
|
93 |
// Note that x does not need to be symmetric or square. |
|
94 |
template <class Type> |
|
95 | 1234712x |
matrix<Type> crossprod(const matrix<Type>& x) { |
96 | 1234712x |
int n = x.cols(); |
97 | 1234712x |
matrix<Type> result = matrix<Type>::Zero(n, n); |
98 | 1234712x |
result.template selfadjointView<Eigen::Lower>().rankUpdate(x.transpose()); |
99 | 1234712x |
return result; |
100 |
} |
|
101 | ||
102 |
// Mapping from real values to correlation parameters in (-1, 1). |
|
103 |
template <class T> |
|
104 | 6232x |
vector<T> map_to_cor(const vector<T>& theta) { |
105 | 6232x |
return theta / sqrt(T(1.0) + theta * theta); |
106 |
} |
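// The inverse mapping is theta = rho / sqrt(1 - rho^2), so every correlation in (-1, 1) |
// corresponds to exactly one unconstrained theta. |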
|
107 | ||
108 |
// Generic correlation function class containing and initializing correlation |
|
109 |
// values from variance parameters theta. |
|
110 |
template <class T> |
|
111 |
struct generic_corr_fun { |
|
112 |
const vector<T> corr_values; |
|
113 | ||
114 | 6224x |
generic_corr_fun(const vector<T>& theta) : |
115 | 6224x |
corr_values(map_to_cor(theta)) {} |
116 |
}; |
|
117 | ||
118 |
// Cholesky factor of a correlation matrix built from a correlation function. |
|
119 |
// This is used directly for homogeneous covariance matrices. |
|
120 |
template <class T, template<class> class F> |
|
121 | 6212x |
matrix<T> get_corr_mat_chol(int n_visits, const F<T>& corr_fun) { |
122 | 6212x |
matrix<T> correlation(n_visits, n_visits); |
123 | 6212x |
correlation.setIdentity(); |
124 | 30924x |
for(int i = 0; i < n_visits; i++) { |
125 | 61608x |
for(int j = 0; j < i; j++){ |
126 | 36896x |
correlation(i, j) = corr_fun(i, j); |
127 |
} |
|
128 |
} |
|
129 | 6212x |
Eigen::LLT<Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> > correlation_chol(correlation); |
130 | 6212x |
matrix<T> L = correlation_chol.matrixL(); |
131 | 12424x |
return L; |
132 |
} |
|
133 | ||
134 |
// Heterogeneous covariance matrix calculation given vector of standard deviations (sd_values) |
|
135 |
// and a correlation function (corr_fun). |
|
136 |
template <class T, template<class> class F> |
|
137 | 1858x |
matrix<T> get_heterogeneous_cov(const vector<T>& sd_values, const F<T>& corr_fun) { |
138 | 1858x |
matrix<T> correlation_chol = get_corr_mat_chol(sd_values.size(), corr_fun); |
139 | 1858x |
Eigen::DiagonalMatrix<T,Eigen::Dynamic,Eigen::Dynamic> D = sd_values.matrix().asDiagonal(); |
140 | 1858x |
matrix<T> result = D * correlation_chol; |
141 | 3716x |
return result; |
142 |
} |
|
143 | ||
144 |
// Obtain the Euclidean distance matrix between the rows of the coordinates. |
|
145 |
template <class T> |
|
146 | 33703x |
matrix<T> euclidean(const matrix<T>& coordinates) { |
147 | 33703x |
matrix<T> result(coordinates.rows(), coordinates.rows()); |
148 | 126598x |
for (int i = 0; i < coordinates.rows(); i++) { |
149 | 92895x |
result(i, i) = 0; |
150 | 188400x |
for (int j = 0; j < i; j ++) { |
151 | 95505x |
vector<T> diff = coordinates.row(i) - coordinates.row(j); |
152 | 95505x |
T d = sqrt((diff * diff).sum()); |
153 | 95505x |
result(i, j) = d; |
154 | 95505x |
result(j, i) = d; |
155 |
} |
|
156 |
} |
|
157 | 33703x |
return result; |
158 |
} |
|
159 | ||
160 |
// Element wise power function of a matrix |
|
161 |
template <class T> |
|
162 | 1584x |
Eigen::Matrix<T, -1, -1> cpow(const Eigen::Matrix<T, -1, -1> & input, double p) { |
163 | 1584x |
Eigen::Matrix<T, -1, -1> ret = Eigen::Matrix<T, -1, -1>(input.rows(), input.cols()); |
164 | 5908x |
for (int i = 0; i < ret.rows(); i ++) { |
165 | 8664x |
for (int j = 0; j < ret.cols(); j++) { |
166 | 4340x |
ret(i, j) = std::pow(input(i, j), p); |
167 |
} |
|
168 |
} |
|
169 | 1584x |
return ret; |
170 |
} |
|
171 | ||
172 |
// Calculate the square root of the pseudo-inverse of a matrix, |
|
173 |
// adapted from the pseudo-inverse computation recommended by the Eigen developers. |
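// Given the SVD input = U * diag(s) * V^T, this returns V * diag(1 / sqrt(s)) * U^T, with |
// singular values below the tolerance treated as zero. |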
|
174 |
template<typename T> |
|
175 | 1580x |
matrix<T> pseudoInverseSqrt(const matrix<T> &input, double epsilon = std::numeric_limits<double>::epsilon()) { |
176 | 1580x |
Eigen::Matrix<T, -1, -1> eigen_mat = as_matrix<Eigen::Matrix<T, -1, -1>, matrix<T>>(input); |
177 | 1580x |
Eigen::JacobiSVD< Eigen::Matrix<T, -1, -1> > svd(eigen_mat ,Eigen::ComputeFullU | Eigen::ComputeFullV); |
178 | 1580x |
double tolerance = epsilon * std::max(input.cols(), input.rows()) *svd.singularValues().array().abs()(0); |
179 | 1580x |
auto singular_vals = Matrix<T,-1,-1>((svd.singularValues().array() > tolerance).select(svd.singularValues().array().inverse(), 0).matrix()); |
180 | 1580x |
Eigen::Matrix<T, -1, -1> ret_eigen = svd.matrixV() * cpow(singular_vals, 0.5).asDiagonal() * svd.matrixU().adjoint(); |
181 | 3160x |
return as_matrix<matrix<T>, Eigen::Matrix<T, -1, -1>>(ret_eigen); |
182 |
} |
|
183 | ||
184 |
#endif |
1 |
#include <RcppEigen.h> |
|
2 |
#include "utils.h" |
|
3 | ||
4 |
using namespace Rcpp; |
|
5 | ||
6 |
#ifdef RCPP_USE_GLOBAL_ROSTREAM |
|
7 |
Rcpp::Rostream<true>& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); |
|
8 |
Rcpp::Rostream<false>& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); |
|
9 |
#endif |
|
10 | ||
11 |
List get_pqr(List mmrm_fit, NumericVector theta); |
|
12 | 517x |
RcppExport SEXP _mmrm_get_pqr(SEXP mmrm_fit_SEXP, SEXP theta_SEXP) { |
13 | 517x |
BEGIN_RCPP |
14 | 517x |
Rcpp::RObject rcpp_result_gen; |
15 | 517x |
Rcpp::RNGScope rcpp_rngScope_gen; |
16 | 517x |
Rcpp::traits::input_parameter< List >::type mmrm_fit(mmrm_fit_SEXP); |
17 | 517x |
Rcpp::traits::input_parameter< NumericVector >::type theta(theta_SEXP); |
18 | 517x |
rcpp_result_gen = Rcpp::wrap(get_pqr(mmrm_fit, theta)); |
19 | 517x |
return rcpp_result_gen; |
20 | 517x |
END_RCPP |
21 |
} |
|
22 | ||
23 |
List get_jacobian(List mmrm_fit, NumericVector theta, NumericMatrix beta_vcov); |
|
24 | 902x |
RcppExport SEXP _mmrm_get_jacobian(SEXP mmrm_fit_SEXP, SEXP theta_SEXP, SEXP beta_vcov_SEXP) { |
25 | 902x |
BEGIN_RCPP |
26 | 902x |
Rcpp::RObject rcpp_result_gen; |
27 | 902x |
Rcpp::RNGScope rcpp_rngScope_gen; |
28 | 902x |
Rcpp::traits::input_parameter< List >::type mmrm_fit(mmrm_fit_SEXP); |
29 | 902x |
Rcpp::traits::input_parameter< NumericVector >::type theta(theta_SEXP); |
30 | 902x |
Rcpp::traits::input_parameter< NumericMatrix >::type beta_vcov(beta_vcov_SEXP); |
31 | 902x |
rcpp_result_gen = Rcpp::wrap(get_jacobian(mmrm_fit, theta, beta_vcov)); |
32 | 902x |
return rcpp_result_gen; |
33 | 902x |
END_RCPP |
34 |
} |
|
35 | ||
36 |
List get_empirical(List mmrm_fit, NumericVector theta, NumericVector beta, NumericMatrix beta_vcov, std::string type); |
|
37 | 374x |
RcppExport SEXP _mmrm_get_empirical(SEXP mmrm_fit_SEXP, SEXP theta_SEXP, SEXP beta_SEXP, SEXP beta_vcov_SEXP, SEXP type_SEXP) { |
38 | 374x |
BEGIN_RCPP |
39 | 374x |
Rcpp::RObject rcpp_result_gen; |
40 | 374x |
Rcpp::RNGScope rcpp_rngScope_gen; |
41 | 374x |
Rcpp::traits::input_parameter< List >::type mmrm_fit(mmrm_fit_SEXP); |
42 | 374x |
Rcpp::traits::input_parameter< NumericVector >::type theta(theta_SEXP); |
43 | 374x |
Rcpp::traits::input_parameter< NumericVector >::type beta(beta_SEXP); |
44 | 374x |
Rcpp::traits::input_parameter< NumericMatrix >::type beta_vcov(beta_vcov_SEXP); |
45 | 374x |
Rcpp::traits::input_parameter< std::string >::type type(type_SEXP); |
46 | 374x |
rcpp_result_gen = Rcpp::wrap(get_empirical(mmrm_fit, theta, beta, beta_vcov, type)); |
47 | 374x |
return rcpp_result_gen; |
48 | 374x |
END_RCPP |
49 |
} |
|
50 | ||
51 |
List predict(List mmrm_fit, NumericVector theta, NumericVector beta, NumericMatrix beta_vcov); |
|
52 | 18656x |
RcppExport SEXP _mmrm_predict(SEXP mmrm_fit_SEXP, SEXP theta_SEXP, SEXP beta_SEXP, SEXP beta_vcov_SEXP) { |
53 | 18656x |
BEGIN_RCPP |
54 | 18656x |
Rcpp::RObject rcpp_result_gen; |
55 | 18656x |
Rcpp::RNGScope rcpp_rngScope_gen; |
56 | 18656x |
Rcpp::traits::input_parameter< List >::type mmrm_fit(mmrm_fit_SEXP); |
57 | 18656x |
Rcpp::traits::input_parameter< NumericVector >::type theta(theta_SEXP); |
58 | 18656x |
Rcpp::traits::input_parameter< NumericVector >::type beta(beta_SEXP); |
59 | 18656x |
Rcpp::traits::input_parameter< NumericMatrix >::type beta_vcov(beta_vcov_SEXP); |
60 | 18656x |
rcpp_result_gen = Rcpp::wrap(predict(mmrm_fit, theta, beta, beta_vcov)); |
61 | 18656x |
return rcpp_result_gen; |
62 | 18656x |
END_RCPP |
63 |
} |
|
64 | ||
65 | ||
66 |
RcppExport SEXP run_testthat_tests(SEXP); |
|
67 | ||
68 |
static const R_CallMethodDef CallEntries[] = { |
|
69 |
{"_mmrm_get_pqr", (DL_FUNC) &_mmrm_get_pqr, 2}, |
|
70 |
{"_mmrm_get_jacobian", (DL_FUNC) &_mmrm_get_jacobian, 3}, |
|
71 |
{"_mmrm_get_empirical", (DL_FUNC) &_mmrm_get_empirical, 5}, |
|
72 |
{"_mmrm_predict", (DL_FUNC) &_mmrm_predict, 4}, |
|
73 |
{"run_testthat_tests", (DL_FUNC) &run_testthat_tests, 1}, |
|
74 |
TMB_CALLDEFS, |
|
75 |
{NULL, NULL, 0} |
|
76 |
}; |
|
77 | ||
78 | 44x |
RcppExport void R_init_mmrm(DllInfo *dll) { |
79 | 44x |
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); |
80 | 44x |
R_useDynamicSymbols(dll, FALSE); |
81 |
#ifdef TMB_CCALLABLES |
|
82 | 44x |
TMB_CCALLABLES("mmrm"); |
83 |
#endif |
|
84 |
} |
1 |
#include "derivatives.h" |
|
2 | ||
3 |
using namespace Rcpp; |
|
4 |
using std::string; |
|
5 | ||
6 |
// Obtain the Jacobian from an mmrm fit, given theta. |
|
7 | 820x |
List get_jacobian(List mmrm_fit, NumericVector theta, NumericMatrix beta_vcov) { |
8 | 1640x |
NumericMatrix x = mmrm_fit["x_matrix"]; |
9 | 820x |
matrix<double> x_matrix = as_num_matrix_tmb(x); |
10 | 1640x |
IntegerVector subject_zero_inds = mmrm_fit["subject_zero_inds"]; |
11 | 820x |
int n_subjects = mmrm_fit["n_subjects"]; |
12 | 1640x |
IntegerVector subject_n_visits = mmrm_fit["subject_n_visits"]; |
13 | 820x |
int n_visits = mmrm_fit["n_visits"]; |
14 | 1640x |
String cov_type = mmrm_fit["cov_type"]; |
15 | 820x |
int is_spatial_int = mmrm_fit["is_spatial_int"]; |
16 | 820x |
bool is_spatial = is_spatial_int == 1; |
17 | 820x |
int n_groups = mmrm_fit["n_groups"]; |
18 | 1640x |
IntegerVector subject_groups = mmrm_fit["subject_groups"]; |
19 | 1640x |
NumericVector weights_vector = mmrm_fit["weights_vector"]; |
20 | 1640x |
NumericMatrix coordinates = mmrm_fit["coordinates"]; |
21 | 820x |
matrix<double> coords = as_num_matrix_tmb(coordinates); |
22 | 820x |
matrix<double> beta_vcov_m = as_num_matrix_tmb(beta_vcov); |
23 | 820x |
vector<double> theta_v = as_num_vector_tmb(theta); |
24 | 820x |
vector<double> G_sqrt = as_num_vector_tmb(sqrt(weights_vector)); |
25 | 820x |
int n_theta = theta.size(); |
26 | 820x |
int theta_size_per_group = n_theta / n_groups; |
27 | 820x |
int p = x.cols(); |
28 | 820x |
matrix<double> P = matrix<double>::Zero(p * n_theta, p); |
29 |
// Use a map to hold these base class pointers (which can also point to derived class objects). |
|
30 | 1640x |
auto derivatives_by_group = cache_obj<double, derivatives_base<double>, derivatives_sp_exp<double>, derivatives_nonspatial<double>>(theta_v, n_groups, is_spatial, cov_type, n_visits); |
31 | 160390x |
for (int i = 0; i < n_subjects; i++) { |
32 | 159570x |
int start_i = subject_zero_inds[i]; |
33 | 159570x |
int n_visits_i = subject_n_visits[i]; |
34 | 159570x |
std::vector<int> visit_i(n_visits_i); |
35 | 159570x |
matrix<double> dist_i(n_visits_i, n_visits_i); |
36 | 159570x |
if (!is_spatial) { |
37 | 573600x |
for (int j = 0; j < n_visits_i; j++) { |
38 | 419940x |
visit_i[j] = int(coordinates(j + start_i, 0)); |
39 |
} |
|
40 |
} else { |
|
41 | 5910x |
dist_i = euclidean(matrix<double>(coords.block(start_i, 0, n_visits_i, coordinates.cols()))); |
42 |
} |
|
43 | 159570x |
int subject_group_i = subject_groups[i] - 1; |
44 | 319140x |
matrix<double> sigma_inv_d1 = derivatives_by_group.cache[subject_group_i]->get_inverse_derivative(visit_i, dist_i); |
45 | ||
46 | 159570x |
matrix<double> Xi = x_matrix.block(start_i, 0, n_visits_i, x_matrix.cols()); |
47 | 159570x |
auto gi_sqrt_root = G_sqrt.segment(start_i, n_visits_i).matrix().asDiagonal(); |
48 | 1214050x |
for (int r = 0; r < theta_size_per_group; r ++) { |
49 | 1054480x |
auto Pi = Xi.transpose() * gi_sqrt_root * sigma_inv_d1.block(r * n_visits_i, 0, n_visits_i, n_visits_i) * gi_sqrt_root * Xi; |
50 | 1054480x |
P.block(r * p + theta_size_per_group * subject_group_i * p, 0, p, p) += Pi; |
51 |
} |
|
52 |
} |
|
53 | 820x |
if (Rcpp::any(Rcpp::is_infinite(as_num_matrix_rcpp(P)))) { |
54 | ! |
stop("Jacobian is not finite. The model can be over-parameterized."); |
55 |
} |
|
56 | 820x |
auto ret = List::create(); |
57 | 6390x |
for (int i = 0; i < n_theta; i++) { |
58 |
// P is the derivative of (X^T W X); the covariance is (X^T W X)^{-1}, so its derivative is -beta_vcov * P * beta_vcov. |
|
59 | 5570x |
ret.push_back(as_num_matrix_rcpp(-beta_vcov_m * P.block(i * p, 0, p, p) * beta_vcov_m)); |
60 |
} |
|
61 | 1640x |
return ret; |
62 |
} |
1 |
#include "derivatives.h" |
|
2 | ||
3 |
using namespace Rcpp; |
|
4 |
using std::string; |
|
5 |
// Obtain the P, Q, R elements from an mmrm fit, given theta. |
|
6 | 423x |
List get_pqr(List mmrm_fit, NumericVector theta) { |
7 | 846x |
NumericMatrix x = mmrm_fit["x_matrix"]; |
8 | 423x |
matrix<double> x_matrix = as_num_matrix_tmb(x); |
9 | 846x |
IntegerVector subject_zero_inds = mmrm_fit["subject_zero_inds"]; |
10 | 423x |
int n_subjects = mmrm_fit["n_subjects"]; |
11 | 846x |
IntegerVector subject_n_visits = mmrm_fit["subject_n_visits"]; |
12 | 423x |
int n_visits = mmrm_fit["n_visits"]; |
13 | 846x |
String cov_type = mmrm_fit["cov_type"]; |
14 | 423x |
int is_spatial_int = mmrm_fit["is_spatial_int"]; |
15 | 423x |
bool is_spatial = is_spatial_int == 1; |
16 | 423x |
int n_groups = mmrm_fit["n_groups"]; |
17 | 846x |
IntegerVector subject_groups = mmrm_fit["subject_groups"]; |
18 | 846x |
NumericVector weights_vector = mmrm_fit["weights_vector"]; |
19 | 846x |
NumericMatrix coordinates = mmrm_fit["coordinates"]; |
20 | 423x |
matrix<double> coords = as_num_matrix_tmb(coordinates); |
21 | 423x |
vector<double> theta_v = as_num_vector_tmb(theta); |
22 | 423x |
vector<double> G_sqrt = as_num_vector_tmb(sqrt(weights_vector)); |
23 | 423x |
int n_theta = theta.size(); |
24 | 423x |
int theta_size_per_group = n_theta / n_groups; |
25 | 423x |
int p = x.cols(); |
26 | 423x |
matrix<double> P = matrix<double>::Zero(p * n_theta, p); |
27 | 423x |
matrix<double> Q = matrix<double>::Zero(p * theta_size_per_group * n_theta, p); |
28 | 423x |
matrix<double> R = matrix<double>::Zero(p * theta_size_per_group * n_theta, p); |
29 |
// Use a map to hold these base class pointers (which can also point to derived class objects). |
|
30 | 846x |
auto derivatives_by_group = cache_obj<double, derivatives_base<double>, derivatives_sp_exp<double>, derivatives_nonspatial<double>>(theta_v, n_groups, is_spatial, cov_type, n_visits); |
31 | 83754x |
for (int i = 0; i < n_subjects; i++) { |
32 | 83331x |
int start_i = subject_zero_inds[i]; |
33 | 83331x |
int n_visits_i = subject_n_visits[i]; |
34 | 83331x |
std::vector<int> visit_i(n_visits_i); |
35 | 83331x |
matrix<double> dist_i(n_visits_i, n_visits_i); |
36 | 83331x |
if (!is_spatial) { |
37 | 277452x |
for (int j = 0; j < n_visits_i; j++) { |
38 | 202986x |
visit_i[j] = int(coordinates(j + start_i, 0)); |
39 |
} |
|
40 |
} else { |
|
41 | 8865x |
dist_i = euclidean(matrix<double>(coords.block(start_i, 0, n_visits_i, coordinates.cols()))); |
42 |
} |
|
43 | 83331x |
int subject_group_i = subject_groups[i] - 1; |
44 | 83331x |
matrix<double> sigma_inv, sigma_d1, sigma_d2, sigma, sigma_inv_d1; |
45 | ||
46 | 83331x |
sigma_inv = derivatives_by_group.cache[subject_group_i]->get_sigma_inverse(visit_i, dist_i); |
47 | 83331x |
sigma_d1 = derivatives_by_group.cache[subject_group_i]->get_sigma_derivative1(visit_i, dist_i); |
48 | 83331x |
sigma_d2 = derivatives_by_group.cache[subject_group_i]->get_sigma_derivative2(visit_i, dist_i); |
49 | 83331x |
sigma = derivatives_by_group.cache[subject_group_i]->get_sigma(visit_i, dist_i); |
50 | 83331x |
sigma_inv_d1 = derivatives_by_group.cache[subject_group_i]->get_inverse_derivative(visit_i, dist_i); |
51 | ||
52 | 83331x |
matrix<double> Xi = x_matrix.block(start_i, 0, n_visits_i, x_matrix.cols()); |
53 | 83331x |
auto gi_sqrt_root = G_sqrt.segment(start_i, n_visits_i).matrix().asDiagonal(); |
54 | 455661x |
for (int r = 0; r < theta_size_per_group; r ++) { |
55 | 372330x |
auto Pi = Xi.transpose() * gi_sqrt_root * sigma_inv_d1.block(r * n_visits_i, 0, n_visits_i, n_visits_i) * gi_sqrt_root * Xi; |
56 | 372330x |
P.block(r * p + theta_size_per_group * subject_group_i * p, 0, p, p) += Pi; |
57 | 2620494x |
for (int j = 0; j < theta_size_per_group; j++) { |
58 | 2248164x |
auto Qij = Xi.transpose() * gi_sqrt_root * sigma_inv_d1.block(r * n_visits_i, 0, n_visits_i, n_visits_i) * sigma * sigma_inv_d1.block(j * n_visits_i, 0, n_visits_i, n_visits_i) * gi_sqrt_root * Xi; |
59 |
// Switch the order so that, within the stacked matrix of partial(i) and partial(j) blocks, the j index increases fastest. |
|
60 | 2248164x |
Q.block((r * theta_size_per_group + j + theta_size_per_group * theta_size_per_group * subject_group_i) * p, 0, p, p) += Qij; |
61 | 2248164x |
auto Rij = Xi.transpose() * gi_sqrt_root * sigma_inv * sigma_d2.block((j * theta_size_per_group + r) * n_visits_i, 0, n_visits_i, n_visits_i) * sigma_inv * gi_sqrt_root * Xi; |
62 | 2248164x |
R.block((r * theta_size_per_group + j + theta_size_per_group * theta_size_per_group * subject_group_i) * p, 0, p, p) += Rij; |
63 |
} |
|
64 |
} |
|
65 |
} |
|
66 |
return List::create( |
|
67 | 846x |
Named("P") = as_num_matrix_rcpp(P), |
68 | 846x |
Named("Q") = as_num_matrix_rcpp(Q), |
69 | 846x |
Named("R") = as_num_matrix_rcpp(R) |
70 |
); |
|
71 |
} |
1 |
#include "covariance.h" |
|
2 |
#include "chol_cache.h" |
|
3 |
// Definition: |
|
4 |
// |
|
5 |
// Y_i = X_i * beta + epsilon_i, i = 1, ..., n_subjects |
|
6 |
// where Y_i = (Y_i1, ..., Y_im) are the observations of subject i over the m |
|
7 |
// timepoints, |
|
8 |
// |
|
9 |
// and for the epsilon_i's : |
|
10 |
// epsilon_i ~iid N(0, Sigma) where Sigma is a covariance matrix |
|
11 |
// parameterized by a vector theta. |
|
12 |
// |
|
13 |
// Note: This is a special generalized least squares model |
|
14 |
// Y = X * beta + epsilon, |
|
15 |
// where we have a block structure for the covariance matrix of the epsilon |
|
16 |
// vector. |
|
17 |
// |
|
18 |
// beta itself is not a parameter for TMB here: |
|
19 |
// - For maximum likelihood estimation: |
|
20 |
// Given theta and therefore Sigma, and writing W = Sigma^-1, we can determine |
|
21 |
// the beta optimizing the likelihood via the weighted least squares equation |
|
22 |
// (X^T W X) beta = X^T W Y. |
|
23 |
// - For restricted maximum likelihood estimation: |
|
24 |
// Given theta, beta is integrated out from the likelihood. Weighted least |
|
25 |
// squares results are used to calculate integrated log likelihood. |
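// |
// In terms of the quantities computed below, the negative log-likelihood is |
//   ML:   n / 2 * log(2 * pi) + sum(log(diag(L_i))) + 1 / 2 * ||epsilonTilde||^2, |
//   REML: (n - p) / 2 * log(2 * pi) + sum(log(diag(L_i))) + 1 / 2 * log det(X^T W X) |
//         + 1 / 2 * (||y_vec_tilde||^2 - ||x_mat_tilde * beta||^2), |
// where the L_i are the weighted per-subject Cholesky factors, n = nrow(X) and p = ncol(X). |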
|
26 | ||
27 |
template<class Type> |
|
28 | 50432x |
Type objective_function<Type>::operator() () |
29 |
{ |
|
30 |
// Read data from R. |
|
31 | 50432x |
DATA_MATRIX(x_matrix); // Model matrix (dimension n x p). |
32 |
DATA_VECTOR(y_vector); // Response vector (length n). |
|
33 |
DATA_VECTOR(weights_vector); // Weights vector (length n). |
|
34 | 50432x |
DATA_MATRIX(coordinates); // Coordinates matrix. |
35 | 50432x |
DATA_INTEGER(n_visits); // Number of visits, which is the dimension of the covariance matrix. |
36 | 50432x |
DATA_INTEGER(n_subjects); // Number of subjects. |
37 | 50432x |
DATA_IVECTOR(subject_zero_inds); // Starting indices for each subject (0-based) (length n_subjects). |
38 | 50432x |
DATA_IVECTOR(subject_n_visits); // Number of observed visits for each subject (length n_subjects). |
39 | 50432x |
DATA_STRING(cov_type); // Covariance type name. |
40 | 50432x |
DATA_INTEGER(is_spatial_int); // Spatial covariance (1)? Otherwise non-spatial covariance. |
41 | 50432x |
DATA_INTEGER(reml); // REML (1)? Otherwise ML (0). |
42 | 50432x |
DATA_FACTOR(subject_groups); // Subject groups vector (0-based) (length n_subjects). |
43 | 50432x |
DATA_INTEGER(n_groups); // number of total groups. |
44 |
// Read parameters from R. |
|
45 | 50432x |
PARAMETER_VECTOR(theta); // Covariance parameters (length k). Contents depend on covariance type. |
46 | ||
47 |
// X^T W X will be calculated incrementally into here. |
|
48 | 50432x |
matrix<Type> XtWX = matrix<Type>::Zero(x_matrix.cols(), x_matrix.cols()); |
49 |
// X^T W Y will be calculated incrementally into here. |
|
50 | 50432x |
matrix<Type> XtWY = matrix<Type>::Zero(x_matrix.cols(), 1); |
51 |
// W^T/2 X will be saved into here. |
|
52 | 50432x |
matrix<Type> x_mat_tilde = matrix<Type>::Zero(x_matrix.rows(), x_matrix.cols()); |
53 |
// W^T/2 Y will be saved into here. |
|
54 | 50432x |
vector<Type> y_vec_tilde = vector<Type>::Zero(y_vector.rows()); |
55 |
// Sum of the log determinant will be incrementally calculated here. |
|
56 | 50432x |
Type sum_log_det = 0.0; |
57 | ||
58 |
// Convert is_spatial_int to bool. |
|
59 | 50432x |
bool is_spatial = (is_spatial_int == 1); |
60 |
// Diagonal of weighted covariance |
|
61 | 50432x |
vector<Type> diag_cov_inv_sqrt(x_matrix.rows()); |
62 |
// Cholesky group object |
|
63 | 100832x |
auto chols_group = chol_cache_groups<Type>(theta, n_groups, is_spatial, cov_type, n_visits); |
64 |
// Go through all subjects and calculate quantities initialized above. |
|
65 | 9928080x |
for (int i = 0; i < n_subjects; i++) { |
66 |
// Start index and number of visits for this subject. |
|
67 | 9877680x |
int start_i = subject_zero_inds(i); |
68 | 9877680x |
int n_visits_i = subject_n_visits(i); |
69 | 9877680x |
std::vector<int> visit_i(n_visits_i); |
70 | 9877680x |
matrix<Type> dist_i(n_visits_i, n_visits_i); |
71 | 9877680x |
if (!is_spatial) { |
72 | 35878544x |
for (int j = 0; j < n_visits_i; j++) { |
73 | 26253024x |
visit_i[j] = int(asDouble(coordinates(start_i + j, 0))); |
74 |
} |
|
75 |
} else { |
|
76 | 252160x |
dist_i = euclidean(matrix<Type>(coordinates.block(start_i, 0, n_visits_i, coordinates.cols()))); |
77 |
} |
|
78 |
// Obtain Cholesky factor Li. |
|
79 | 19755360x |
matrix<Type> Li = chols_group.cache[subject_groups[i]]->get_chol(visit_i, dist_i); |
80 |
// Calculate weighted Cholesky factor for this subject. |
|
81 | 9877680x |
Eigen::DiagonalMatrix<Type,Eigen::Dynamic,Eigen::Dynamic> Gi_inv_sqrt = weights_vector.segment(start_i, n_visits_i).cwiseInverse().sqrt().matrix().asDiagonal(); |
82 | 9877680x |
Li = Gi_inv_sqrt * Li; |
83 |
// Calculate scaled design matrix and response vector for this subject. |
|
84 | 9877680x |
matrix<Type> Xi = x_matrix.block(start_i, 0, n_visits_i, x_matrix.cols()); |
85 | 9877680x |
matrix<Type> XiTilde = Li.template triangularView<Eigen::Lower>().solve(Xi); |
86 | 9877680x |
matrix<Type> Yi = y_vector.segment(start_i, n_visits_i).matrix(); |
87 | 9877680x |
matrix<Type> YiTilde = Li.template triangularView<Eigen::Lower>().solve(Yi); |
88 | ||
89 |
// Increment quantities. |
|
90 | 9877680x |
matrix<Type> XiTildeCrossprod = crossprod(XiTilde); |
91 | 9877680x |
XtWX += XiTildeCrossprod.template triangularView<Eigen::Lower>(); |
92 | 9877680x |
XtWY += XiTilde.transpose() * YiTilde; |
93 | 9877680x |
vector<Type> LiDiag = Li.diagonal(); |
94 | 9877680x |
sum_log_det += sum(log(LiDiag)); |
95 |
// Cache the reciprocal of square root of diagonal of covariance |
|
96 | 9877680x |
diag_cov_inv_sqrt.segment(start_i, n_visits_i) = vector<Type>(tcrossprod(Li).diagonal()).rsqrt(); |
97 |
// Save stuff. |
|
98 | 9877680x |
x_mat_tilde.block(start_i, 0, n_visits_i, x_matrix.cols()) = XiTilde; |
99 | 9877680x |
y_vec_tilde.segment(start_i, n_visits_i) = YiTilde.col(0); |
100 |
} |
|
101 | ||
102 |
// Solve for beta. |
|
103 | 50400x |
Eigen::LDLT<Eigen::Matrix<Type,Eigen::Dynamic,Eigen::Dynamic> > XtWX_decomposition(XtWX); |
104 | 50400x |
matrix<Type> beta_mat = XtWX_decomposition.solve(XtWY); |
105 | 50400x |
vector<Type> beta = beta_mat.col(0); |
106 | ||
107 |
// Define scaled residuals. |
|
108 | 100800x |
vector<Type> x_mat_tilde_beta = x_mat_tilde * beta; |
109 | 50400x |
vector<Type> epsilonTilde = y_vec_tilde - x_mat_tilde_beta; |
110 | ||
111 |
// Calculate negative log-likelihood. |
|
112 | 4000x |
Type neg_log_lik; |
113 | ||
114 |
// Always extract the D vector since we want to report this below. |
|
115 | 50400x |
vector<Type> XtWX_D = XtWX_decomposition.vectorD(); |
116 | ||
117 | 50400x |
if (reml == 1) { |
118 |
// Use restricted maximum likelihood. |
|
119 | 47904x |
Type XtWX_log_det = XtWX_D.log().sum(); |
120 | 47904x |
neg_log_lik = (x_matrix.rows() - x_matrix.cols()) / 2.0 * log(2.0 * M_PI) + |
121 | 47904x |
sum_log_det + |
122 | 95808x |
XtWX_log_det / 2.0 + |
123 | 51488x |
0.5 * (y_vec_tilde * y_vec_tilde).sum() - 0.5 * (x_mat_tilde_beta * x_mat_tilde_beta).sum(); |
124 |
} else { |
|
125 |
// Use maximum likelihood. |
|
126 | 2496x |
neg_log_lik = x_matrix.rows() / 2.0 * log(2.0 * M_PI) + |
127 | 416x |
sum_log_det + |
128 | 2912x |
0.5 * (epsilonTilde * epsilonTilde).sum(); |
129 |
} |
|
130 | ||
131 |
// Report quantities to R. |
|
132 | 46400x |
REPORT(beta); |
133 | ||
134 |
// We already compute the inverse of XtWX here because we already did the |
|
135 |
// matrix decomposition above. |
|
136 | 50400x |
matrix<Type> Identity(XtWX.rows(), XtWX.cols()); |
137 | 50400x |
Identity.setIdentity(); |
138 | 50400x |
matrix<Type> beta_vcov = XtWX_decomposition.solve(Identity); |
139 | 46400x |
REPORT(beta_vcov); |
140 | ||
141 |
// Also return the decomposition components L and D. |
|
142 | 50400x |
matrix<Type> XtWX_L(XtWX.rows(), XtWX.cols()); |
143 | 50400x |
XtWX_L = XtWX_decomposition.matrixL(); |
144 | 46400x |
REPORT(XtWX_L); |
145 | 46400x |
REPORT(XtWX_D); |
146 | ||
147 |
// normalized residual |
|
148 | 46400x |
REPORT(epsilonTilde); |
149 |
// inverse square root of diagonal of covariance |
|
150 | 46400x |
REPORT(diag_cov_inv_sqrt); |
151 | 50400x |
matrix<Type> covariance_lower_chol = chols_group.get_default_chol(); |
152 | 46400x |
REPORT(covariance_lower_chol); |
153 | ||
154 | 50400x |
return neg_log_lik; |
155 |
} |
1 |
#include "covariance.h" |
|
2 |
#include "chol_cache.h" |
|
3 | ||
4 |
using namespace Rcpp; |
|
5 |
using std::string; |
|
6 |
// Obtain the conditional mean/variance of `y` given `beta`, `beta_vcov`, `theta`. |
|
7 |
// Given any `theta`, we can obtain `beta` and `beta_vcov` through the `mmrm` fit, and then |
|
8 |
// we can use the provided `theta` to obtain the covariance matrix for the residual, |
|
9 |
// and use `beta_vcov` to obtain the covariance matrix for the mean of the fit, |
|
10 |
// and use `beta` to obtain the estimate of the mean of the fit. |
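// Splitting a subject's visits into missing (1) and observed (2) parts with covariance blocks |
// Sigma_11, Sigma_12 and Sigma_22, the code below uses the conditional normal results |
//   y_hat_1 = X_1 * beta + Sigma_12 * Sigma_22^{-1} * (y_2 - X_2 * beta), |
//   Var(y_1 | y_2) = Sigma_11 - Sigma_12 * Sigma_22^{-1} * Sigma_21, |
// plus the extra variance propagated from beta_vcov for the confidence variance. |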
|
11 | 11872x |
List predict(List mmrm_data, NumericVector theta, NumericVector beta, NumericMatrix beta_vcov) { |
12 | 23744x |
NumericMatrix x = mmrm_data["x_matrix"]; |
13 | 23744x |
NumericVector y = mmrm_data["y_vector"]; |
14 | 11872x |
LogicalVector y_na = is_na(y); |
15 | 11872x |
LogicalVector y_vd = ! y_na; |
16 | 23744x |
IntegerVector subject_zero_inds = mmrm_data["subject_zero_inds"]; |
17 | 23744x |
IntegerVector subject_n_visits = mmrm_data["subject_n_visits"]; |
18 | 23744x |
String cov_type = mmrm_data["cov_type"]; |
19 | 23744x |
IntegerVector subject_groups = mmrm_data["subject_groups"]; |
20 | 23744x |
NumericMatrix coordinates = mmrm_data["coordinates"]; |
21 | ||
22 | 11872x |
matrix<double> x_matrix = as_num_matrix_tmb(x); |
23 | 11872x |
matrix<double> coordinates_m = as_num_matrix_tmb(coordinates); |
24 | 11872x |
matrix<double> beta_vcov_matrix = as_num_matrix_tmb(beta_vcov); |
25 | 11872x |
int n_subjects = mmrm_data["n_subjects"]; |
26 | 11872x |
int n_visits = mmrm_data["n_visits"]; |
27 | 11872x |
int is_spatial_int = mmrm_data["is_spatial_int"]; |
28 | 11872x |
bool is_spatial = is_spatial_int == 1; |
29 | 11872x |
int n_groups = mmrm_data["n_groups"]; |
30 | 11872x |
vector<double> beta_v = as_num_vector_tmb(beta); |
31 | 11872x |
vector<double> theta_v = as_num_vector_tmb(theta); |
32 |
// Use a map to hold these base class pointers (which can also point to derived class objects). |
|
33 | 23744x |
auto chols_group = chol_cache_groups<double>(theta_v, n_groups, is_spatial, cov_type, n_visits); |
34 | 11872x |
NumericVector y_pred = clone(y); // Predict value of y; observed use the same value. |
35 | 11872x |
NumericVector var(y.size()); // Variance of y with 0 as default. |
36 | 11872x |
NumericVector conf_var(y.size()); // Confidence interval variance. |
37 | 11872x |
List covariance; |
38 | 11872x |
List index; |
39 | 11872x |
NumericMatrix empty(0, 0); |
40 |
// Go through all subjects and calculate quantities initialized above. |
|
41 | 851256x |
for (int i = 0; i < n_subjects; i++) { |
42 |
// Start index and number of visits for this subject. |
|
43 | 839384x |
int start_i = subject_zero_inds(i); |
44 | 839384x |
int n_visits_i = subject_n_visits(i); |
45 | 839384x |
NumericVector y_i = segment(y, start_i, n_visits_i); |
46 | 839384x |
LogicalVector y_na_i = segment(y_na, start_i, n_visits_i); |
47 | 839384x |
LogicalVector y_valid_i = segment(y_vd, start_i, n_visits_i); |
48 | 839384x |
IntegerVector visit_i(n_visits_i); |
49 | 839384x |
matrix<double> dist_i(n_visits_i, n_visits_i); |
50 | 839384x |
IntegerVector index_zero_i = seq(0, n_visits_i - 1); |
51 | 839384x |
if (!is_spatial) { |
52 | 4179644x |
for (int j = 0; j < n_visits_i; j++) { |
53 | 3343060x |
visit_i(j) = int(coordinates(j + start_i, 0)); |
54 |
} |
|
55 |
} else { |
|
56 | 2800x |
visit_i = seq(start_i, start_i + n_visits_i - 1); |
57 | 2800x |
dist_i = euclidean(matrix<double>(coordinates_m.block(start_i, 0, n_visits_i, coordinates_m.cols()))); |
58 |
} |
|
59 | 839384x |
std::vector<int> visit_std = as<std::vector<int>>(visit_i); |
60 | 839384x |
IntegerVector visit_na_vec = visit_i[y_na_i]; |
61 | 839384x |
IntegerVector visit_valid_vec = visit_i[y_valid_i]; |
62 | ||
63 | 839384x |
IntegerVector index_zero_i_na = index_zero_i[y_na_i]; |
64 | 839384x |
IntegerVector index_zero_i_valid = index_zero_i[y_valid_i]; |
65 | ||
66 | 839384x |
std::vector<int> visit_na = as<std::vector<int>>(visit_na_vec); |
67 | 839384x |
std::vector<int> visit_non_na = as<std::vector<int>>(visit_valid_vec); |
68 | 839384x |
matrix<double> Xi = x_matrix.block(start_i, 0, n_visits_i, x_matrix.cols()); |
69 |
// subject_groups values start at 1, so convert to a 0-based index.
|
70 | 839384x |
int subject_group_i = subject_groups(i) - 1; |
71 | 1678768x |
matrix<double> sigma_full = chols_group.cache[subject_group_i]->get_sigma(visit_std, dist_i); |
72 | 1678768x |
matrix<double> sigma_12 = subset_matrix(sigma_full, index_zero_i_na, index_zero_i_valid); |
73 | 839384x |
matrix<double> sigma_11; |
74 | 839384x |
if (!is_spatial) { |
75 | 836584x |
sigma_11 = chols_group.cache[subject_group_i]->get_sigma(visit_na, dist_i); |
76 |
} else { |
|
77 | 2800x |
sigma_11 = subset_matrix(sigma_full, index_zero_i_na, index_zero_i_na); |
78 |
} |
|
79 | 1678768x |
matrix<double> x_na = subset_matrix(Xi, index_zero_i_na); |
80 | 1678768x |
matrix<double> x_valid = subset_matrix(Xi, index_zero_i_valid); |
81 | 1678768x |
vector<double> y_valid = as_num_vector_tmb(y_i[y_valid_i]); |
82 | 839384x |
IntegerVector na_index = index_zero_i_na + start_i; |
83 | 839384x |
vector<double> y_hat, var_conf, var_y_on_theta; |
84 | 839384x |
if (visit_valid_vec.size() == 0) { |
85 |
// No observations with valid y. |
|
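// Without any observed responses the prediction is the marginal mean X_i * beta;
// the confidence variance is diag(X_i * vcov(beta) * X_i') and the prediction
// variance additionally includes the marginal covariance diagonal.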
86 | 13727x |
y_hat = x_na * beta_v; |
87 | 13727x |
var_conf = (x_na * beta_vcov_matrix * x_na.transpose()).diagonal(); |
88 | 13727x |
var_y_on_theta = var_conf + vector<double>(sigma_full.diagonal()); |
89 | 13727x |
covariance.push_back(as_num_matrix_rcpp(sigma_full)); |
90 | 825657x |
} else if (visit_na_vec.size() > 0) { |
91 |
// There are observations with invalid y. |
|
92 | 657293x |
matrix<double> sigma_22_inv; |
93 | 657293x |
if (is_spatial) { |
94 | 2212x |
sigma_22_inv = subset_matrix(sigma_full, index_zero_i_valid, index_zero_i_valid).inverse(); // No cache available for spatial covariance. |
95 |
} else { |
|
96 | 655081x |
sigma_22_inv = chols_group.cache[subject_group_i]->get_sigma_inverse(visit_non_na, dist_i); // We have the inverse in cache for non spatial covariance. |
97 |
} |
|
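// Conditional-normal update: writing y_1 for the missing part and y_2 for the
// observed part, E[y_1 | y_2] = X_1 * beta + Sigma_12 * Sigma_22^{-1} * (y_2 - X_2 * beta)
// and Var[y_1 | y_2] = Sigma_11 - Sigma_12 * Sigma_22^{-1} * Sigma_21; the quantities
// ss, zz, y_hat and conditional_sigma below compute exactly these terms.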
98 | 657293x |
matrix<double> ss = sigma_12 * sigma_22_inv; |
99 | 657293x |
matrix<double> zz = x_na - ss * x_valid; |
100 | 657293x |
y_hat = zz * beta_v + ss * y_valid; |
101 | 657293x |
var_conf = (zz * beta_vcov_matrix * zz.transpose()).diagonal(); |
102 | 657293x |
matrix<double> conditional_sigma = sigma_11 - ss * sigma_12.transpose(); |
103 | 657293x |
var_y_on_theta = var_conf + vector<double>(conditional_sigma.diagonal()); |
104 | 657293x |
covariance.push_back(as_num_matrix_rcpp(conditional_sigma)); |
105 | 825657x |
} else if (visit_na_vec.size() == 0) { |
106 | 168364x |
covariance.push_back(empty); |
107 |
} |
|
108 | 839384x |
index.push_back(na_index); |
109 |
// Replace missing values with fitted values. If there are no missing values, `na_index` has length 0
|
110 |
// and the left-hand side is therefore not modified.
|
111 | 839384x |
y_pred[na_index] = as_num_vector_rcpp(y_hat); |
112 | 839384x |
conf_var[na_index] = as_num_vector_rcpp(var_conf); |
113 | 839384x |
var[na_index] = as_num_vector_rcpp(var_y_on_theta); |
114 |
} |
|
115 | 11872x |
NumericMatrix ret = cbind(y_pred, conf_var, var); |
116 | 11872x |
CharacterVector cnms = {"fit", "conf_var", "var"}; |
117 | 11872x |
colnames(ret) = cnms; |
118 |
return List::create( |
|
119 | 23744x |
Named("prediction") = ret, |
120 | 23744x |
Named("covariance") = covariance, |
121 | 23744x |
Named("index") = index |
122 |
); |
|
123 |
} |
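// ---------------------------------------------------------------------------
// Illustrative-only sketch (not part of the package source): the per-subject
// conditional-normal update above, written with plain Eigen for a single
// missing visit conditioned on a single observed visit. All names and numbers
// below are hypothetical.
//
// #include <Eigen/Dense>
// #include <iostream>
//
// int main() {
//   Eigen::Matrix2d sigma;
//   sigma << 1.0, 6.0,
//            6.0, 40.0;                          // covariance of (missing, observed) visit
//   Eigen::Vector2d x1(1.0, 0.5), x2(1.0, 1.5);  // design rows: missing and observed visit
//   Eigen::Vector2d beta(2.0, -1.0);             // fixed-effect estimates
//   double y2 = 1.3;                             // observed response
//   double ss = sigma(0, 1) / sigma(1, 1);       // Sigma_12 * Sigma_22^{-1}
//   double y_hat = x1.dot(beta) + ss * (y2 - x2.dot(beta));  // conditional mean
//   double cond_var = sigma(0, 0) - ss * sigma(0, 1);        // conditional variance
//   std::cout << y_hat << " " << cond_var << "\n";
//   return 0;
// }
// ---------------------------------------------------------------------------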
1 |
#include "testthat-helpers.h" |
|
2 |
#include "chol_cache.h" |
|
3 | ||
4 | 6x |
context("cholesky cache") { |
5 | 6x |
test_that("cached cholesky stores result correctly") { |
6 | 12x |
vector<double> theta {{log(1.0), log(2.0), 3.0}}; |
7 | 12x |
auto chol = lower_chol_nonspatial<double>(theta, 2, "us"); |
8 | 6x |
matrix<double> chol1_expected(2, 2); |
9 | ! |
chol1_expected << |
10 | 6x |
1.0, 0.0, |
11 | 6x |
6.0, 2.0; |
12 | 6x |
std::vector<int> vis{0, 1}; |
13 | 6x |
matrix<double> dist; |
14 | 6x |
expect_equal_matrix(chol.get_chol(vis, dist), chol1_expected); |
15 | 6x |
expect_equal_matrix(chol.chols[vis], chol1_expected); |
16 | ||
17 | 6x |
matrix<double> sigma1_expected(2, 2);
18 | ! |
sigma1_expected <<
19 | 6x |
1.0, 6.0, |
20 | 6x |
6.0, 40.0; |
21 | 6x |
expect_equal_matrix(chol.get_sigma(vis, dist), sigma1_expected);
22 | 6x |
expect_equal_matrix(chol.sigmas[vis], sigma1_expected);
23 | ||
24 | 12x |
matrix<double> sigma1_inv = chol.get_sigma_inverse(vis, dist);
25 | 6x |
matrix<double> sigma1_inv_expected(2, 2);
26 | ! |
sigma1_inv_expected <<
27 | 6x |
10.0, -1.5, |
28 | 6x |
-1.5, 0.25; |
29 | 6x |
expect_equal_matrix(sigma1_inv, sigma1_inv_expected);
30 | 6x |
expect_equal_matrix(chol.sigmas_inv[vis], sigma1_inv_expected);
31 | ||
32 | 6x |
matrix<double> chol2_expect(1, 1); |
33 | 6x |
chol2_expect << 1.0; |
34 | 6x |
std::vector<int> vis2{0}; |
35 | 6x |
expect_equal_matrix(chol.get_chol(vis2, dist), chol2_expect); |
36 | 6x |
expect_equal_matrix(chol.chols[vis2], chol2_expect); |
37 | ||
38 | 6x |
matrix<double> sigma2_expect(1, 1); |
39 | 6x |
sigma2_expect << 1.0; |
40 | 6x |
expect_equal_matrix(chol.get_sigma(vis2, dist), sigma2_expect); |
41 | 6x |
expect_equal_matrix(chol.sigmas[vis2], sigma2_expect); |
42 | ||
43 | 6x |
matrix<double> sigma2_inv_expect(1, 1); |
44 | 6x |
sigma2_inv_expect << 1.0; |
45 | 6x |
expect_equal_matrix(chol.get_sigma_inverse(vis2, dist), sigma2_inv_expect); |
46 | 6x |
expect_equal_matrix(chol.sigmas_inv[vis2], sigma2_inv_expect); |
47 |
} |
|
48 |
} |
|
49 | ||
50 | 6x |
context("cholesky group object") { |
51 | 6x |
test_that("cholesky group return result correctly") { |
52 | 12x |
vector<double> theta {{log(1.0), log(2.0), 3.0, log(2.0), log(4.0), 5}}; |
53 | 12x |
auto chol_group = chol_cache_groups<double>(theta, 2, false, "us", 2); |
54 | 6x |
matrix<double> chol1_expected(2, 2); |
55 | ! |
chol1_expected << |
56 | 6x |
1.0, 0.0, |
57 | 6x |
6.0, 2.0; |
58 | 6x |
std::vector<int> vis{0, 1}; |
59 | 6x |
matrix<double> dist; |
60 | 6x |
expect_equal_matrix(chol_group.cache[0]->get_chol(vis, dist), chol1_expected); |
61 | 6x |
matrix<double> chol2_expected(2, 2); |
62 | ! |
chol2_expected << |
63 | 6x |
2.0, 0.0, |
64 | 6x |
20.0, 4.0; |
65 | 6x |
expect_equal_matrix(chol_group.cache[1]->get_chol(vis, dist), chol2_expected); |
66 |
} |
|
67 |
} |
1 |
#ifndef TESTTHAT_WRAP_H |
|
2 |
#define TESTTHAT_WRAP_H |
|
3 |
#include <testthat.h> |
|
4 |
#include <limits> |
|
5 |
#include "utils.h" |
|
6 | ||
7 |
// Expect equal: use a default tolerance of machine epsilon^(1/4), about 1.2e-4;
|
8 |
// relative error is used for large targets, absolute error otherwise.
|
9 |
#define expect_equal(TARGET, CURRENT) \ |
|
10 |
{ \ |
|
11 |
double const eps = \ |
|
12 |
std::pow(std::numeric_limits<double>::epsilon(), 0.25); \ |
|
13 |
\ |
|
14 |
if(std::abs((TARGET)) > eps) \ |
|
15 |
expect_true(std::abs((TARGET) - (CURRENT)) / \ |
|
16 |
std::abs((TARGET)) < eps); \ |
|
17 |
else \ |
|
18 |
expect_true(std::abs((TARGET) - (CURRENT)) < eps); \ |
|
19 |
} |
|
20 | ||
21 |
#define expect_equal_eps(TARGET, CURRENT, EPS) \ |
|
22 |
{ \ |
|
23 |
if(std::abs((TARGET)) > (EPS)) \ |
|
24 |
expect_true(std::abs((TARGET) - (CURRENT)) / \ |
|
25 |
std::abs((TARGET)) < (EPS)); \ |
|
26 |
else \ |
|
27 |
expect_true(std::abs((TARGET) - (CURRENT)) < (EPS)); \ |
|
28 |
} |
|
29 | ||
30 |
template <class T> |
|
31 | 49x |
void expect_equal_matrix(const T& target, const T& current) |
32 |
{ |
|
33 | 49x |
int nrow = target.rows(); |
34 | 49x |
int ncol = target.cols(); |
35 | ||
36 | ! |
expect_true(nrow == current.rows()); |
37 | ! |
expect_true(ncol == current.cols()); |
38 | ||
39 | 184x |
for (int i = 0; i < nrow; i++) { |
40 | 500x |
for (int j = 0; j < ncol; j++) { |
41 | ! |
expect_equal(target(i, j), current(i, j)); |
42 |
} |
|
43 |
} |
|
44 |
} |
|
45 | ||
46 |
template <class T> |
|
47 | 18x |
void expect_equal_vector(const T& target, const T& current) |
48 |
{ |
|
49 | 18x |
int n = target.size(); |
50 | ! |
expect_true(n == current.size()); |
51 | ||
52 | 108x |
for (int i = 0; i < n; i++) { |
53 | ! |
expect_equal(target(i), current(i)); |
54 |
} |
|
55 |
} |
|
56 | ||
57 |
#endif |
1 |
#include "testthat-helpers.h" |
|
2 |
#include "covariance.h" |
|
3 | ||
4 | 5x |
context("unstructured") { |
5 | 5x |
test_that("get_unstructured produces expected result") { |
6 | 10x |
vector<double> theta {{log(1.0), log(2.0), 3.0}}; |
7 | 5x |
matrix<double> result = get_unstructured(theta, 2); |
8 | 5x |
matrix<double> expected(2, 2); |
9 | ! |
expected << |
10 | 5x |
1.0, 0.0, |
11 | 5x |
6.0, 2.0; |
12 | 5x |
expect_equal_matrix(result, expected); |
13 |
} |
|
14 |
} |
|
15 | ||
16 | 15x |
context("ante_dependence") { |
17 | 15x |
test_that("corr_fun_ante_dependence works as expected") { |
18 | 10x |
vector<double> theta {{1.0, 2.0}}; |
19 | 5x |
corr_fun_ante_dependence<double> test_fun(theta); |
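// theta = (1, 2) maps to adjacent correlations 1 / sqrt(2) ~= 0.7071068 and
// 2 / sqrt(5) ~= 0.8944272; the lag-2 correlation is their product, ~= 0.6324555.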
20 |
expect_equal(test_fun(1, 0), 0.7071068); |
|
21 |
expect_equal(test_fun(2, 0), 0.6324555); |
|
22 |
expect_equal(test_fun(2, 1), 0.8944272); |
|
23 |
} |
|
24 | ||
25 | 15x |
test_that("get_ante_dependence produces expected result") { |
26 | 10x |
vector<double> theta {{log(2.0), 1.0, 2.0}}; |
27 | 5x |
matrix<double> result = get_ante_dependence(theta, 3); |
28 | 5x |
matrix<double> expected(3, 3); |
29 | ! |
expected << |
30 | 5x |
2.0, 0.0, 0.0, |
31 | 5x |
sqrt(2.0), sqrt(2.0), 0.0, |
32 | 5x |
1.264911, 1.264911, 0.8944272; |
33 | 5x |
expect_equal_matrix(result, expected); |
34 |
} |
|
35 | ||
36 | 15x |
test_that("get_ante_dependence_heterogeneous produces expected result") { |
37 | 10x |
vector<double> theta {{log(1.0), log(2.0), log(3.0), 1.0, 2.0}}; |
38 | 5x |
matrix<double> result = get_ante_dependence_heterogeneous(theta, 3); |
39 | 5x |
matrix<double> expected(3, 3); |
40 | ! |
expected << |
41 | 5x |
1.0, 0.0, 0.0, |
42 | 5x |
sqrt(2.0), sqrt(2.0), 0.0, |
43 | 5x |
1.897367, 1.897367, 1.341641; |
44 | 5x |
expect_equal_matrix(result, expected); |
45 |
} |
|
46 |
} |
|
47 | ||
48 | 15x |
context("toeplitz") { |
49 | 15x |
test_that("corr_fun_toeplitz works as expected") { |
50 | 10x |
vector<double> theta {{1.0, 2.0}}; |
51 | 5x |
corr_fun_toeplitz<double> test_fun(theta); |
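// Toeplitz correlations depend only on the lag: theta = (1, 2) maps to
// rho_1 = 1 / sqrt(2) ~= 0.7071068 and rho_2 = 2 / sqrt(5) ~= 0.8944272.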
52 |
expect_equal(test_fun(1, 0), 0.7071068); |
|
53 |
expect_equal(test_fun(2, 0), 0.8944272); |
|
54 |
expect_equal(test_fun(2, 1), 0.7071068); |
|
55 |
} |
|
56 | ||
57 | 15x |
test_that("get_toeplitz produces expected result") { |
58 | 10x |
vector<double> theta {{log(2.0), 1.0, 2.0}}; |
59 | 5x |
matrix<double> result = get_toeplitz(theta, 3); |
60 | 5x |
matrix<double> expected(3, 3); |
61 | ! |
expected << |
62 | 5x |
2.0, 0.0, 0.0, |
63 | 5x |
sqrt(2.0), sqrt(2.0), 0.0, |
64 | 5x |
1.788854, 0.2111456, 0.8691476; |
65 | 5x |
expect_equal_matrix(result, expected); |
66 |
} |
|
67 | ||
68 | 15x |
test_that("get_toeplitz_heterogeneous produces expected result") { |
69 | 10x |
vector<double> theta {{log(1.0), log(2.0), log(3.0), 1.0, 2.0}}; |
70 | 5x |
matrix<double> result = get_toeplitz_heterogeneous(theta, 3); |
71 | 5x |
matrix<double> expected(3, 3); |
72 | ! |
expected << |
73 | 5x |
1.0, 0.0, 0.0, |
74 | 5x |
sqrt(2.0), sqrt(2.0), 0.0, |
75 | 5x |
2.683282, 0.3167184, 1.303721; |
76 | 5x |
expect_equal_matrix(result, expected); |
77 |
} |
|
78 |
} |
|
79 | ||
80 | 15x |
context("autoregressive") { |
81 | 15x |
test_that("corr_fun_autoregressive works as expected") { |
82 | 10x |
vector<double> theta {{1.0}}; |
83 | 5x |
corr_fun_autoregressive<double> test_fun(theta); |
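// theta = 1 maps to rho = 1 / sqrt(2); the AR(1) correlation between visits i and j
// is rho^|i - j|, e.g. rho^3 ~= 0.3535534 for visits 4 and 1.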
84 |
expect_equal(test_fun(1, 0), 1 / sqrt(2)); |
|
85 |
expect_equal(test_fun(4, 1), 0.3535534); |
|
86 |
} |
|
87 | ||
88 | 15x |
test_that("get_auto_regressive produces expected result") { |
89 | 10x |
vector<double> theta {{log(2.0), 3.0}}; |
90 | 5x |
matrix<double> result = get_auto_regressive(theta, 3); |
91 | 5x |
matrix<double> expected(3, 3); |
92 | 5x |
expected << |
93 | 5x |
2, 0, 0, |
94 | 5x |
1.89736659610103, 0.632455532033676, 0, |
95 | 5x |
1.8, 0.6, 0.632455532033676; |
96 | 5x |
expect_equal_matrix(result, expected); |
97 |
} |
|
98 | ||
99 | 15x |
test_that("get_auto_regressive_heterogeneous produces expected result") { |
100 | 10x |
vector<double> theta {{log(1.0), log(2.0), log(3.0), 2.0}}; |
101 | 5x |
matrix<double> result = get_auto_regressive_heterogeneous(theta, 3); |
102 | 5x |
matrix<double> expected(3, 3); |
103 | 5x |
expected << |
104 | 5x |
1, 0, 0, |
105 | 5x |
1.78885438199983, 0.894427190999916, 0, |
106 | 5x |
2.4, 1.2, 1.34164078649987; |
107 | 5x |
expect_equal_matrix(result, expected); |
108 |
} |
|
109 |
} |
|
110 | ||
111 | 15x |
context("compound symmetry") { |
112 | 15x |
test_that("corr_fun_compound_symmetry works as expected") { |
113 | 10x |
vector<double> theta {{1.2}}; |
114 | 5x |
corr_fun_compound_symmetry<double> test_fun(theta); |
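// Compound symmetry uses a single correlation for all pairs of distinct visits:
// rho = 1.2 / sqrt(1 + 1.2^2) ~= 0.7682213.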
115 |
expect_equal(test_fun(1, 0), 0.7682213); |
|
116 |
expect_equal(test_fun(4, 1), 0.7682213); |
|
117 |
expect_equal(test_fun(3, 1), 0.7682213); |
|
118 |
} |
|
119 | ||
120 | 15x |
test_that("get_compound_symmetry produces expected result") { |
121 | 10x |
vector<double> theta {{log(2.0), 3.0}}; |
122 | 5x |
matrix<double> result = get_compound_symmetry(theta, 3); |
123 | 5x |
matrix<double> expected(3, 3); |
124 | 5x |
expected << |
125 | 5x |
2, 0, 0, |
126 | 5x |
1.89736659610103, 0.632455532033676, 0, |
127 | 5x |
1.89736659610103, 0.307900211696917, 0.552446793489648; |
128 | 5x |
expect_equal_matrix(result, expected); |
129 |
} |
|
130 | ||
131 | 15x |
test_that("get_compound_symmetry_heterogeneous produces expected result") { |
132 | 10x |
vector<double> theta {{log(1.0), log(2.0), log(3.0), 2.0}}; |
133 | 5x |
matrix<double> result = get_compound_symmetry_heterogeneous(theta, 3); |
134 | 5x |
matrix<double> expected(3, 3); |
135 | 5x |
expected << |
136 | 5x |
1, 0, 0, |
137 | 5x |
1.78885438199983, 0.894427190999916, 0, |
138 | 5x |
2.68328157299975, 0.633436854000505, 1.18269089452568; |
139 | 5x |
expect_equal_matrix(result, expected); |
140 |
} |
|
141 |
} |
|
142 | ||
143 | 5x |
context("get_covariance_lower_chol") { |
144 | 5x |
test_that("get_covariance_lower_chol gives expected unstructured result") { |
145 | 10x |
vector<double> theta {{log(1.0), log(2.0), 3.0}}; |
146 | 10x |
matrix<double> result = get_covariance_lower_chol(theta, 2, "us"); |
147 | 5x |
matrix<double> expected = get_unstructured(theta, 2); |
148 | 5x |
expect_equal_matrix(result, expected); |
149 |
} |
|
150 |
// No other tests needed here for now. |
|
151 |
} |
1 |
#include "testthat-helpers.h" |
|
2 |
#include "derivatives.h" |
|
3 |
#include <iostream> |
|
4 | ||
5 | 8x |
context("cho_jacobian") { |
6 | 8x |
test_that("cho_jacobian works as expected") { |
7 | 8x |
chol_jacobian chol_jac_obj(2, "ar1"); |
8 | 8x |
vector<double> theta {{1.0, 1.0}}; |
9 | 4x |
vector<double> result = chol_jac_obj(theta); |
10 | 4x |
vector<double> expected(8); |
11 |
// expected obtained from numDeriv::jacobian of the ar1 sigma (R side)
|
12 | 4x |
expected << 2.718282, 1.922116, 0, 1.922116, 0.0, 0.9610578, 0.0, -0.9610578; |
13 | 4x |
expect_equal_vector(result, expected); |
14 |
} |
|
15 | 8x |
test_that("cho_jacobian's jacabian using autodiff works as expected") { |
16 | 8x |
chol_jacobian chol_jac_obj(2, "ar1"); |
17 | 8x |
vector<double> theta {{1.0, 1.0}}; |
18 | 8x |
vector<double> result = autodiff::jacobian(chol_jac_obj,theta).vec(); |
19 | 4x |
vector<double> expected(16); |
20 |
// expected obtained from applying numDeriv::jacobian twice to the ar1 sigma (R side)
|
21 | 4x |
expected << 2.718282, 1.9221164, 0, 1.9221167, 0.0, 0.9610586, 0.0, -0.9610586, 0.0, 0.9610586, 0.0, -0.9610586, 0.0, -1.4415871, 0.0, 0.4805284; |
22 | 4x |
expect_equal_vector(result, expected); |
23 |
} |
|
24 |
} |
|
25 | ||
26 | 4x |
context("derivatives_nonspatial struct works as expected") { |
27 | 4x |
test_that("derivatives_nonspatial struct correct sigma, inverse and derivatives") { |
28 | 8x |
vector<double> theta {{1.0, 1.0}}; |
29 | 8x |
auto mychol = derivatives_nonspatial<double>(theta, 4, "ar1"); |
30 | 4x |
std::vector<int> v1 {0, 1, 2}; |
31 | 4x |
std::vector<int> v_full {0, 1, 2, 3}; |
32 | 4x |
matrix<double> dist(0, 0); |
33 | 8x |
auto full_sigma = mychol.get_sigma(v_full, dist); |
34 | 8x |
auto part_sigma = mychol.get_sigma(v1, dist); |
35 | 8x |
auto full_inverse = matrix<double>(mychol.get_sigma_inverse(v_full, dist)); |
36 | 4x |
matrix<double> expected_inverse(4, 4); |
37 |
// expected values from solve() on the R side
|
38 | 4x |
expected_inverse << 0.2706706, -0.191393, 0, 0, -0.191393, 0.4060058, -0.191393, 0, 0, -0.191393, 0.4060058, -0.191393, 0,0,-0.191393, 0.2706706; |
39 | 4x |
expect_equal_matrix(expected_inverse, full_inverse); |
40 | ||
41 | 8x |
auto v1_inverse = matrix<double>(mychol.get_sigma_inverse(v1, dist)); |
42 | 4x |
matrix<double> expected_v1_inverse(3, 3); |
43 |
// expected values from solve() on the R side
|
44 | ! |
expected_v1_inverse << |
45 | 4x |
0.270670566473225, -0.191392993020822, 0, |
46 | 4x |
-0.191392993020822, 0.406005849709838, -0.191392993020822, |
47 | 4x |
0, -0.191392993020822, 0.270670566473225; |
48 | 4x |
expect_equal_matrix(expected_v1_inverse, v1_inverse); |
49 | ||
50 | 8x |
auto derivative1 = mychol.get_sigma_derivative1(v1, dist); |
51 | 4x |
matrix<double> expected_derivative1(3, 3); |
52 |
// expected values from numDeriv::jacobian on the R side
|
53 | ! |
expected_derivative1 << |
54 | 4x |
14.7781121978613, 10.4497033482434, 7.38905609893065, |
55 | 4x |
10.4497033482434, 14.7781121978613, 10.4497033482434, |
56 | 4x |
7.38905609893065, 10.4497033482434, 14.7781121978613; |
57 | 4x |
expect_equal_matrix(matrix<double>(derivative1.block(0, 0, 3, 3)), expected_derivative1); |
58 | ||
59 | 8x |
auto derivative2 = mychol.get_sigma_derivative2(v1, dist); |
60 | 4x |
matrix<double> expected_derivative2(3, 3); |
61 |
// expected values from applying numDeriv::jacobian twice on the R side
|
62 | ! |
expected_derivative2 << |
63 | 4x |
29.5562243957226, 20.8994066964867, 14.7781121978613, |
64 | 4x |
20.8994066964867, 29.5562243957226, 20.8994066964867, |
65 | 4x |
14.7781121978613, 20.8994066964867, 29.5562243957226; |
66 | 4x |
expect_equal_matrix(matrix<double>(derivative2.block(0, 0, 3, 3)), expected_derivative2); |
67 | 8x |
auto inverse_derivative = mychol.get_inverse_derivative(v1, dist); |
68 | 4x |
expect_equal_matrix(matrix<double>(inverse_derivative.block(0, 0, 3, 3)), matrix<double>(- v1_inverse * derivative1.block(0, 0, 3, 3) * v1_inverse)); |
69 |
} |
|
70 |
} |
|
71 | ||
72 | 4x |
context("derivatives_sp_exp struct works as expected") { |
73 | 4x |
test_that("derivatives_sp_exp struct gives correct sigma, inverse and derivatives") { |
74 | 8x |
vector<double> theta {{1.0, 1.0}}; |
75 | 8x |
auto sp = derivatives_sp_exp<double>(theta, "sp_exp"); |
76 | 4x |
matrix<double> dist (3, 3); |
77 | 4x |
dist << |
78 | 4x |
0, 0.5, 1, |
79 | 4x |
0.5, 0, 0.5, |
80 | 4x |
1, 0.5, 0; |
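// The expected sigma below is consistent with variance exp(theta[0]) = e and a
// correlation rho^d with rho = 1 / (1 + exp(-theta[1])) ~= 0.7310586, e.g.
// e * rho^0.5 ~= 2.324184 at distance 0.5 (parameterization inferred from these values).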
81 | 4x |
std::vector<int> v(0); |
82 | 8x |
auto sigma = sp.get_sigma(v, dist); |
83 | 4x |
matrix<double> expected_sigma(3, 3); |
84 |
// expected values from rho^dist * sigma computed on the R side
|
85 | ! |
expected_sigma << |
86 | 4x |
2.718282, 2.324184, 1.987223, |
87 | 4x |
2.324184, 2.718282, 2.324184, |
88 | 4x |
1.987223, 2.324184, 2.718282; |
89 | 4x |
expect_equal_matrix(sigma, expected_sigma); |
90 | ||
91 | 8x |
auto sigma_d1 = sp.get_sigma_derivative1(v, dist); |
92 | 4x |
matrix<double> expected_sigma_d1(6, 3); |
93 |
// expected values from numDeriv::jacobian on the R side
|
94 | ! |
expected_sigma_d1 << |
95 | 4x |
2.71828182844263, 2.32418434058079, 1.9872232498215, |
96 | 4x |
2.32418434058079, 2.71828182844263, 2.32418434058079, |
97 | 4x |
1.9872232498215, 2.32418434058079, 2.71828182844263, |
98 | 4x |
0, 0.312534720067585, 0.534446645412701, |
99 | 4x |
0.312534720067585, 0, 0.312534720067585, |
100 | 4x |
0.534446645412701, 0.312534720067585, 0; |
101 | 4x |
expect_equal_matrix(sigma_d1, expected_sigma_d1); |
102 | ||
103 | 8x |
auto sigma_d2 = sp.get_sigma_derivative2(v, dist); |
104 | 4x |
matrix<double> expected_sigma_d2(12, 3); |
105 |
// expected values from applying numDeriv::jacobian twice on the R side
|
106 | ! |
expected_sigma_d2 << |
107 | 4x |
2.718281070007, 2.32418298874968, 1.98722393345662, |
108 | 4x |
2.32418298874968, 2.718281070007, 2.32418298874968, |
109 | 4x |
1.98722393345662, 2.32418298874968, 2.718281070007, |
110 | 4x |
0, 0.312537183788863, 0.534447011054242, |
111 | 4x |
0.312537183788863, 0, 0.312537183788863, |
112 | 4x |
0.534447011054242, 0.312537183788863, 0, |
113 | 4x |
0, 0.312537183793268, 0.53444701104616, |
114 | 4x |
0.312537183793268, 0, 0.312537183793268, |
115 | 4x |
0.53444701104616, 0.312537183793268, 0, |
116 | 4x |
0, -0.1864537925375, -0.246976228442905, |
117 | 4x |
-0.1864537925375, 0, -0.1864537925375, |
118 | 4x |
-0.246976228442905, -0.1864537925375, 0; |
119 | 4x |
expect_equal_matrix(sigma_d2, expected_sigma_d2); |
120 | ||
121 | 8x |
auto sigma_inv = sp.get_sigma_inverse(v, dist); |
122 | 4x |
matrix<double> expected_sigma_inv(3, 3); |
123 |
// expected values from solve() on the R side
|
124 | ! |
expected_sigma_inv << |
125 | 4x |
1.367879, -1.169564, 0, |
126 | 4x |
-1.169564, 2.367879, -1.169564, |
127 | 4x |
0, -1.169564, 1.367879; |
128 | 4x |
expect_equal_matrix(sigma_inv, expected_sigma_inv); |
129 |
} |
|
130 |
} |
1 |
#include "testthat-helpers.h" |
|
2 |
#include "utils.h" |
|
3 | ||
4 |
using namespace Rcpp; |
|
5 | ||
6 | 2x |
context("subset_matrix") { |
7 | 2x |
test_that("subset_matrix works as expected") { |
8 | 2x |
matrix<double> mat(3, 3); |
9 | ! |
mat << |
10 | 2x |
1.0, 0.0, 0.5, |
11 | 2x |
6.0, 2.0, 1.0, |
12 | 2x |
3.0, 0.1, 0.2; |
13 | 2x |
std::vector<int> index {1, 0}; |
14 | 4x |
matrix<double> result1 = subset_matrix(mat, index, index); |
15 | 2x |
matrix<double> exp1(2, 2); |
16 | ! |
exp1 << |
17 | 2x |
2.0, 6.0, |
18 | 2x |
0.0, 1.0; |
19 | 2x |
expect_equal_matrix(result1, exp1); |
20 | ||
21 | 4x |
matrix<double> result2 = subset_matrix(mat, index); |
22 | ||
23 | 2x |
matrix<double> exp2(2, 3); |
24 | ! |
exp2 << |
25 | 2x |
6.0, 2.0, 1.0, |
26 | 2x |
1.0, 0.0, 0.5; |
27 | 2x |
expect_equal_matrix(result2, exp2); |
28 |
} |
|
29 |
} |
|
30 | ||
31 | 4x |
context("tcrossprod") { |
32 | 4x |
test_that("tcrossprod works as expected with complete") { |
33 | 2x |
matrix<double> lower_chol(2, 2); |
34 | ! |
lower_chol << |
35 | 2x |
1.0, 0.0, |
36 | 2x |
6.0, 2.0; |
37 | 2x |
matrix<double> result = tcrossprod(lower_chol, true); |
38 | 2x |
matrix<double> expected = lower_chol * lower_chol.transpose(); |
39 | 2x |
expect_equal_matrix(result, expected); |
40 |
} |
|
41 | ||
42 | 4x |
test_that("tcrossprod works as expected without complete (default)") { |
43 | 2x |
matrix<double> lower_chol(2, 2); |
44 | ! |
lower_chol << |
45 | 2x |
1.0, 0.0, |
46 | 2x |
6.0, 2.0; |
47 | 2x |
matrix<double> result = tcrossprod(lower_chol); // default: no complete. |
48 | 2x |
matrix<double> full = lower_chol * lower_chol.transpose(); |
49 | 2x |
matrix<double> expected = full.template triangularView<Eigen::Lower>(); |
50 | 2x |
expect_equal_matrix(result, expected); |
51 |
} |
|
52 |
} |
|
53 | ||
54 | 2x |
context("crossprod") { |
55 | 2x |
test_that("crossprod works as expected") { |
56 | 2x |
matrix<double> x(2, 3); |
57 | ! |
x << |
58 | 2x |
1.0, 0.0, 1.0, |
59 | 2x |
6.0, 2.0, 4.2; |
60 | 2x |
matrix<double> result = crossprod(x); |
61 | 2x |
matrix<double> full = x.transpose() * x; |
62 | 2x |
matrix<double> expected = full.template triangularView<Eigen::Lower>(); |
63 | 2x |
expect_equal_matrix(result, expected); |
64 |
} |
|
65 |
} |
|
66 | ||
67 | 2x |
context("map_to_cor") { |
68 | 2x |
test_that("map_to_cor works as expected") { |
69 | 4x |
vector<double> theta {{-5., 2., 10., 0.}}; |
70 | 2x |
vector<double> result = map_to_cor(theta); |
71 |
// Expected from R: |
|
72 |
// test <- c(-5, 2, 10, 0) |
|
73 |
// test / sqrt(1 + test^2) |
|
74 | 4x |
vector<double> expected {{-0.98058067569092, 0.894427190999916, 0.995037190209989, 0.0}}; |
75 | 2x |
expect_equal_vector(result, expected); |
76 |
} |
|
77 |
} |
|
78 | ||
79 | 2x |
context("generic_corr_fun") { |
80 | 2x |
test_that("generic_corr_fun is initialized as expected") { |
81 | 4x |
vector<double> theta {{-5., 2., 10., 0.}}; |
82 | 2x |
generic_corr_fun<double> result(theta); |
83 | 2x |
vector<double> expected_corr_values = map_to_cor(theta); |
84 | 2x |
expect_equal_vector(result.corr_values, expected_corr_values); |
85 |
} |
|
86 |
} |
|
87 | ||
88 |
template <class T> |
|
89 |
struct const_cor { |
|
90 | 6x |
const T operator() (int& i, int& j) const { |
91 | 6x |
return T(0.5); |
92 |
} |
|
93 |
}; |
|
94 | 2x |
context("get_corr_mat_chol") { |
95 | 2x |
test_that("get_corr_mat_chol works as expected") { |
96 |
const_cor<double> const_fun; |
|
97 | 2x |
matrix<double> result = get_corr_mat_chol(3, const_fun); |
98 | 2x |
matrix<double> expected(3, 3); |
99 | 2x |
expected << |
100 | 2x |
1, 0, 0, |
101 | 2x |
0.5, 0.866025403784439, 0, |
102 | 2x |
0.5, 0.288675134594813, 0.816496580927726; |
103 | 2x |
expect_equal_matrix(result, expected); |
104 |
} |
|
105 |
} |
|
106 | ||
107 |
template <class T> |
|
108 |
struct test_cor { |
|
109 | 6x |
const T operator() (int& i, int& j) const { |
110 | 6x |
return T(0.0); |
111 |
} |
|
112 |
}; |
|
113 | 2x |
context("get_heterogeneous_cov") { |
114 | 2x |
test_that("get_heterogeneous_cov works as expected") { |
115 | 4x |
vector<double> sd_values {{1., 2., 3.}}; |
116 |
test_cor<double> test_fun; |
|
117 | 2x |
matrix<double> result = get_heterogeneous_cov(sd_values, test_fun); |
118 | 2x |
matrix<double> expected(3, 3); |
119 | ! |
expected << |
120 | 2x |
1.0, 0.0, 0.0, |
121 | 2x |
0.0, 2.0, 0.0, |
122 | 2x |
0.0, 0.0, 3.0; |
123 | 2x |
expect_equal_matrix(result, expected); |
124 |
} |
|
125 |
} |
|
126 | ||
127 | 4x |
context("euclidean distance") { |
128 | 4x |
test_that("euclidean works as expected") { |
129 | 2x |
matrix<double> coord(4, 1); |
130 | 2x |
coord << 1, 2, 3, 4; |
131 | 2x |
matrix<double> expected(4, 4); |
132 | 2x |
expected << |
133 | 2x |
0, 1, 2, 3, |
134 | 2x |
1, 0, 1, 2, |
135 | 2x |
2, 1, 0, 1, |
136 | 2x |
3, 2, 1, 0; |
137 | 2x |
expect_equal_matrix(euclidean(coord), expected); |
138 |
} |
|
139 | 4x |
test_that("euclidean works as expected for matrix") { |
140 | 2x |
matrix<double> coord(4, 2); |
141 | 2x |
coord << 1, 2, 3, 4, 5, 6, 7, 8; |
142 | 2x |
matrix<double> expected(4, 4); |
143 | 2x |
expected << |
144 | 2x |
0, 2, 4, 6, |
145 | 2x |
2, 0, 2, 4, |
146 | 2x |
4, 2, 0, 2, |
147 | 2x |
6, 4, 2, 0; |
148 | 2x |
expected = expected * sqrt(2); |
149 | 2x |
expect_equal_matrix(euclidean(coord), expected); |
150 |
} |
|
151 |
} |
|
152 | ||
153 | 4x |
context("cpow works") { |
154 | 4x |
test_that("cpow gives correct power by element for power 0.5") { |
155 | 2x |
matrix<double> tmb_mat(4, 2); |
156 | 2x |
tmb_mat << 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0; |
157 | 2x |
matrix<double> expected(4, 2); |
158 | 2x |
expected << 1.0, sqrt(2.0), sqrt(3.0), 2.0, sqrt(5.0), sqrt(6.0), sqrt(7.0), sqrt(8.0); |
159 | 2x |
expect_equal_matrix(as_matrix<matrix<double>, Eigen::Matrix<double, -1, -1>>(cpow(as_matrix<Eigen::Matrix<double, -1, -1>, matrix<double>>(tmb_mat), 0.5)), expected); |
160 |
} |
|
161 | 4x |
test_that("cpow gives correct power by element for power 2") { |
162 | 2x |
matrix<double> tmb_mat(4, 2); |
163 | 2x |
tmb_mat << 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0; |
164 | 2x |
matrix<double> expected(4, 2); |
165 | 2x |
expected << 1.0, 4.0, 9.0, 16.0, 25.0, 36.0, 49.0, 64.0; |
166 | 2x |
expect_equal_matrix(as_matrix<matrix<double>, Eigen::Matrix<double, -1, -1>>(cpow(as_matrix<Eigen::Matrix<double, -1, -1>, matrix<double>>(tmb_mat), 2.0)), expected); |
167 |
} |
|
168 |
} |
|
169 | ||
170 | 4x |
context("pseudoInverseSqrt works") { |
171 | 4x |
test_that("pseudoInverseSqrt gives correct result") { |
172 | 2x |
matrix<double> tmb_mat(3, 3); |
173 | 2x |
tmb_mat << 5.483417, 2.861011, 3.478399, |
174 | 2x |
2.861011, 3.169936, -1.075550, |
175 | 2x |
3.478399, -1.075550, 10.525825; |
176 | ||
177 | 2x |
matrix<double> expected(3, 3); |
178 | 2x |
expected << 0.8235633, -0.5514385, -0.2586037, |
179 | 2x |
-0.5514385, 1.0568775, 0.2548210, |
180 | 2x |
-0.2586037, 0.2548210, 0.4095994; |
181 | 2x |
expect_equal_matrix(pseudoInverseSqrt(tmb_mat), expected); |
182 |
} |
|
183 | ||
184 | 4x |
test_that("pseudoInverseSqrt gives correct result for rank-deficient matrix") { |
185 | 2x |
matrix<double> tmb_mat(3, 3); |
186 | 2x |
tmb_mat << 5.483417, 2.861011, 0, |
187 | 2x |
2.861011, 3.169936, 0, |
188 | 2x |
0, 0, 0; |
189 | ||
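// For the rank-deficient input the zero eigendirection is expected to map to zero
// (pseudo-inverse behaviour), hence the zero third row and column below.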
190 | 2x |
matrix<double> expected(3, 3); |
191 | 2x |
expected << 0.5331152, -0.2459070, 0.0, |
192 | 2x |
-0.2459070, 0.7319613, 0.0, |
193 | 2x |
0.0000000, 0.0000000, 0.0; |
194 | 2x |
expect_equal_matrix(pseudoInverseSqrt(tmb_mat), expected); |
195 |
} |
|
196 |
} |
|
197 | ||
198 | 2x |
context("Rcpp and eigen conversion") { |
199 | 2x |
test_that("conversions do not change values") { |
200 | 2x |
NumericVector v1 = NumericVector::create(1.0, 2.0, 3.0); |
201 | 2x |
vector<double> v1_vec = as_vector< vector<double>, NumericVector>(v1); |
202 | 2x |
NumericVector v2 = as_vector<NumericVector, vector<double>>(v1_vec); |
203 | 2x |
vector<double> v3(3); |
204 | 2x |
v3 << 1.0, 2.0, 3.0; |
205 | 2x |
expect_equal_vector(v1_vec, v3); |
206 | 2x |
expect_equal_vector(v1, v2); |
207 | ||
208 | 2x |
IntegerVector v4 = IntegerVector::create(1, 2, 3); |
209 | 2x |
vector<int> v4_vec = as_vector<vector<int>, IntegerVector>(v4); |
210 | 2x |
IntegerVector v5 = as_vector<IntegerVector, vector<int>>(v4_vec); |
211 | 2x |
vector<int> v6(3); |
212 | 2x |
v6 << 1, 2, 3; |
213 | 2x |
expect_equal_vector<vector<int>>(v4_vec, v6); |
214 | 2x |
expect_equal_vector<IntegerVector>(v4, v5); |
215 | ||
216 | 2x |
NumericVector v_m = NumericVector::create(1.0, 2.0, 3.0, 4.0); |
217 | 2x |
NumericMatrix m1(2, 2, v_m.begin()); |
218 | 2x |
matrix<double> m2(2, 2); |
219 | 2x |
m2 << 1.0, 3.0, 2.0, 4.0; |
220 | 2x |
expect_equal_matrix(m2, as_matrix<matrix<double>, NumericMatrix>(m1)); |
221 | 2x |
expect_equal_matrix(m1, as_matrix<NumericMatrix, matrix<double>>(m2)); |
222 |
} |
|
223 |
} |
|
224 | ||
225 | 2x |
context("segment works for Rcpp Vector") { |
226 | 2x |
test_that("segment have correct values") { |
227 | 2x |
NumericVector v1 = NumericVector::create(1.0, 2.0, 3.0); |
228 | 2x |
NumericVector v2 = segment<NumericVector>(v1, 1, 1); |
229 | 2x |
NumericVector v3 = NumericVector::create(2.0); |
230 | 2x |
expect_equal_vector(v2, v3); |
231 |
} |
|
232 |
} |