[Georob-commits] r13 - in pkg: . R man
noreply at r-forge.r-project.org
Tue Jul 16 09:17:28 CEST 2013
Author: papritz
Date: 2013-07-16 09:17:28 +0200 (Tue, 16 Jul 2013)
New Revision: 13
Modified:
pkg/ChangeLog
pkg/DESCRIPTION
pkg/NAMESPACE
pkg/R/georob.exported.functions.R
pkg/R/georob.private.functions.R
pkg/man/georob.Rd
pkg/man/georob.control.Rd
Log:
solving estimating equations by BBsolve{BB} (in addition to nleqslv)
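
For orientation, a minimal sketch of the user-side call that the new option enables. Only the root.finding argument, its values and the bbsolve entry of georob.control() are taken from the diff below; the formula, data and variogram settings are hypothetical placeholders:

## hypothetical call: 'd' stands for a data frame with coordinates 'x', 'y'
## and response 'z'; the variogram specification is an example only
r.bb <- georob( z ~ 1, data = d, locations = ~ x + y,
  variogram.model = "exponential",
  param = c( variance = 1, nugget = 0.5, scale = 100 ),
  tuning.psi = 2,
  root.finding = "bbsolve",                               ## new in r13
  control = georob.control( bbsolve = bbsolve.control() ) )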
Modified: pkg/ChangeLog
===================================================================
--- pkg/ChangeLog 2013-07-10 09:02:14 UTC (rev 12)
+++ pkg/ChangeLog 2013-07-16 07:17:28 UTC (rev 13)
@@ -131,3 +131,8 @@
* georob.exported.functions.R (georob): computing robust initial variogram parameter estimates by minimizing sum of squared estimating equations
* georob.private.functions.R (georob.fit, compute.estimating.equations): computing robust initial variogram parameter estimates by minimizing sum of squared estimating equations
+
+2013-07-12 Andreas Papritz <papritz at env.ethz.ch>
+
+* georob.exported.functions.R (georob, georob.control, bbsolve.control): solving estimating equations by BBsolve{BB} (in addition to nleqslv)
+* georob.private.functions.R (compute.estimating.equations, compute.expanded.estimating.equations, estimating.eqations.xihat, estimate.xihat, georob.fit, gradient.negative.restricted.loglikelihood, negative.restr.loglikelihood, prepare.likelihood.calculations): solving estimating equations by BBsolve{BB} (in addition to nleqslv)
Modified: pkg/DESCRIPTION
===================================================================
--- pkg/DESCRIPTION 2013-07-10 09:02:14 UTC (rev 12)
+++ pkg/DESCRIPTION 2013-07-16 07:17:28 UTC (rev 13)
@@ -8,7 +8,7 @@
email = "andreas.papritz at env.ethz.ch" ),
person( "Cornelia", "Schwierz", role = "ctb" ))
Depends: R(>= 2.14.0), lmtest, nlme, robustbase, sp(>= 0.9-60)
-Imports: constrainedKriging(>= 0.1-9), nleqslv, quantreg,
+Imports: BB, constrainedKriging(>= 0.2-1), nleqslv, quantreg,
RandomFields(>= 2.0.55), spatialCovariance(>= 0.6-4)
Suggests: geoR
Description: The georob package provides functions for fitting linear models
Modified: pkg/NAMESPACE
===================================================================
--- pkg/NAMESPACE 2013-07-10 09:02:14 UTC (rev 12)
+++ pkg/NAMESPACE 2013-07-16 07:17:28 UTC (rev 13)
@@ -1,5 +1,6 @@
import( stats, parallel )
+importFrom( BB, BBsolve )
importFrom( constrainedKriging, covmodel, f.point.block.cov, K, preCKrige )
importFrom( lmtest, waldtest, waldtest.default )
importFrom( nlme, fixef, fixed.effects, ranef, random.effects )
@@ -10,6 +11,7 @@
# exported functions
export(
+ bbsolve.control, # ok
bwd.transf, # ok
compress, # ok
cv, # ok
@@ -75,8 +77,10 @@
##
## compute.covariances,
## compute.estimating.equations,
+## compute.expanded.estimating.equations,
## compute.semivariance,
## dcorr.dparam,
+## estimating.eqations.xihat,
## estimate.xihat,
## gcr,
## georob.fit,
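
The new importFrom( BB, BBsolve ) makes the Barzilai-Borwein spectral root finder of the BB package available inside georob, and bbsolve.control() (defined further down in this revision) is exported so its settings can be tuned. A self-contained toy illustration of the BBsolve interface the package now builds on; the system of equations is invented for the example:

library( BB )

## made-up nonlinear system F(x) = 0 with roots (1, 2) and (2, 1)
f.toy <- function( x ) c( x[1]^2 + x[2]^2 - 5., x[1] * x[2] - 2. )

r.bb <- BBsolve( par = c( 0.8, 1.8 ), fn = f.toy, quiet = TRUE )
r.bb$par           ## approximate root, expected near c(1, 2) for this start value
r.bb$convergence   ## 0 signals successful convergence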
Modified: pkg/R/georob.exported.functions.R
===================================================================
--- pkg/R/georob.exported.functions.R 2013-07-10 09:02:14 UTC (rev 12)
+++ pkg/R/georob.exported.functions.R 2013-07-16 07:17:28 UTC (rev 13)
@@ -19,6 +19,7 @@
aniso = c( f1 = 1., f2 = 1., omega = 90., phi = 90., zeta = 0. ),
fit.aniso = c( f1 = FALSE, f2 = FALSE, omega = FALSE, phi = FALSE, zeta = FALSE ),
tuning.psi = 2, initial.param = c( "minimize", "exclude", "no" ),
+ root.finding = c( "nleqslv", "bbsolve" ),
control = georob.control( ... ), verbose = 0,
...
)
@@ -78,6 +79,7 @@
## 2013-06-03 AP handling design matrices with rank < ncol(x)
## 2013-06-12 AP substituting [["x"]] for $x in all lists
## 2013-07-02 AP new transformation of rotation angles
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
## check whether input is complete
@@ -315,11 +317,11 @@
aniso.missing <- missing( aniso ) && missing( fit.aniso )
- ## prune data set for computing initial values of variogram and
- ## anisotropy parameters by reml
-
initial.param <- match.arg( initial.param )
+ root.finding <- match.arg( root.finding )
+ ## compute initial values of variogram and anisotropy parameters
+
if( tuning.psi < control[["tuning.psi.nr"]] ){
if( identical( initial.param, "exclude" ) ){
@@ -362,6 +364,7 @@
## estimate model parameters with pruned data set
t.georob <- georob.fit(
+ root.finding = root.finding,
slv = TRUE,
envir = envir,
initial.objects = initial.objects,
@@ -393,6 +396,8 @@
zero.dist = control[["zero.dist"]],
nleqslv.method = control[["nleqslv"]][["method"]],
nleqslv.control = control[["nleqslv"]][["control"]],
+ bbsolve.method = control[["bbsolve"]][["method"]],
+ bbsolve.control = control[["bbsolve"]][["control"]],
optim.method = control[["optim"]][["method"]],
optim.lower = control[["optim"]][["lower"]],
optim.upper = control[["optim"]][["upper"]],
@@ -429,6 +434,7 @@
## estimate model parameters by minimizing sum( gradient^2)
t.georob <- georob.fit(
+ root.finding = root.finding,
slv = FALSE,
envir = envir,
initial.objects = initial.objects,
@@ -460,6 +466,8 @@
zero.dist = control[["zero.dist"]],
nleqslv.method = control[["nleqslv"]][["method"]],
nleqslv.control = control[["nleqslv"]][["control"]],
+ bbsolve.method = control[["bbsolve"]][["method"]],
+ bbsolve.control = control[["bbsolve"]][["control"]],
optim.method = control[["optim"]][["method"]],
optim.lower = control[["optim"]][["lower"]],
optim.upper = control[["optim"]][["upper"]],
@@ -500,6 +508,7 @@
if( verbose > 0 ) cat( "computing final parameter estimates ...\n" )
r.georob <- georob.fit(
+ root.finding = root.finding,
slv = TRUE,
envir = envir,
initial.objects = initial.objects,
@@ -531,6 +540,8 @@
zero.dist = control[["zero.dist"]],
nleqslv.method = control[["nleqslv"]][["method"]],
nleqslv.control = control[["nleqslv"]][["control"]],
+ bbsolve.method = control[["bbsolve"]][["method"]],
+ bbsolve.control = control[["bbsolve"]][["control"]],
optim.method = control[["optim"]][["method"]],
optim.lower = control[["optim"]][["lower"]],
optim.upper = control[["optim"]][["upper"]],
@@ -611,75 +622,23 @@
rq = rq.control(),
lmrob = lmrob.control(),
nleqslv = nleqslv.control(),
+ bbsolve = bbsolve.control(),
optim = optim.control(),
full.output = TRUE
)
{
## auxiliary function to set meaningful default values for the
- ## arguments of the function georob.fit
+
+ ## Arguments:
- ## Arguments:
-
- ## initial.method character scalar, controlling how the intitial estimate of the fixed-effects
- ## parameters are computed, possible values are
- ## "rq" to use rq{quantreg},
- ## "lmrob" to use lmrob{robustbase},
- ## param.tf list, used to pass arguents to param.tf{georob}
- ## parameters, implemented values are "log" or "identity" (no transformation)
- ## fwd.tf
- ## rho.function character, defining the rho/psi functions family
- ## tuning.psi.nr numeric, if tuning.psi exceeds tuning.psi.nr for
- ## logistic or huber rho.function then only one IRWLS iteration is executed
- ## to estimate beta and z
- ## min.rweight minimum robustness weights of lmrob fit required for
- ## including an observations into the pruned data set from which initial values
- ## of variogram and anisotropy parameters are computed by Gaussian REML
- ## irwls.initial logical, flag controlling whether IRWLS starts from the lmrob
- ## estimates of beta and from z=0 (TRUE) or from the previous IRWLS results
- ## irwls.maxiter integer, maximum number of IRWLS steps
- ## irwls.reltol numeric, relative convergence tolerance for IRWLS, see optim{stats}
- ## force.gradient logical, flag controlling whether the gradient (REML) or the
- ## estimation equations should be evaluated if all variogram parameter are
- ## fixed
- ## zero.dist observations from sampling locations less than zero.dist apart will be
- ## considered as multiple observations from same location
- ## cov.bhat logical, flag controlling whether the covariances of bhat should be computed
- ## full.cov.bhat logical, flag controlling whether the full covariance matrix of bhat
- ## is computed (TRUE) or only the diagonal elements (FALSE)
- ## cov.betahat logical, flag controlling whether the covariance matrix of betahat
- ## should be computed
- ## cov.bhat.betahat logical, flag controlling whether the covariance matrix of
- ## bhat and betahat should be computed
- ## cov.delta.bhat logical, flag controlling whether the covariances of z-bhat should be computed
- ## full.cov.delta.bhat logical, flag controlling whether the full covariance matrix of z-bhat
- ## is computed (TRUE) or only the diagonal elements (FALSE)
- ## cov.delta.bhat.betahat logical, flag controlling whether the covariance matrix of z-bhat
- ## and betahat should be computed
- ## cov.ehat logical, flag controlling whether the covariances of the resdiuals should be computed
- ## full.cov.ehat logical, flag controlling whether the full covariance matrix of the residuals
- ## is computed (TRUE) or only the diagonal elements (FALSE)
- ## cov.ehat.p.bhat logical, flag controlling whether the covariances of the resdiuals+bhat should be computed
- ## full.cov.ehat.p.bhat logical, flag controlling whether the full covariance matrix of the resdiuals+bhat
- ## is computed (TRUE) or only the diagonal elements (FALSE)
- ## aux.cov.pred.target logical, flag controlling whether the auxiliary matrix for computing the covariances
- ## of the predicted and true y should be computed
- ## is computed (TRUE) or only the diagonal elements (FALSE)
- ## min.condnum minimum condition number for a matrix to be numerically non-singular
- ## rq list, see rq{quantreg}
- ## lmrob list, see lmrob.control{robustbase}
- ## nleqslv list, used to pass arguent to nleqslv, see nleqslv.control{georob}
- ## optim list, used to pass arguments to optim, see optim.control{georob}
- ## full.output logical, flag used to control the amount of output returned by georob, warning:
- ## is TRUE then the output will not contain all required items required by some methods
-
-
## 2012-04-21 A. Papritz
## 2012-05-03 AP bounds for safe parameter values
## 2012-05-04 AP modifications for lognormal block kriging
## 2013-04-23 AP new names for robustness weights
## 2013-06-12 AP changes in stored items of Valpha object
## 2013-06-12 AP substituting [["x"]] for $x in all lists
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
if(
!( all( param.tf %in% names( fwd.tf ) ) &&
@@ -712,7 +671,7 @@
aux.cov.pred.target = aux.cov.pred.target,
min.condnum = min.condnum,
irf.models = c( "DeWijsian", "fractalB", "genB" ),
- rq = rq, lmrob = lmrob, nleqslv = nleqslv, optim = optim,
+ rq = rq, lmrob = lmrob, nleqslv = nleqslv, bbsolve = bbsolve, optim = optim,
full.output = full.output
)
@@ -834,10 +793,8 @@
## function sets meaningful defaults for selected arguments of function
## nleqslv{nleqslv}
- ## 2012-12-14 A. Papritz
+ ## 2013-07-12 A. Papritz
- aux <- function( trace = 0, ... ) list( trace = trace, ... )
-
list(
method = match.arg( nleqslv.method ),
global = match.arg( global ),
@@ -847,6 +804,25 @@
}
## ======================================================================
+bbsolve.control <-
+ function(
+ bbsolve.method = c( "2", "3", "1" ),
+ bbsolve.control = NULL
+ )
+{
+
+ ## function sets meaningful defaults for selected arguments of function
+ ## BBsolve{BB}
+
+ ## 2013-07-12 A. Papritz
+
+ list(
+ method = as.integer( match.arg( bbsolve.method ) ),
+ control = bbsolve.control
+ )
+}
+
+## ======================================================================
optim.control <-
function(
optim.method = c( "BFGS", "Nelder-Mead", "CG", "L-BFGS-B", "SANN", "Brent" ),
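
To summarize the control plumbing added in this file: georob.control() gains a bbsolve argument with default bbsolve.control(), and georob() forwards control[["bbsolve"]][["method"]] and control[["bbsolve"]][["control"]] to georob.fit(). A hedged sketch of customizing these settings; maxit is assumed to be a valid entry of BBsolve's control list and is not defined in this diff:

## defaults: method "2" is selected and converted to integer 2; control = NULL
## lets BBsolve{BB} use its own default settings
bbsolve.control()

## customized settings passed through georob.control(); all other entries
## of georob.control() keep their defaults
ctrl <- georob.control(
  bbsolve = bbsolve.control(
    bbsolve.method  = "3",
    bbsolve.control = list( maxit = 500 )   ## assumed BBsolve control entry
  )
)
ctrl[["bbsolve"]]    ## list( method = 3L, control = list( maxit = 500 ) )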
Modified: pkg/R/georob.private.functions.R
===================================================================
--- pkg/R/georob.private.functions.R 2013-07-10 09:02:14 UTC (rev 12)
+++ pkg/R/georob.private.functions.R 2013-07-16 07:17:28 UTC (rev 13)
@@ -795,11 +795,35 @@
}
+
## ##############################################################################
+estimating.eqations.xihat <- function(
+ res, TT, xihat, nugget, eta, Valpha.inverse.Palpha,
+ psi.function, tuning.psi
+){
+
+ ## auxiliary function to compute estimating equations for xihat
+
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
+
+ Ttpsi <- psi.function( res / sqrt( nugget ), tuning.psi )
+ TtT <- rep( 1, length( Ttpsi ) )
+
+ if( sum( duplicated( TT ) > 0 ) ){
+ Ttpsi <- as.vector( tapply( Ttpsi, factor( TT ), sum ) )
+ TtT <- as.vector( table( TT ) )
+ }
+
+ Ttpsi - drop( Valpha.inverse.Palpha %*% xihat ) / sqrt( nugget ) / eta
+}
+
+## ##############################################################################
+
estimate.xihat <-
function(
- XX, min.condnum, rankdef.x, yy, betahat, TT, xihat,
+ compute.xihat,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
psi.function, tuning.psi, tuning.psi.nr,
maxit, reltol,
nugget, eta, Valpha.inverse,
@@ -810,30 +834,13 @@
## 2013-02-04 AP solving estimating equations for xi
## 2013-06-03 AP handling design matrices with rank < ncol(x)
## 2013-06-12 AP substituting [["x"]] for $x in all lists
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
## function computes (1) estimates xihat, bhat, betahat by
## solving robustified estimating equations by IRWLS,
## (2) the weights of the IRWLS, (3) the unstandardized residuals
## (= estimated epsilons); the results are returned as a list
- ## auxiliary function to compute estimating equations for xihat
-
- f.eeq <- function(
- res, TT, xihat, nugget, eta, Valpha.inverse.Palpha,
- psi.function, tuning.psi
- ){
-
- Ttpsi <- psi.function( res / sqrt( nugget ), tuning.psi )
- TtT <- rep( 1, length( Ttpsi ) )
-
- if( sum( duplicated( TT ) > 0 ) ){
- Ttpsi <- as.vector( tapply( Ttpsi, factor( TT ), sum ) )
- TtT <- as.vector( table( TT ) )
- }
-
- Ttpsi - drop( Valpha.inverse.Palpha %*% xihat ) / sqrt( nugget ) / eta
- }
-
## compute projection matrix Palpha and related items
result <- list( error = FALSE )
@@ -866,102 +873,122 @@
rownames( result[["Valpha.inverse.Palpha"]] ) <- rownames( XX )
colnames( result[["Valpha.inverse.Palpha"]] ) <- rownames( XX )
- ## initialization
+ if( compute.xihat ){
- res <- yy - xihat[TT]
-
- eeq.old <- f.eeq(
- res, TT, xihat, nugget, eta, result[["Valpha.inverse.Palpha"]],
- psi.function, tuning.psi
- )
- eeq.old.l2 <- sum( eeq.old^2 )
-
- if( !is.finite( eeq.old.l2 ) ) {
- result[["error"]] <- TRUE
- return( result )
- }
-
- converged <- FALSE
-
- if( verbose > 2 ) cat(
- "\n IRWLS\n",
- " it L2.old L2.new delta.L2\n", sep = ""
- )
-
- ## IRWLS
-
- for( i in 1:maxit ){
+ ## initialization
- ## compute new estimates
+ res <- yy - xihat[TT]
- new <- update.xihat(
- XX, yy, res, TT,
- nugget, eta,
- result[["Valpha.inverse.Palpha"]],
- psi.function, tuning.psi,
- verbose
+ eeq.old <- estimating.eqations.xihat(
+ res, TT, xihat, nugget, eta, result[["Valpha.inverse.Palpha"]],
+ psi.function, tuning.psi
)
+ eeq.old.l2 <- sum( eeq.old^2 )
- if( new[["error"]] ) {
+ if( !is.finite( eeq.old.l2 ) ) {
result[["error"]] <- TRUE
return( result )
}
+ converged <- FALSE
- ## evaluate estimating equations for xi and compute its l2 norm
-
- eeq.new <- f.eeq(
- new[["residuals"]], TT, new[["xihat"]], nugget, eta, result[["Valpha.inverse.Palpha"]],
- psi.function, tuning.psi
+ if( verbose > 2 ) cat(
+ "\n IRWLS\n",
+ " it L2.old L2.new delta.L2\n", sep = ""
)
- eeq.new.l2 <- sum( eeq.new^2 )
- if( !is.finite( eeq.new.l2 ) ) {
- result[["error"]] <- TRUE
- return( result )
+ ## IRWLS
+
+ for( i in 1:maxit ){
+
+ ## compute new estimates
+
+ new <- update.xihat(
+ XX, yy, res, TT,
+ nugget, eta,
+ result[["Valpha.inverse.Palpha"]],
+ psi.function, tuning.psi,
+ verbose
+ )
+
+ if( new[["error"]] ) {
+ result[["error"]] <- TRUE
+ return( result )
+ }
+
+
+ ## evaluate estimating equations for xi and compute its l2 norm
+
+ eeq.new <- estimating.eqations.xihat(
+ new[["residuals"]], TT, new[["xihat"]], nugget, eta, result[["Valpha.inverse.Palpha"]],
+ psi.function, tuning.psi
+ )
+ eeq.new.l2 <- sum( eeq.new^2 )
+
+ if( !is.finite( eeq.new.l2 ) ) {
+ result[["error"]] <- TRUE
+ return( result )
+ }
+
+ if( verbose > 2 ) cat(
+ format( i, width = 8 ),
+ format(
+ signif(
+ c( eeq.old.l2, eeq.new.l2, eeq.old.l2 - eeq.new.l2 ), digits = 7
+ ), scientific = TRUE, width = 14
+ ), "\n", sep = ""
+ )
+
+ ## check for convergence (cf. help( optim ) )
+
+ if( max( abs( res - new[["residuals"]] ) ) < sqrt( reltol ) * sqrt( nugget ) ) {
+ converged <- TRUE
+ break
+ }
+
+ ## update xihat, residuals and eeq.old.l2
+
+ eeq.old.l2 <- eeq.new.l2
+ xihat <- new[["xihat"]]
+ res <- new[["residuals"]]
+
}
- if( verbose > 2 ) cat(
- format( i, width = 8 ),
- format(
- signif(
- c( eeq.old.l2, eeq.new.l2, eeq.old.l2 - eeq.new.l2 ), digits = 7
- ), scientific = TRUE, width = 14
- ), "\n", sep = ""
- )
+ ## collect output
- ## check for convergence (cf. help( optim ) )
-
- if( max( abs( res - new[["residuals"]] ) ) < sqrt( reltol ) * sqrt( nugget ) ) {
- converged <- TRUE
- break
- }
+ result[["xihat"]] <- new[["xihat"]]
+ names( result[["xihat"]] ) <- rownames( XX )
- ## update xihat, residuals and eeq.old.l2
+ result[["residuals"]] <- new[["residuals"]]
+ result[["rweights"]] <- new[["rweights"]]
+ result[["converged"]] <- converged
+ result[["nit"]] <- i
- eeq.old.l2 <- eeq.new.l2
- xihat <- new[["xihat"]]
- res <- new[["residuals"]]
+ } else {
+ result[["xihat"]] <- xihat
+ names( result[["xihat"]] ) <- rownames( XX )
+
+ result[["residuals"]] <- yy - xihat[TT]
+
+ result[["rweights"]] <- ifelse(
+ abs( std.res <- result[["residuals"]] / sqrt( nugget ) ) < sqrt( .Machine[["double.eps"]] ),
+ 1.,
+ psi.function( std.res, tuning.psi ) / std.res
+ )
+ result[["converged"]] <- NA
+ result[["nit"]] <- NA_integer_
+
}
- ## collect output
-
- result[["xihat"]] <- new[["xihat"]]
- names( result[["xihat"]] ) <- rownames( XX )
-
result[["bhat"]] <- drop( result[["Palpha"]] %*% result[["xihat"]] )
names( result[["bhat"]] ) <- rownames( XX )
result[["betahat"]] <- drop( result[["Aalpha"]] %*% result[["xihat"]] )
names( result[["betahat"]] ) <- colnames( XX )
- result[["residuals"]] <- new[["residuals"]]
- result[["rweights"]] <- new[["rweights"]]
result[["z.star"]] <- drop( Valpha.inverse %*% result[["bhat"]] )
- result[["converged"]] <- converged
- result[["nit"]] <- i
-
+
return( result )
}
@@ -1065,9 +1092,10 @@
adjustable.param, variogram.model, fixed.param, param.name, aniso.name,
param.tf, bwd.tf, safe.param,
lag.vectors,
- XX, min.condnum, rankdef.x, yy, betahat, TT, bhat,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
psi.function, dpsi.function, tuning.psi, tuning.psi.nr,
irwls.initial, irwls.maxiter, irwls.reltol,
+ compute.xihat = TRUE,
compute.Q,
verbose
)
@@ -1084,6 +1112,7 @@
## 2013-02-04 AP solving estimating equations for xi
## 2013-06-12 AP substituting [["x"]] for $x in all lists
## 2013-07-02 AP new transformation of rotation angles
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
## function transforms (1) the variogram parameters back to their
## original scale; computes (2) the correlation matrix, its inverse
@@ -1286,15 +1315,14 @@
## irwls iteration from initial.object or from previous iteration
if(
- !irwls.initial && !is.null( lik.item[["effects"]][["betahat"]] ) &&
- !is.null( lik.item[["effects"]][["bhat"]] )
+ !irwls.initial && !is.null( lik.item[["effects"]][["xihat"]] )
){
- betahat <- lik.item[["effects"]][["betahat"]]
- bhat <- lik.item[["effects"]][["bhat"]]
+ xihat <- lik.item[["effects"]][["xihat"]]
}
lik.item[["effects"]] <- estimate.xihat(
- XX, min.condnum, rankdef.x, yy, betahat, TT, bhat,
+ compute.xihat,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
psi.function, tuning.psi, tuning.psi.nr,
irwls.maxiter, irwls.reltol,
lik.item[["param"]]["nugget"], lik.item[["eta"]], lik.item[["Valpha"]][["Valpha.inverse"]],
@@ -2353,7 +2381,7 @@
variogram.model, fixed.param, param.name, aniso.name,
param.tf, bwd.tf, safe.param,
lag.vectors,
- XX, min.condnum, rankdef.x, yy, betahat, TT, bhat,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
psi.function, dpsi.function,
tuning.psi, tuning.psi.nr,
irwls.initial, irwls.maxiter, irwls.reltol,
@@ -2373,6 +2401,7 @@
## 2013-04-23 AP new names for robustness weights
## 2013-05-06 AP changes for solving estimating equations for xi
## 2013-06-12 AP substituting [["x"]] for $x in all lists
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
## get lik.item
@@ -2381,11 +2410,11 @@
adjustable.param, variogram.model, fixed.param, param.name, aniso.name,
param.tf, bwd.tf, safe.param,
lag.vectors,
- XX, min.condnum, rankdef.x, yy, betahat, TT, bhat,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
psi.function, dpsi.function, tuning.psi, tuning.psi.nr,
irwls.initial, irwls.maxiter, irwls.reltol,
- compute.Q = FALSE,
- verbose
+ compute.xihat = TRUE, compute.Q = FALSE,
+ verbose = verbose
)
## check whether generalized covariance matrix is positive definite
@@ -2617,6 +2646,267 @@
## ##############################################################################
+compute.expanded.estimating.equations <-
+ function(
+ allpar,
+ slv,
+ envir,
+ variogram.model, fixed.param, param.name, aniso.name,
+ param.tf, bwd.tf, safe.param,
+ lag.vectors,
+ XX, min.condnum, rankdef.x, yy, TT,
+ psi.function, dpsi.function,
+ tuning.psi, tuning.psi.nr,
+ irwls.initial, irwls.maxiter, irwls.reltol,
+ force.gradient,
+ expectations,
+ verbose
+ )
+{
+
+ ## function evaluates the robustified estimating equations of
+ ## variogram parameters derived from the Gaussian log-likelihood
+
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
+
+ ## select xihat and variogram parameters
+
+ xihat <- allpar[ 1:NROW(XX) ]
+ adjustable.param <- allpar[ -(1:NROW(XX)) ]
+
+ ## get lik.item
+
+ lik.item <- prepare.likelihood.calculations(
+ envir,
+ adjustable.param, variogram.model, fixed.param, param.name, aniso.name,
+ param.tf, bwd.tf, safe.param,
+ lag.vectors,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
+ psi.function, dpsi.function, tuning.psi, tuning.psi.nr,
+ irwls.initial, irwls.maxiter, irwls.reltol,
+ compute.xihat = FALSE, compute.Q = FALSE,
+ verbose
+ )
+
+ ## check whether generalized covariance matrix is positive definite
+
+ if( lik.item[["Valpha"]][["error"]] ) {
+ if( verbose > 0 ) cat(
+ "\n(generalized) correlation matrix Valpha is not positive definite\n"
+ )
+ t.result <- rep( Inf, length( adjustable.param ) )
+ names( t.result ) <- names( adjustable.param )
+ return( t.result )
+ }
+
+ ## check whether estimating equations should be computed for fixed parameters
+
+ if( length( adjustable.param ) == 0 && force.gradient ){
+ adjustable.param <- fixed.param
+ }
+
+ ## evaluate estimating equations
+
+ ## compute auxiliary items
+
+ TtT <- as.vector( table( TT ) )
+
+ ## compute Cov[bhat]
+
+ r.cov <- compute.covariances(
+ Valpha.objects = lik.item[["Valpha"]],
+ Aalpha = lik.item[["effects"]][["Aalpha"]],
+ Palpha = lik.item[["effects"]][["Palpha"]],
+ rweights = lik.item[["effects"]][["rweights"]],
+ XX = XX, TT = TT, names.yy = names( yy ),
+ nugget = lik.item[["param"]]["nugget"],
+ eta = lik.item[["eta"]],
+ expectations = expectations,
+ cov.bhat = TRUE, full.cov.bhat = TRUE,
+ cov.betahat = FALSE,
+ cov.bhat.betahat = FALSE,
+ cov.delta.bhat = FALSE, full.cov.delta.bhat = FALSE,
+ cov.delta.bhat.betahat = FALSE,
+ cov.ehat = FALSE, full.cov.ehat = FALSE,
+ cov.ehat.p.bhat = FALSE, full.cov.ehat.p.bhat = FALSE,
+ aux.cov.pred.target = FALSE,
+ extended.output = FALSE,
+ verbose = verbose
+ )
+
+ if( r.cov[["error"]] ) {
+ if( verbose > 0 ) cat(
+ "\nan error occurred when computing the covariances of fixed and random effects\n"
+ )
+ t.result <- rep( Inf, length( adjustable.param ) )
+ names( t.result ) <- names( adjustable.param )
+ return( t.result )
+ }
+
+ ## estimating equations for xihat
+
+ eeq.xihat <- estimating.eqations.xihat(
+ res = lik.item[["effects"]][["residuals"]],
+ TT = TT, xihat = xihat,
+ nugget = lik.item[["param"]]["nugget"],
+ eta = lik.item[["eta"]],
+ Valpha.inverse.Palpha = lik.item[["effects"]][["Valpha.inverse.Palpha"]],
+ psi.function = psi.function,
+ tuning.psi = tuning.psi
+ )
+
+ ## initialize estimating equations for variogram parameters
+
+ eeq.emp <- rep( NA, length( adjustable.param ) )
+ names( eeq.emp ) <- names( adjustable.param )
+
+ eeq.exp <- rep( NA, length( adjustable.param ) )
+ names( eeq.exp ) <- names( adjustable.param )
+
+ ## estimation equation for nugget
+
+ if( "nugget" %in% names( adjustable.param ) ) {
+
+ ## compute trace of Cov[ psi( residuals/sqrt(nugget) ) ]
+
+ eeq.exp["nugget"] <- sum(
+ diag(
+ lik.item[["Valpha"]][["Valpha.inverse"]] %*%
+ ( 1/TtT * lik.item[["Valpha"]][["Valpha.inverse"]] ) %*%
+ r.cov[["cov.bhat"]]
+ )
+ )
+ eeq.emp["nugget"] <- sum(
+ ( lik.item[["effects"]][["z.star"]] )^2 / TtT
+ )
+
+ }
+
+ ## estimation equation for spatial nugget
+
+ if( "snugget" %in% names( adjustable.param ) ) {
+
+ ## compute trace( Valpha^-1 Cov[bhat] )
+
+ eeq.exp["snugget"] <- sum(
+ rowSums(
+ (lik.item[["Valpha"]][["Valpha.inverse"]] %*% lik.item[["Valpha"]][["Valpha.inverse"]] ) *
+ r.cov[["cov.bhat"]]
+ )
+ )
+ eeq.emp["snugget"] <- sum( lik.item[["effects"]][["z.star"]]^2 )
+
+ }
+
+ ## estimation equation for variance
+
+ if( "variance" %in% names( adjustable.param ) ) {
+
+ ## compute trace( Valpha^-1 Cov[bhat] )
+
+ eeq.exp["variance"] <- sum(
+ rowSums(
+ ( lik.item[["Valpha"]][["Valpha.inverse"]] %*% lik.item[["Valpha"]][["Valpha0"]] %*% lik.item[["Valpha"]][["Valpha.inverse"]] ) *
+ r.cov[["cov.bhat"]]
+ )
+ )
+ eeq.emp["variance"] <- sum(
+ lik.item[["effects"]][["z.star"]] * drop( lik.item[["Valpha"]][["Valpha0"]] %*% lik.item[["effects"]][["z.star"]] )
+ )
+
+ }
+
+ ## estimation equations for scale, extra variogram and anisotropy
+ ## parameters
+
+ extra.par <- names( adjustable.param )[ !(
+ names( adjustable.param ) %in% c( "variance", "snugget", "nugget" )
+ )]
+
+ for( t.i in extra.par ){
+
+ ## compute trace( Valpha^-1 * dValpha/dalpha * Valpha^-1 * Cov[bhat] )
+
+ dValpha <- dcorr.dparam(
+ x = lag.vectors, variogram.model = variogram.model, param = lik.item[["param"]],
+ d.param = t.i,
+ aniso = lik.item[["aniso"]],
+ verbose = verbose
+ )
+ ## if( identical( class( dValpha ), "try-error" ) ){
+ ## if( verbose > 0 ) cat( "error in dcorr.dparam\n\n" )
+ ## t.result <- rep( Inf, length( adjustable.param ) )
+ ## names( t.result ) <- names( adjustable.param )
+ ## return( t.result )
+ ## }
+
+ eeq.exp[t.i] <- sum(
+ rowSums(
+ (lik.item[["Valpha"]][["Valpha.inverse"]] %*% dValpha %*% lik.item[["Valpha"]][["Valpha.inverse"]]) *
+ r.cov[["cov.bhat"]]
+ )
+ )
+ eeq.emp[t.i] <- sum(
+ lik.item[["effects"]][["z.star"]] * drop( dValpha %*% lik.item[["effects"]][["z.star"]] )
+ )
+
+ }
+
+ if( verbose > 1 ) {
+ cat( "\n ",
+ format( c( "min(xihat)", "max(xihat)" ), width = 14, justify = "right" ),
+ "\n", sep =""
+ )
+ cat( " EEQ :",
+ format(
+ signif( range(eeq.xihat), digits = 7 ),
+ scientific = TRUE, width = 14
+ ), "\n", sep = ""
+ )
+ cat( "\n ",
+ format( names( eeq.emp), width = 14, justify = "right" ),
+ "\n", sep =""
+ )
+ cat( " EEQ :",
+ format(
+ signif( eeq.emp / eeq.exp - 1, digits = 7 ),
+ scientific = TRUE, width = 14
+ ), "\n", sep = ""
+ )
+ if( verbose > 2 ){
+ cat( " empirical terms:",
+ format(
+ signif( eeq.emp, digits = 7 ),
+ scientific = TRUE, width = 14
+ ), "\n", sep = ""
+ )
+ cat( " expected terms:",
+ format(
+ signif( eeq.exp, digits = 7 ),
+ scientific = TRUE, width = 14
+ ), "\n", sep = ""
+ )
+ }
+ cat("\n")
+ }
+
+ ## store terms in lik.item object
+
+ lik.item[["eeq"]] <- list(
+ eeq.xihat = eeq.xihat,
+ eeq.emp = eeq.emp,
+ eeq.exp = eeq.exp
+ )
+
+ assign( "lik.item", lik.item, pos = as.environment( envir ) )
+
+ return( c( eeq.xihat, eeq.emp / eeq.exp - 1. ) )
+
+}
+
+
+## ##############################################################################
+
negative.restr.loglikelihood <-
function(
adjustable.param,
@@ -2624,7 +2914,7 @@
variogram.model, fixed.param, param.name, aniso.name,
param.tf, bwd.tf, safe.param,
lag.vectors,
- XX, min.condnum, rankdef.x, yy, betahat, TT, bhat,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
psi.function, dpsi.function,
tuning.psi, tuning.psi.nr,
irwls.initial, irwls.maxiter, irwls.reltol,
@@ -2642,6 +2932,7 @@
## 2012-11-27 AP changes in parameter back-transformation
## 2013-06-03 AP changes for estimating xihat
## 2013-06-12 AP substituting [["x"]] for $x in all lists
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
# sel <- !c( param.name, aniso.name ) %in% names( fixed.param )
# names( adjustable.param ) <- c( param.name, aniso.name )[sel]
@@ -2654,10 +2945,10 @@
adjustable.param, variogram.model, fixed.param, param.name, aniso.name,
param.tf, bwd.tf, safe.param,
lag.vectors,
- XX, min.condnum, rankdef.x, yy, betahat, TT, bhat,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
psi.function, dpsi.function, tuning.psi, tuning.psi.nr,
irwls.initial, irwls.maxiter, irwls.reltol,
- compute.Q = TRUE,
+ compute.xihat = TRUE, compute.Q = TRUE,
verbose
)
@@ -2740,7 +3031,7 @@
variogram.model, fixed.param, param.name, aniso.name,
param.tf, deriv.fwd.tf, bwd.tf, safe.param,
lag.vectors,
- XX, min.condnum, rankdef.x, yy, betahat, TT, bhat,
+ XX, min.condnum, rankdef.x, yy, TT, xihat,
psi.function, dpsi.function, d2psi.function,
tuning.psi, tuning.psi.nr,
irwls.initial, irwls.maxiter, irwls.reltol,
@@ -2758,6 +3049,7 @@
## 2012-11-04 AP unscaled psi-function
## 2012-11-27 AP changes in parameter back-transformation
## 2013-06-12 AP substituting [["x"]] for $x in all lists
+ ## 2013-07-12 AP solving estimating equations by BBsolve{BB} (in addition to nleqslv)
## dtrafo.fct <- list(
## log = function( x ) 1/x,
@@ -2771,10 +3063,10 @@
adjustable.param, variogram.model, fixed.param, param.name, aniso.name,
param.tf, bwd.tf, safe.param,
[TRUNCATED]
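
The design of compute.expanded.estimating.equations() is visible above: its first argument allpar stacks xihat (the first NROW(XX) elements) with the transformed variogram parameters, and the function returns the concatenated vector c( eeq.xihat, eeq.emp / eeq.exp - 1 ), so that a single root finder can solve the expanded system jointly. The call from georob.fit() is not shown in the truncated diff; the following self-contained toy merely mimics that stacked pattern with BBsolve{BB}, and all functions and values are invented for illustration:

library( BB )

## toy analogue of the expanded system: the first two components play the
## role of the xihat equations, the last one that of a variogram-parameter
## equation
f.expanded <- function( allpar ){
  xihat <- allpar[1:2]
  theta <- allpar[3]
  eeq.xihat <- c( xihat[1] - 0.3 * theta, xihat[2] + 0.1 * theta - 1. )
  eeq.param <- sum( xihat^2 ) - theta     ## stands in for eeq.emp / eeq.exp - 1
  c( eeq.xihat, eeq.param )
}

r.bb <- BBsolve( par = c( 0., 0., 1. ), fn = f.expanded, method = 2, quiet = TRUE )
r.bb$par    ## joint root of the stacked equations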
To get the complete diff run:
svnlook diff /svnroot/georob -r 13