[Gmm-commits] r41 - in pkg/gmm: . R man

noreply at r-forge.r-project.org
Tue Nov 1 18:58:07 CET 2011


Author: chaussep
Date: 2011-11-01 18:58:07 +0100 (Tue, 01 Nov 2011)
New Revision: 41

Modified:
   pkg/gmm/DESCRIPTION
   pkg/gmm/NEWS
   pkg/gmm/R/Methods.gmm.R
   pkg/gmm/R/getModel.R
   pkg/gmm/R/gmm.R
   pkg/gmm/R/momentEstim.R
   pkg/gmm/man/gmm.Rd
Log:
Several modifications: see NEWS

Modified: pkg/gmm/DESCRIPTION
===================================================================
--- pkg/gmm/DESCRIPTION	2011-07-19 14:21:47 UTC (rev 40)
+++ pkg/gmm/DESCRIPTION	2011-11-01 17:58:07 UTC (rev 41)
@@ -1,5 +1,5 @@
 Package: gmm
-Version: 1.3-8
+Version: 1.3-9
 Date: 2011-07-19
 Title: Generalized Method of Moments and Generalized Empirical
         Likelihood

Modified: pkg/gmm/NEWS
===================================================================
--- pkg/gmm/NEWS	2011-07-19 14:21:47 UTC (rev 40)
+++ pkg/gmm/NEWS	2011-11-01 17:58:07 UTC (rev 41)
@@ -1,3 +1,12 @@
+Changes in version 1.3-9
+
+o The method for GMM-CUE has been modified. Previously, the weights for the kernel estimation of the weighting matrix
+  were recomputed inside the optimizer, which made the algorithm slow and unstable. They are now fixed, using either the starting 
+  values provided by the user (for linear models) or the first-step GMM estimate. 
+o You can now trace the convergence of the iterative GMM with the option traceIter=TRUE
+o The weights for the kernel are also fixed for iterative GMM. It is faster, and I do not think they should change at each iteration. I am open to comments on that.
+
 Changes in version 1.3-8
 
 o A bug was found in the computation of linear GMM. The weighting matrix was not used properly. It is now fixed.
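
For illustration, here is a minimal usage sketch of the two user-visible changes listed above (the data and moment conditions are invented for this example and are not part of the commit):

library(gmm)

# Over-identified moment conditions for the mean and variance of a normal sample
g <- function(theta, x) {
  m1 <- x - theta[1]                  # E[x - mu] = 0
  m2 <- (x - theta[1])^2 - theta[2]   # E[(x - mu)^2 - sigma^2] = 0
  m3 <- (x - theta[1])^3              # E[(x - mu)^3] = 0 for a normal
  cbind(m1, m2, m3)
}

set.seed(123)
x <- rnorm(500, mean = 4, sd = 2)

# New traceIter option: print the iteration number, the objective value and the
# current coefficients at each step of the iterative GMM
res_iter <- gmm(g, x, t0 = c(mu = 1, sig2 = 1), type = "iterative", traceIter = TRUE)

# GMM-CUE: the kernel weights of the HAC estimate are now fixed at a first-step
# estimate instead of being re-estimated inside the optimizer; summary() should
# report how the weights were fixed
res_cue <- gmm(g, x, t0 = c(mu = 1, sig2 = 1), type = "cue")
summary(res_cue)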

Modified: pkg/gmm/R/Methods.gmm.R
===================================================================
--- pkg/gmm/R/Methods.gmm.R	2011-07-19 14:21:47 UTC (rev 40)
+++ pkg/gmm/R/Methods.gmm.R	2011-11-01 17:58:07 UTC (rev 41)
@@ -27,6 +27,8 @@
         c("Estimate", "Std. Error", "t value", "Pr(>|t|)"))
 	ans$stest <- specTest(z)
         ans$algoInfo <- z$algoInfo
+	if(z$met=="cue")
+		ans$cue <- object$cue
 	class(ans) <- "summary.gmm"
 	ans
 	}
@@ -35,7 +37,11 @@
 	{
 	cat("\nCall:\n")
 	cat(paste(deparse(x$call), sep="\n", collapse = "\n"), "\n\n", sep="")
-	cat("\nMethod: ", x$met,"\n\n")
+	cat("\nMethod: ", x$met,"\n")
+	if (x$met=="cue")
+		cat("         (",x$cue$message,")\n\n")
+	else
+		cat("\n")
 	cat("Kernel: ", x$kernel,"\n\n")
 	cat("Coefficients:\n")
 	print.default(format(x$coefficients, digits=digits),

Modified: pkg/gmm/R/getModel.R
===================================================================
--- pkg/gmm/R/getModel.R	2011-07-19 14:21:47 UTC (rev 40)
+++ pkg/gmm/R/getModel.R	2011-11-01 17:58:07 UTC (rev 41)
@@ -65,7 +65,10 @@
     if(is.null(object$weightsMatrix))
       clname <- paste(class(object), "." ,object$type, sep = "")
     else
-      clname <- "fixedW"
+	{
+        clname <- "fixedW"
+	object$type <- "One step GMM with fixed W"
+	}
     if (!is.function(object$gradv))
       { 
       gradv <- .Gf

Modified: pkg/gmm/R/gmm.R
===================================================================
--- pkg/gmm/R/gmm.R	2011-07-19 14:21:47 UTC (rev 40)
+++ pkg/gmm/R/gmm.R	2011-11-01 17:58:07 UTC (rev 41)
@@ -14,7 +14,7 @@
 gmm <- function(g,x,t0=NULL,gradv=NULL, type=c("twoStep","cue","iterative"), wmatrix = c("optimal","ident"),  vcov=c("HAC","iid","TrueFixed"), 
 	      kernel=c("Quadratic Spectral","Truncated", "Bartlett", "Parzen", "Tukey-Hanning"),crit=10e-7,bw = bwAndrews, 
 	      prewhite = FALSE, ar.method = "ols", approx="AR(1)",tol = 1e-7, itermax=100,optfct=c("optim","optimize","nlminb"),
-	      model=TRUE, X=FALSE, Y=FALSE, TypeGmm = "baseGmm", centeredVcov = TRUE, weightsMatrix = NULL, data, ...)
+	      model=TRUE, X=FALSE, Y=FALSE, TypeGmm = "baseGmm", centeredVcov = TRUE, weightsMatrix = NULL, traceIter = FALSE, data, ...)
 {
 
 type <- match.arg(type)
@@ -33,7 +33,7 @@
 all_args<-list(data = data, g = g, x = x, t0 = t0, gradv = gradv, type = type, wmatrix = wmatrix, vcov = vcov, kernel = kernel,
                    crit = crit, bw = bw, prewhite = prewhite, ar.method = ar.method, approx = approx, 
                    weightsMatrix = weightsMatrix, centeredVcov = centeredVcov,
-                   tol = tol, itermax = itermax, optfct = optfct, model = model, X = X, Y = Y, call = match.call())
+                   tol = tol, itermax = itermax, optfct = optfct, model = model, X = X, Y = Y, call = match.call(), traceIter = traceIter)
 class(all_args)<-TypeGmm
 Model_info<-getModel(all_args)
 z <- momentEstim(Model_info, ...)
@@ -227,8 +227,7 @@
         gmat <- P$g(thet,x)
         class(gmat) <- "gmmFct"
       }
-    w2 <- kernHAC(gmat, kernel = P$kernel, bw = P$bw, prewhite = P$prewhite, 
-            ar.method = P$ar.method, approx = P$approx, tol = P$tol, sandwich = FALSE)
+    w2 <- vcovHAC(gmat, weights=P$fixedKernW, sandwich = FALSE)
    }
   obj <- crossprod(gbar,solve(w2,gbar))
   return(obj)
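
The change above replaces kernHAC(), which re-selects the bandwidth and kernel weights at every candidate value of theta, with vcovHAC() applied to precomputed weights (P$fixedKernW). Here is a minimal sketch of the idea using the sandwich package directly; the lm fit below is only a stand-in for the matrix of moment conditions, not the package's internal objects:

library(sandwich)

set.seed(1)
n  <- 200
x1 <- rnorm(n)
u  <- as.numeric(arima.sim(list(ar = 0.5), n = n))   # autocorrelated errors
y  <- 1 + x1 + u
fit <- lm(y ~ x1)                                    # stand-in for the preliminary fit

# Old behaviour (conceptually): bandwidth and weights re-selected at each evaluation
Omega_old <- kernHAC(fit, kernel = "Quadratic Spectral", sandwich = FALSE)

# New behaviour: compute the Andrews kernel weights once from the fixed
# preliminary estimate ...
w_fixed <- weightsAndrews(fit, kernel = "Quadratic Spectral", prewhite = FALSE)

# ... and reuse them for every subsequent evaluation of the objective
Omega_new <- vcovHAC(fit, weights = w_fixed, sandwich = FALSE)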

Modified: pkg/gmm/R/momentEstim.R
===================================================================
--- pkg/gmm/R/momentEstim.R	2011-07-19 14:21:47 UTC (rev 40)
+++ pkg/gmm/R/momentEstim.R	2011-11-01 17:58:07 UTC (rev 41)
@@ -283,8 +283,11 @@
           gmat <- g(tet, x)
           class(gmat) <- "gmmFct"
           }
-        w <- kernHAC(gmat, kernel = P$kernel, bw = P$bw, prewhite = P$prewhite, ar.method = P$ar.method, 
-                 approx = P$approx, tol = P$tol, sandwich = FALSE)
+	if (j==1)
+		fixedKernW <-  weightsAndrews(gmat, prewhite=P$prewhite,
+	 			   bw = P$bw, kernel = P$kernel, approx = P$approx, 
+ 			   	   ar.method = P$ar.method, tol = P$tol) 
+        w <- vcovHAC(gmat, weights = fixedKernW, sandwich = FALSE)
         }
       res <- .tetlin(x, w, dat$ny, dat$nh, dat$k, P$gradv, g)
       ch <- crossprod(abs(tet- res$par)/tet)^.5
@@ -416,8 +419,12 @@
           gmat <- P$g(tet, P$x)
           class(gmat) <- "gmmFct"
           }
-        w <- kernHAC(gmat, kernel = P$kernel, bw = P$bw, prewhite = P$prewhite, ar.method = P$ar.method, 
-                 approx = P$approx, tol = P$tol, sandwich = FALSE)
+	if (j==1)
+		fixedKernW <-  weightsAndrews(gmat, prewhite=P$prewhite,
+	 			   bw = P$bw, kernel = P$kernel, approx = P$approx, 
+ 			   	   ar.method = P$ar.method, tol = P$tol) 
+        w <- vcovHAC(gmat, weights = fixedKernW, sandwich = FALSE)
+
         }
 
       if (P$optfct == "optim")
@@ -457,12 +464,14 @@
         res$par <- res$minimum
         res$value <- res$objective
         }	
-        ch <- crossprod(abs(tet-res$par)/tet)^.5	
+        ch <- crossprod(tet-res$par)^.5/(1+crossprod(tet)^.5)
         if (j>P$itermax)
           {
           cat("No convergence after ", P$itermax, " iterations")
           ch <- P$crit
           }
+	if(P$traceIter)
+		print(c(iter=j,Objective=res$value,res$par))
         j <- j+1	
       }
     z = list(coefficients = res$par, objective = res$value,k=k, k2=k2, n=n, q=q, df=df)	
@@ -509,11 +518,25 @@
     w <- diag(q)
     res <- .tetlin(x, w, dat$ny, dat$nh, dat$k, P$gradv, g)
     z = list(coefficients = res$par, objective = res$value, dat = dat, k = k, k2 = k2, n = n, q = q, df = df)
+    P$weightMessage <- "No CUE needed because the model is just identified"
     }
   else
     {
     if (is.null(P$t0))
-      P$t0 <- .tetlin(x,diag(q), dat$ny, dat$nh, dat$k, P$gradv, g)$par
+	{
+	P$t0 <- .tetlin(x,diag(q), dat$ny, dat$nh, dat$k, P$gradv, g)$par
+	P$weightMessage <- "Weights for kernel estimate of the covariance are fixed and based on the first step estimate of Theta"	
+	}
+    else
+	P$weightMessage <- "Weights for kernel estimate of the covariance are fixed and based on the initial value of Theta provided by the user"	
+
+    gt0 <- g(P$t0,x)
+    gt0 <- lm(gt0~1)
+    P$fixedKernW <-  weightsAndrews(gt0, prewhite=P$prewhite,
+ 			   bw = P$bw, kernel = P$kernel, approx = P$approx, 
+ 			   ar.method = P$ar.method, tol = P$tol) 
+
+
     if (P$optfct == "optim")
       res2 <- optim(P$t0,.objCue, x = x, P = P, ...)
     if (P$optfct == "nlminb")
@@ -528,8 +551,10 @@
       res2$value <- res2$objective
       }
     z = list(coefficients = res2$par, objective = res2$value, dat = dat, k = k, k2 = k2, n = n, q = q, df = df)
-    if (P$optfct != "optimize")
-	z$convergence = res2$convergence
+    if (P$optfct == "optim")
+	z$algoInfo <- list(convergence = res2$convergence, counts = res2$counts, message = res2$message)
+    else if(P$optfct == "nlminb")
+	z$algoInfo <- list(convergence = res2$convergence, counts = res2$evaluations, message = res2$message)
     }
 
   z$gt <- g(z$coefficients, x) 
@@ -549,6 +574,7 @@
   z$gradv <- P$gradv
   z$iid <- P$iid
   z$g <- P$g
+  z$cue <- list(weights=P$fixedKernW,message=P$weightMessage)
   
   namex <- colnames(dat$x[,(dat$ny+1):(dat$ny+dat$k)])
   nameh <- colnames(dat$x[,(dat$ny+dat$k+1):(dat$ny+dat$k+dat$nh)])
@@ -573,75 +599,43 @@
   {
   P <- object
   x <- P$x
+
+  res <- try(gmm(P$g,P$x,P$t0,wmatrix="ident",optfct=P$optfct, ...))
+  if(class(res)=="try-error")
+	stop("Cannot get a first step estimate to compute the weights for the Kernel estimate of the covariance matrix; try different starting values")
+
+  n <- nrow(res$gt)
+  q <- ncol(res$gt)
+
   if (P$optfct == "optimize")
-    {
-    n = nrow(P$g(P$t0[1], x))
-    q = ncol(P$g(P$t0[1], x))
     k = 1
-    }
   else
-    {
-    n = nrow(P$g(P$t0, x))
-    q = ncol(P$g(P$t0, x))
     k = length(P$t0)
-    }
+
   k2 <- k
   df <- q - k
-  w=diag(q)
-  if (P$optfct == "optim")
-   {
-   if (P$gradvf)
-      {
-      gradvOptim <- P$gradv
-      gr2 <- function(thet, x,  w, gf, INV)
-		{
-		gt <- gf(thet, x)
-		Gbar <- gradvOptim(thet, x) 
-		gbar <- as.vector(colMeans(gt))
-		if (INV)		
-		  	obj <- crossprod(Gbar, solve(w, gbar))
-		else
-			obj <- crossprod(Gbar,w)%*%gbar
-		return(obj*2)
-		}
-      argDots <- list(...)
-      allArgOptim <- list(par = P$t0, fn = .obj1, gr = gr2, x = P$x, w = w, gf = P$g, INV = TRUE)
-      argDots$gr <- NULL
-      allArgOptim <- c(allArgOptim,argDots)
-      res <- do.call(optim,allArgOptim)
-      }
-    else
-      res <- optim(P$t0, .obj1, x = P$x, w = w, gf = P$g, ...)
-    }
-  if (P$optfct == "nlminb")
-    {
-    res <- nlminb(P$t0, .obj1, x = P$x, w = w, gf = P$g, ...)
-    res$value <- res$objective
-    }
-  if (P$optfct == "optimize")
-    {
-    res <- optimize(.obj1, P$t0, x = P$x, w = w, gf = P$g, ...)
-    res$par <- res$minimum
-    res$value <- res$objective
-    }	
 
   if (q == k2 | P$wmatrix == "ident")
     {
-    z <- list(coefficients = res$par, objective = res$value, k=k, k2=k2, n=n, q=q, df=df)
-    if (P$optfct == "optim")
-	z$algoInfo <- list(convergence = res$convergence, counts = res$counts, message = res$message)
-    else if(P$optfct == "nlminb")
-	z$algoInfo <- list(convergence = res$convergence, counts = res$evaluations, message = res$message)
+    z <- list(coefficients = res$coef, objective = res$objective, algoInfo = res$algoInfo, k=k, k2=k2, n=n, q=q, df=df)
+    P$weightMessage <- "No CUE needed because the model is just identified or you set wmatrix=identity"
     }	
   else
     {
+    gt0 <- P$g(res$coef,x)
+    gt0 <- lm(gt0~1)
+    P$fixedKernW <-  weightsAndrews(gt0, prewhite=P$prewhite,
+ 			   bw = P$bw, kernel = P$kernel, approx = P$approx, 
+ 			   ar.method = P$ar.method, tol = P$tol) 
+    P$weightMessage <- "Weights for kernel estimate of the covariance are fixed and based on the first step estimate of Theta"
+
     if (P$optfct == "optim")
       {
-      res2 <- optim(res$par, .objCue, x = x, P = P, ...)
+      res2 <- optim(res$coef, .objCue, x = x, P = P, ...)
       }
     if (P$optfct == "nlminb")
       {
-      res2 <- nlminb(res$par, .objCue, x = x, P = P, ...)
+      res2 <- nlminb(res$coef, .objCue, x = x, P = P, ...)
       res2$value <- res2$objective
       }
     if (P$optfct == "optimize")
@@ -668,6 +662,8 @@
   z$gt <- P$g(z$coefficients, P$x)
   z$iid <- P$iid
   z$g <- P$g
+  z$cue <- list(weights=P$fixedKernW,message=P$weightMessage)
+
  
   class(z) <- paste(P$TypeGmm, ".res", sep = "")	
   return(z)

Modified: pkg/gmm/man/gmm.Rd
===================================================================
--- pkg/gmm/man/gmm.Rd	2011-07-19 14:21:47 UTC (rev 40)
+++ pkg/gmm/man/gmm.Rd	2011-11-01 17:58:07 UTC (rev 41)
@@ -14,7 +14,7 @@
 "Parzen", "Tukey-Hanning"), crit = 10e-7, bw = bwAndrews, 
 prewhite = FALSE, ar.method = "ols", approx = "AR(1)", tol = 1e-7, 
 itermax = 100, optfct = c("optim","optimize","nlminb"), model=TRUE, X=FALSE, Y=FALSE, 
-TypeGmm = "baseGmm", centeredVcov = TRUE, weightsMatrix = NULL, data,  ...)
+TypeGmm = "baseGmm", centeredVcov = TRUE, weightsMatrix = NULL, traceIter = FALSE, data,  ...)
 }
 \arguments{
 \item{g}{A function of the form \eqn{g(\theta,x)} and which returns a \eqn{n \times q} matrix with typical element \eqn{g_i(\theta,x_t)} for \eqn{i=1,...q} and \eqn{t=1,...,n}. This matrix is then used to build the q sample moment conditions. It can also be a formula if the model is linear (see details below).}
@@ -57,6 +57,8 @@
 
 \item{weightsMatrix}{It allows users to provide \code{gmm} with a fixed weighting matrix. This matrix must be \eqn{q \times q}, symmetric and strictly positive definite. When provided, the \code{type} option becomes irrelevant. }
 
+\item{traceIter}{If set to TRUE, tracing information (iteration number, value of the objective function and current coefficient estimates) is printed at each step of the iterative GMM (type "iterative").}
+
 \item{data}{A data.frame or a matrix with column names (optional).}
 
 \item{...}{More options to give to \code{\link{optim}}.}


