[Coxflexboost-commits] r6 - in pkg: . R
noreply at r-forge.r-project.org
noreply at r-forge.r-project.org
Fri Dec 5 17:06:26 CET 2008
Author: hofner
Date: 2008-12-05 17:06:26 +0100 (Fri, 05 Dec 2008)
New Revision: 6
Modified:
pkg/DESCRIPTION
pkg/R/PMLE.r
pkg/R/bbs.r
pkg/R/cfboost.r
pkg/R/helpers.r
Log:
vers. 0.6-0
- new df2lambda
- other minor changes caused by new df2lambda
Modified: pkg/DESCRIPTION
===================================================================
--- pkg/DESCRIPTION 2008-11-04 10:31:33 UTC (rev 5)
+++ pkg/DESCRIPTION 2008-12-05 16:06:26 UTC (rev 6)
@@ -1,7 +1,7 @@
Package: CoxFlexBoost
Type: Package
Title: Boosting Flexible Cox Models (with Time-Varying Effects)
-Version: 0.5-0
+Version: 0.6-0
Date: 2008-10-24
Author: Benjamin Hofner
Maintainer: Benjamin Hofner <benjamin.hofner at imbe.med.uni-erlangen.de>
Modified: pkg/R/PMLE.r
===================================================================
--- pkg/R/PMLE.r 2008-11-04 10:31:33 UTC (rev 5)
+++ pkg/R/PMLE.r 2008-12-05 16:06:26 UTC (rev 6)
@@ -3,7 +3,7 @@
###################
## Function for penalized ML estimation
-PMLE <- function(y, x, offset, fit, ens, nu, maxit, subdivisions = 100, estimation = TRUE, trace){
+PMLE <- function(y, x, offset, fit, ens, nu, maxit, subdivisions = 100, estimation = TRUE){
time <- y[,1]
delta <- y[,2]
Modified: pkg/R/bbs.r
===================================================================
--- pkg/R/bbs.r 2008-11-04 10:31:33 UTC (rev 5)
+++ pkg/R/bbs.r 2008-12-05 16:06:26 UTC (rev 6)
@@ -102,10 +102,27 @@
lambda <- mboost:::df2lambda(X, df = df, dmat = K, weights =rep(1,nrow(X)))
+ ## Closure (stored below as attr(X, "lambda")): given the response y and
+ ## the offset, find the smoothing parameter lambda whose effective degrees
+ ## of freedom equal df, i.e. the root of trace(F %*% solve(F_pen)) - df.
+ df2lambda <- function(y, offset){
+ ## FIXME: what about nu and maxit? ##
+ ## FIXME: find a better name for this function
+ dummy <- helper_fct(y, X, offset, pen = K)
+ ## difference between effective df for a given lambda and the target df
+ df2l <- function(lambda, df){
+ tmp <- dummy(lambda)$F %*% solve(dummy(lambda)$F_pen)
+ sum(diag(tmp)) - df
+ }
+ ## FIXME: lambda will no longer exist later on - what should the upper interval bound be? ##
+ result <- uniroot(f= df2l, interval = c(0,lambda + 100), df=df)
+ result$root
+ }
+
+ ## Closure (stored below as attr(X, "pen")): penalty matrix for a given
+ ## smoothing parameter, i.e. lambda times the penalty matrix K.
+ penalty <- function(lambda){
+ lambda * K
+ }
+
attr(X, "designMat") <- designMat
attr(X, "df") <- df
- attr(X, "lambda") <- lambda
- attr(X, "pen") <- lambda * K
+ attr(X, "lambda") <- df2lambda
+ attr(X, "pen") <- penalty
attr(X, "timedep") <- timedep
attr(X, "coefs") <- rep(0, ncol(X))
attr(X, "predict") <- predictfun
Modified: pkg/R/cfboost.r
===================================================================
--- pkg/R/cfboost.r 2008-11-04 10:31:33 UTC (rev 5)
+++ pkg/R/cfboost.r 2008-12-05 16:06:26 UTC (rev 6)
@@ -67,11 +67,18 @@
if (trace)
cat("Offset: ", offset, "\n")
- df_est <- matrix(NA, nrow = mstop, ncol = length(x)) # matrix of estimated degrees of freedom
mstart <- 1
hSi <- 1 # number of iterations in the repeat loop
+ df_est <- matrix(NA, nrow = mstop, ncol = length(x)) # matrix of estimated degrees of freedom
+ for (i in 1:length(x)){
+ if (!is.null( attr(x[[i]], "lambda"))){
+ attr(x[[i]],"lambda") <- attr(x[[i]], "lambda")(y, offset)
+ attr(x[[i]],"pen") <- attr(x[[i]],"pen")(attr(x[[i]],"lambda"))
+ }
+ }
+
##################################
#### start boosting iteration ####
##################################
@@ -86,7 +93,7 @@
dummy_ens <- ens[1:m] # get the first m-1 selected base-learners
dummy_ens[m] <- i # and set the m-th base-learner temporarily to i
## try to compute the (component-wise) penalized MLE
- dummy <- try(PMLE(y, x, offset, fit, dummy_ens, nu, maxit, trace = trace))
+ dummy <- try(PMLE(y, x, offset, fit, dummy_ens, nu, maxit))
if (inherits(dummy, "try-error")) next
coefs[[i]] <- dummy$par
maxll[i] <- dummy$maxll
Modified: pkg/R/helpers.r
===================================================================
--- pkg/R/helpers.r 2008-11-04 10:31:33 UTC (rev 5)
+++ pkg/R/helpers.r 2008-12-05 16:06:26 UTC (rev 6)
@@ -152,3 +152,69 @@
return(RET_inbag)
}
}
+
+## helper function for df2lambda as used in bbs()
+## Builds and returns a closure F_pen(lambda, coefficients = NULL) that
+## computes the Fisher information of one base-learner together with its
+## penalized version:
+##   y            survival response; first column = time, second = status
+##   x            base-learner design matrix with attributes "timedep",
+##                "designMat" and "predict" (as constructed in bbs())
+##   offset       offset of the boosting fit (used on the exp scale below)
+##   pen          penalty matrix; NULL is treated as 0 (no penalty)
+##   nu           step-size, forwarded to integr_fisher via `controls`
+##   maxi         NOTE(review): currently unused in this function
+##   subdivisions number of grid points per observation for time integration
+helper_fct <- function(y, x, offset, pen, nu = 0.1, maxi= 30000, subdivisions = 100){
+
+ time <- y[,1]
+ delta <- y[,2]
+
+ ## number of coefficients to estimate
+ coefs <- rep(0, ncol(x))
+
+ if(attr(x, "timedep")){
+ ## make grid and compute grid-width if there is ANY time-dependent base-learner
+ sub = subdivisions ## subdivisions of time
+ n = length(time) ## number of observations
+
+ grid <- function(upper, length){
+ ## helper function to compute grid
+ seq(from = 0, to = upper, length = length)
+ }
+
+ ## make grid: one equidistant grid from 0 to time[i] per observation
+ grid <- lapply(time, grid, length = sub)
+
+ trapezoid_width <- rep(NA, n)
+ for (i in 1:n)
+ trapezoid_width[i] <- grid[[i]][2] # = second element, as first element == 0 and the grid equidistant for every i
+ } else {
+ ## no time-dependency: no integration grid needed
+ grid = NULL
+ trapezoid_width = NULL
+ }
+
+ ## build design matrix for currently added base-learner
+ if (attr(x, "timedep")){
+ ## evaluate the design over the time grid; the covariate names (and the
+ ## optional second covariate z) are fetched from the environment of the
+ ## stored predict function
+ xd <- unlist(grid)
+ xname <- get("xname", environment(attr(x, "predict")))
+ zd <- get("z", environment(attr(x, "predict")))
+ if (!is.null(zd)){
+ zname <- get("zname", environment(attr(x, "predict")))
+ ## repeat z so it matches the expanded time grid
+ zd <- rep(zd, each = length(grid[[1]]))
+ newdata <- data.frame(cbind(xd, zd))
+ names(newdata) <- c(xname, zname)
+ } else {
+ newdata <- data.frame(xd)
+ names(newdata) <- c(xname)
+ }
+ desMat <- attr(x,"designMat")(newdata = newdata)
+ } else {
+ desMat <- attr(x,"designMat")()
+ }
+
+ ## predictions on the exp scale; tconst/td start at 1 (= exp(0))
+ exp_offset <- exp(offset)
+ exp_pred_tconst <- 1
+ exp_pred_td <- 1
+
+ ## returned closure: for a given lambda compute the Fisher matrix F and
+ ## the penalized Fisher matrix F_pen = F + lambda * pen; `coefficients`,
+ ## if supplied, overrides the zero coefficients initialized above
+ F_pen <- function(lambda, coefficients=NULL){
+ if(!is.null(coefficients))
+ coefs <- coefficients
+ Fisher_mat <- integr_fisher(x, coefs, desMat,
+ predictions = list(offset = exp_offset, tconst = exp_pred_tconst, td = exp_pred_td),
+ controls = list(grid = grid, trapezoid_width = trapezoid_width, upper = time, nu = nu, which = "fisher"))
+ if (is.null(pen)) pen <- 0
+ return(list(F = Fisher_mat, F_pen = Fisher_mat + lambda * pen))
+ }
+
+ return(F_pen)
+}
More information about the Coxflexboost-commits
mailing list