[Roxygen-commits] r49 - in pkg: R sandbox
noreply at r-forge.r-project.org
Sat Jul 19 23:04:34 CEST 2008
Author: pcd
Date: 2008-07-19 23:04:34 +0200 (Sat, 19 Jul 2008)
New Revision: 49
Added:
pkg/R/Rd.R
pkg/sandbox/Rd.R
pkg/sandbox/example-Rd-nlm.R
Modified:
pkg/R/list.R
pkg/R/parse.R
pkg/sandbox/namespace.R
Log:
preliminary Rd; sloppy parsing
Added: pkg/R/Rd.R
===================================================================
--- pkg/R/Rd.R (rev 0)
+++ pkg/R/Rd.R 2008-07-19 21:04:34 UTC (rev 49)
@@ -0,0 +1,71 @@
+Rd <- function(partita) {
+ relevators <- c('name',
+ 'aliases',
+ 'title',
+ 'description',
+ 'usage',
+ 'param',
+ 'return',
+ 'references',
+ 'note',
+ 'author',
+ 'seealso',
+ 'examples',
+ 'keywords')
+
+ Rd.expression <- function(key, expression)
+ sprintf('\\%s{%s}\n', key, expression)
+
+ parse.default <- function(key, expression)
+ cat(Rd.expression(key, expression))
+
+ parse.name <- Curry(parse.default, key='name')
+
+ parse.title <- Curry(parse.default, key='title')
+
+ parse.usage <- Curry(parse.default, key='usage')
+
+ parse.return <- Curry(parse.default, key='value')
+
+ parse.references <- Curry(parse.default, key='references')
+
+ parse.note <- Curry(parse.default, key='note')
+
+ parse.author <- Curry(parse.default, key='author')
+
+ parse.seealso <- Curry(parse.default, key='seealso')
+
+ parse.examples <- Curry(parse.default, key='examples')
+
+ parse.concept <- Curry(parse.default, key='concept')
+
+ parse.split <- function(key, expressions)
+ for (expression in strsplit(expressions, SPACE))
+ parse.default(key, expression)
+
+ parse.keywords <- Curry(parse.split, key='keyword')
+
+ parse.aliases <- Curry(parse.split, key='alias')
+
+ parse.noop <- function(expression) NULL
+
+ parsers <- list(name=parse.name,
+ title=parse.title,
+ usage=parse.usage,
+ return=parse.return,
+ references=parse.references,
+ note=parse.note,
+ author=parse.author,
+ seealso=parse.seealso,
+ examples=parse.examples,
+ concept=parse.concept,
+ aliases=parse.aliases,
+ keywords=parse.keywords)
+
+ parser <- function(key)
+ if (is.null(f <- parsers[[key]])) parse.noop else f
+
+ for (partitum in partita)
+ for (key.value in zip.list(attributes(partitum)$names, partitum))
+ do.call(parser(car(key.value)), cdr(key.value))
+}
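
A quick sketch of what the Rd.expression helper above emits for a single key/value pair (a hand-run illustration, not part of the commit; the values are taken from the nlm example added below): each parsed element is wrapped in the corresponding Rd macro and written to standard output.

sprintf('\\%s{%s}\n', 'title', 'Non-Linear Minimization')
## [1] "\\title{Non-Linear Minimization}\n"
sprintf('\\%s{%s}\n', 'keyword', 'optimize')
## [1] "\\keyword{optimize}\n"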
Modified: pkg/R/list.R
===================================================================
--- pkg/R/list.R 2008-06-23 13:51:17 UTC (rev 48)
+++ pkg/R/list.R 2008-07-19 21:04:34 UTC (rev 49)
@@ -8,7 +8,14 @@
}
cdr <- function(list) {
- list[2:length(list)]
+ length <- length(list)
+ cdr <- switch(length + 1,
+ stop('CDRing a null list'),
+ nil)
+ if(is.null(cdr))
+ list[2:length]
+ else
+ cdr
}
caar <- function(list) {
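
The rewritten cdr above handles the degenerate cases the old list[2:length(list)] indexing misbehaved on. Roughly, assuming nil is the empty-list sentinel defined elsewhere in list.R:

cdr(list())         # now an error: 'CDRing a null list'
cdr(list(1))        # nil
cdr(list(1, 2, 3))  # list(2, 3), as before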
Modified: pkg/R/parse.R
===================================================================
--- pkg/R/parse.R 2008-06-23 13:51:17 UTC (rev 48)
+++ pkg/R/parse.R 2008-07-19 21:04:34 UTC (rev 49)
@@ -1,12 +1,18 @@
-LINE.DELIMITER <- '#\''
+LINE.DELIMITER <- '#\' '
TAG.DELIMITER <- '@'
+SPACE <- '([[:space:]]|\n)'
+trim.left <- function(string)
+ gsub(sprintf('^%s+', SPACE), '', string)
+
+trim.right <- function(string)
+ gsub(sprintf('%s+$', SPACE), '', string)
+
trim <- function(string)
- gsub('^[[:space:]]+', '',
- gsub('[[:space:]]+$', '', string))
+ Compose(trim.left, trim.right)(string)
paste.list <- function(list) {
- do.call(paste, list)
+ do.call(paste, c(list, sep=" \n"))
}
#' Comment blocks (possibly null) that precede a file's expressions.
@@ -58,7 +64,7 @@
as.list(structure(args.to.string(...), names=key))
parse.preref <- function(key, ...) {
- parse.warning(key, 'is an unknown key')
+ parse.warning(sprintf('<%s>', key), 'is an unknown key')
parse.default(key, ...)
}
@@ -91,10 +97,30 @@
parse.importMethodsFrom <- Curry(parse.value, key='importMethodsFrom')
+## Rd stuff
+
+parse.name <- Curry(parse.value, key='name')
+
+parse.aliases <- Curry(parse.value, key='aliases')
+
+parse.title <- Curry(parse.value, key='title')
+
+parse.usage <- Curry(parse.value, key='usage')
+
+parse.references <- Curry(parse.value, key='references')
+
+parse.concept <- Curry(parse.value, key='concept')
+
+parse.note <- Curry(parse.value, key='note')
+
+parse.seealso <- Curry(parse.value, key='seealso')
+
+parse.examples <- Curry(parse.value, key='examples')
+
+parse.keywords <- Curry(parse.value, key='keywords')
+
parse.return <- Curry(parse.value, key='return')
-parse.reference <- Curry(parse.value, key='reference')
-
parse.author <- Curry(parse.value, key='author')
parse.name.description <- function(key, name, ...) {
@@ -111,7 +137,7 @@
parse.param <- Curry(parse.name.description, key='param')
-parse.name <- function(key, name, ...) {
+parse.name.internal <- function(key, name, ...) {
if (is.na(name))
parse.error(key, 'requires a name')
else if (Negate(is.empty)(...))
@@ -119,9 +145,9 @@
parse.default(key, name)
}
-parse.S3class <- Curry(parse.name, key='S3class')
+parse.S3class <- Curry(parse.name.internal, key='S3class')
-parse.returnType <- Curry(parse.name, key='returnType')
+parse.returnType <- Curry(parse.name.internal, key='returnType')
parse.toggle <- function(key, ...)
as.list(structure(T, names=key))
@@ -149,7 +175,7 @@
## Parser lookup
parser.default <- function(key, default) {
- f <- ls(1, pattern=sprintf('parse.%s', key))[1]
+ f <- ls(1, pattern=sprintf('parse.%s', trim(key)))[1]
if (is.na(f)) Curry(default, key=key) else f
}
@@ -175,14 +201,21 @@
trimmed.lines <-
Map(function(line) substr(line, nchar(LINE.DELIMITER) + 1, nchar(line)),
delimited.lines)
- ## Presumption: white-space is insignificant; there are no
- ## multi-line elements. This contradicts, for instance, verbatim or
- ## latex.
- joined.lines <- gsub(' {2,}', ' ', paste.list(trimmed.lines))
- elements <- Map(trim, car(strsplit(joined.lines, TAG.DELIMITER, fixed=T)))
- parsed.elements <- Reduce(function(parsed, element)
- append(parsed, parse.element(element)),
- cdr(elements), parse.description(car(elements)))
+ ## Presumption: white-space is insignificant.
+### joined.lines <- gsub(' {2,}', ' ', paste.list(trimmed.lines))
+ joined.lines <- paste.list(trimmed.lines)
+ if (is.nil(joined.lines))
+ nil
+ else {
+### print(joined.lines)
+ elements <- Map(trim, car(strsplit(joined.lines, TAG.DELIMITER, fixed=T)))
+### elements <- car(strsplit(joined.lines, TAG.DELIMITER, fixed=T))
+### print(str(elements))
+ parsed.elements <- Reduce(function(parsed, element)
+ append(parsed, parse.element(element)),
+ cdr(elements),
+ parse.description(car(elements)))
+ }
}
parse.ref.srcref <- function(srcref) {
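
To illustrate the reworked block splitting above (a sketch using only the committed helpers; the input string is invented): trimmed lines are now joined with ' \n' so multi-line elements keep their line breaks, the joined text is split on the '@' TAG.DELIMITER, and each element is trimmed before being dispatched to its parser.

joined <- "Minimizes a function. \n@name nlm \n@title Non-Linear Minimization"
Map(trim, car(strsplit(joined, TAG.DELIMITER, fixed=T)))
## roughly: "Minimizes a function.", "name nlm", "title Non-Linear Minimization"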
Added: pkg/sandbox/Rd.R
===================================================================
--- pkg/sandbox/Rd.R (rev 0)
+++ pkg/sandbox/Rd.R 2008-07-19 21:04:34 UTC (rev 49)
@@ -0,0 +1,12 @@
+source('../R/functional.R')
+source('../R/list.R')
+source('../R/parse.R')
+source('../R/Rd.R')
+
+FILE <- 'example-Rd-nlm.R'
+
+argv <- commandArgs(trailingOnly=T)
+argc <- length(argv)
+file <- ifelse(argc > 0, car(argv), FILE)
+
+Rd(parse.file(file))
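
The commit does not record how this driver is invoked, but given the commandArgs(trailingOnly=T) call it is presumably run non-interactively from pkg/sandbox, along the lines of:

## Rscript Rd.R                     # no argument: falls back to FILE
## Rscript Rd.R example-Rd-nlm.R    # explicit file argument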
Added: pkg/sandbox/example-Rd-nlm.R
===================================================================
--- pkg/sandbox/example-Rd-nlm.R (rev 0)
+++ pkg/sandbox/example-Rd-nlm.R 2008-07-19 21:04:34 UTC (rev 49)
@@ -0,0 +1,190 @@
+#' This function carries out a minimization of the function \code{f}
+#' using a Newton-type algorithm. See the references for details.
+#'
+#'
+#' Note that arguments after \code{\dots} must be matched exactly.
+#'
+#' If a gradient or hessian is supplied but evaluates to the wrong mode
+#' or length, it will be ignored if \code{check.analyticals = TRUE} (the
+#' default) with a warning. The hessian is not even checked unless the
+#' gradient is present and passes the sanity checks.
+#'
+#' From the three methods available in the original source, we always use
+#' method \dQuote{1} which is line search.
+#'
+#' The functions supplied must always return finite (including not
+#' \code{NA} and not \code{NaN}) values.
+#'
+#' @name nlm
+#' @aliases nlm
+#' @title Non-Linear Minimization
+#' @concept optimization
+#' @usage
+#' nlm(f, p, \dots, hessian = FALSE, typsize = rep(1, length(p)),
+#' fscale = 1, print.level = 0, ndigit = 12, gradtol = 1e-6,
+#' stepmax = max(1000 * sqrt(sum((p/typsize)^2)), 1000),
+#' steptol = 1e-6, iterlim = 100, check.analyticals = TRUE)
+#'
+#' @param f the function to be minimized. If the function value has
+#' an attribute called \code{gradient} or both \code{gradient} and
+#' \code{hessian} attributes, these will be used in the calculation of
+#' updated parameter values. Otherwise, numerical derivatives are
+#' used. \code{\link{deriv}} returns a function with suitable
+#' \code{gradient} attribute. This should be a function of a vector of
+#' the length of \code{p} followed by any other arguments specified
+#' by the \code{\dots} argument.
+#' @param p starting parameter values for the minimization.
+#' @param \dots additional arguments to \code{f}.
+#' @param hessian if \code{TRUE}, the hessian of \code{f}
+#' at the minimum is returned.
+#' @param typsize an estimate of the size of each parameter
+#' at the minimum.
+#' @param fscale an estimate of the size of \code{f} at the minimum.
+#' @param print.level this argument determines the level of printing
+#' which is done during the minimization process. The default
+#' value of \code{0} means that no printing occurs, a value of \code{1}
+#' means that initial and final details are printed and a value
+#' of 2 means that full tracing information is printed.
+#' @param ndigit the number of significant digits in the function \code{f}.
+#' @param gradtol a positive scalar giving the tolerance at which the
+#' scaled gradient is considered close enough to zero to
+#' terminate the algorithm. The scaled gradient is a
+#' measure of the relative change in \code{f} in each direction
+#' \code{p[i]} divided by the relative change in \code{p[i]}.
+#' @param stepmax a positive scalar which gives the maximum allowable
+#' scaled step length. \code{stepmax} is used to prevent steps which
+#' would cause the optimization function to overflow, to prevent the
+#' algorithm from leaving the area of interest in parameter space, or to
+#' detect divergence in the algorithm. \code{stepmax} would be chosen
+#' small enough to prevent the first two of these occurrences, but should
+#' be larger than any anticipated reasonable step.
+#' @param steptol A positive scalar providing the minimum allowable
+#' relative step length.
+#' @param iterlim a positive integer specifying the maximum number of
+#' iterations to be performed before the program is terminated.
+#' @param check.analyticals a logical scalar specifying whether the
+#' analytic gradients and Hessians, if they are supplied, should be
+#' checked against numerical derivatives at the initial parameter
+#' values. This can help detect incorrectly formulated gradients or
+#' Hessians.
+#' @returnType list
+#' @return A list containing the following components:
+#' \item{minimum}{the value of the estimated minimum of \code{f}.}
+#' \item{estimate}{the point at which the minimum value of
+#' \code{f} is obtained.}
+#' \item{gradient}{the gradient at the estimated minimum of \code{f}.}
+#' \item{hessian}{the hessian at the estimated minimum of \code{f} (if
+#' requested).}
+#' \item{code}{an integer indicating why the optimization process terminated.
+#' \describe{
+#' \item{1:}{relative gradient is close to zero, current iterate is
+#' probably solution.}
+#' \item{2:}{successive iterates within tolerance, current iterate
+#' is probably solution.}
+#' \item{3:}{last global step failed to locate a point lower than
+#' \code{estimate}. Either \code{estimate} is an approximate local
+#' minimum of the function or \code{steptol} is too small.}
+#' \item{4:}{iteration limit exceeded.}
+#' \item{5:}{maximum step size \code{stepmax} exceeded five consecutive
+#' times. Either the function is unbounded below,
+#' becomes asymptotic to a finite value from above in
+#' some direction or \code{stepmax} is too small.}
+#' }
+#' }
+#' \item{iterations}{the number of iterations performed.}
+#'
+#' @references
+#' Dennis, J. E. and Schnabel, R. B. (1983) \emph{Numerical Methods for
+#' Unconstrained Optimization and Nonlinear Equations.} Prentice-Hall,
+#' Englewood Cliffs, NJ.
+#'
+#' Schnabel, R. B., Koontz, J. E. and Weiss, B. E. (1985) A modular
+#' system of algorithms for unconstrained minimization.
+#' \emph{ACM Trans. Math. Software}, \bold{11}, 419--440.
+#'
+#' @seealso
+#' \code{\link{optim}} and \code{\link{nlminb}}.
+#'
+#' \code{\link{constrOptim}} for constrained optimization,
+#' \code{\link{optimize}} for one-dimensional
+#' minimization and \code{\link{uniroot}} for root finding.
+#' \code{\link{deriv}} to calculate analytical derivatives.
+#'
+#' For nonlinear regression, \code{\link{nls}} may be better.
+#'
+#' @examples
+#' f <- function(x) sum((x-1:length(x))^2)
+#' nlm(f, c(10,10))
+#' nlm(f, c(10,10), print.level = 2)
+#' utils::str(nlm(f, c(5), hessian = TRUE))
+#'
+#' f <- function(x, a) sum((x-a)^2)
+#' nlm(f, c(10,10), a=c(3,5))
+#' f <- function(x, a)
+#' {
+#' res <- sum((x-a)^2)
+#' attr(res, "gradient") <- 2*(x-a)
+#' res
+#' }
+#' nlm(f, c(10,10), a=c(3,5))
+#'
+#' ## more examples, including the use of derivatives.
+#' \dontrun{demo(nlm)}
+#'
+#' @keywords nonlinear optimize
+nlm <- function(f, p, ..., hessian=FALSE, typsize=rep(1,length(p)),
+ fscale=1, print.level=0, ndigit=12, gradtol=1e-6,
+ stepmax=max(1000 * sqrt(sum((p/typsize)^2)), 1000),
+ steptol=1e-6, iterlim=100, check.analyticals=TRUE)
+{
+
+ print.level <- as.integer(print.level)
+ if(print.level < 0 || print.level > 2)
+ stop("'print.level' must be in {0,1,2}")
+ ## msg is collection of bits, i.e., sum of 2^k (k = 0,..,4):
+ msg <- (1 + c(8,0,16))[1+print.level]
+ if(!check.analyticals) msg <- msg + (2 + 4)
+ .Internal(nlm(function(x) f(x, ...), p, hessian, typsize, fscale,
+ msg, ndigit, gradtol, stepmax, steptol, iterlim))
+}
+
+optimize <- function(f, interval, ...,
+ lower=min(interval), upper=max(interval),
+ maximum=FALSE, tol=.Machine$double.eps^0.25)
+{
+ if(maximum) {
+ val <- .Internal(fmin(function(arg) -f(arg, ...), lower, upper, tol))
+ list(maximum = val, objective= f(val, ...))
+ } else {
+ val <- .Internal(fmin(function(arg) f(arg, ...), lower, upper, tol))
+ list(minimum = val, objective= f(val, ...))
+ }
+}
+
+##nice to the English (or rather the Scots)
+optimise <- optimize
+
+uniroot <- function(f, interval, ...,
+ lower = min(interval), upper = max(interval),
+ f.lower = f(lower, ...), f.upper = f(upper, ...),
+ tol = .Machine$double.eps^0.25, maxiter = 1000)
+{
+ if(!missing(interval) && length(interval) != 2)
+ stop("'interval' must be a vector of length 2")
+ if(!is.numeric(lower) || !is.numeric(upper) || lower >= upper)
+ stop("lower < upper is not fulfilled")
+ if(is.na(f.lower)) stop("f.lower = f(lower) is NA")
+ if(is.na(f.upper)) stop("f.upper = f(upper) is NA")
+ if(f.lower * f.upper > 0)
+ stop("f() values at end points not of opposite sign")
+ val <- .Internal(zeroin2(function(arg) f(arg, ...),
+ lower, upper, f.lower, f.upper,
+ tol, as.integer(maxiter)))
+ iter <- as.integer(val[2])
+ if(iter < 0) {
+ warning("_NOT_ converged in ", maxiter, " iterations")
+ iter <- maxiter
+ }
+ list(root = val[1], f.root = f(val[1], ...),
+ iter = iter, estim.prec = val[3])
+}
Modified: pkg/sandbox/namespace.R
===================================================================
--- pkg/sandbox/namespace.R 2008-06-23 13:51:17 UTC (rev 48)
+++ pkg/sandbox/namespace.R 2008-07-19 21:04:34 UTC (rev 49)
@@ -1,6 +1,7 @@
source('../R/functional.R')
source('../R/list.R')
source('../R/parse.R')
+source('../R/roclet.R')
source('../R/namespace.R')
FILES <- list('example-function-mcpi.R',