[Pastecs-commits] r7 - in pkg: . man

noreply at r-forge.r-project.org
Sun Jul 14 14:07:33 CEST 2013


Author: phgrosjean
Date: 2013-07-14 14:07:33 +0200 (Sun, 14 Jul 2013)
New Revision: 7

Modified:
   pkg/DESCRIPTION
   pkg/man/abund.Rd
   pkg/man/daystoyears.Rd
   pkg/man/escouf.Rd
   pkg/man/local.trend.Rd
   pkg/man/regul.Rd
   pkg/man/regul.adj.Rd
   pkg/man/regul.screen.Rd
   pkg/man/stat.slide.Rd
   pkg/man/tsd.Rd
   pkg/man/turnogram.Rd
   pkg/man/turnpoints.Rd
Log:
Elimination of \synopsis{} from Rd files

Modified: pkg/DESCRIPTION
===================================================================
--- pkg/DESCRIPTION	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/DESCRIPTION	2013-07-14 12:07:33 UTC (rev 7)
@@ -1,7 +1,7 @@
 Package: pastecs
 Title: Package for Analysis of Space-Time Ecological Series
-Version: 1.3-13
-Date: 2013-01-24
+Version: 1.3-14
+Date: 2013-07-13
 Author: Frederic Ibanez <ibanez at obs-vlfr.fr>, Philippe Grosjean <phgrosjean at sciviews.org> & Michele Etienne <etienne at obs-vlfr.fr>
 Description: Regulation, decomposition and analysis of space-time series. The pastecs library is a PNEC-Art4 and IFREMER (Benoit Beliaeff <Benoit.Beliaeff at ifremer.fr>) initiative to bring PASSTEC 2000 (http://www.obs-vlfr.fr/~enseigne/anado/passtec/passtec.htm) functionalities to R.
 URL: http://www.sciviews.org/pastecs

Modified: pkg/man/abund.Rd
===================================================================
--- pkg/man/abund.Rd	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/abund.Rd	2013-07-14 12:07:33 UTC (rev 7)
@@ -11,57 +11,85 @@
 \encoding{latin1}
 
 \title{ Sort variables by abundance }
+
 \description{
-  Sort variables (usually species in a species x stations matrix) in function of their abundance, either in number of non-null values, or in number of individuals (in log). The \code{f} coefficient allows adjusting weight given to each of these two criteria.
+  Sort variables (usually species in a species x stations matrix) according to
+  their abundance, either by number of non-null values or by number of
+  individuals (in log). The \code{f} coefficient adjusts the weight given to each of these two criteria.
 }
-\synopsis{
-abund(x, f=0.2) 
-\method{extract}{abund}(e, n, left=TRUE, ...)
-\method{identify}{abund}(x, label.pts=FALSE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...)
-\method{lines}{abund}(x, n=x$n, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...)
-\method{plot}{abund}(x, n=x$n, lvert=TRUE, lvars=TRUE, lcol=2, llty=2, all=TRUE, dlab=c("cumsum", "\% log(ind.)", "\% non-zero"), dcol=c(1,2,4), dlty=c(par("lty"), par("lty"), par("lty")), dpos=c(1.5, 20), type="l", xlab="variables", ylab="abundance", main=paste("Abundance sorting for:",x$data, "with f =", round(x$f, 4)), ...)
+
+\usage{
+abund(x, f = 0.2)
+
+\method{extract}{abund}(e, n, left = TRUE, ...)
+\method{identify}{abund}(x, label.pts = FALSE, lvert = TRUE, lvars = TRUE, col = 2, lty = 2, ...)
+\method{lines}{abund}(x, n = x$n, lvert = TRUE, lvars = TRUE, col = 2, lty = 2, ...)
+\method{plot}{abund}(x, n = x$n, lvert = TRUE, lvars = TRUE, lcol = 2, llty = 2, all = TRUE,
+    dlab = c("cumsum", "\% log(ind.)", "\% non-zero"), dcol = c(1,2,4),
+    dlty = c(par("lty"), par("lty"), par("lty")), dpos = c(1.5, 20), type = "l",
+    xlab = "variables", ylab = "abundance",
+    main = paste("Abundance sorting for:",x$data, "with f =", round(x$f, 4)), ...)
 \method{print}{abund}(x, ...)
 \method{print}{summary.abund}(x, ...)
 \method{summary}{abund}(object, ...)
 }
-\usage{
-abund(x, f=0.2)
-\method{summary}{abund}(abd)
-\method{plot}{abund}(abd, n=abd$n, lvert=TRUE, lvars=TRUE, lcol=2, llty=2,
-        all=TRUE, dlab=c("cumsum", "\% log(ind.)", "\% non-zero"),
-        dcol=c(1, 2, 4), dlty, dpos=c(1.5, 20), \dots)
-\method{lines}{abund}(abd, n=abd$n, lvert=TRUE, lvars=TRUE, \dots)
-\method{identify}{abund}(abd, label.pts=FALSE, lvert=TRUE, lvars=TRUE, \dots)
-\method{extract}{abund}(abd, n=abd$n, left=TRUE)
-}
+
 \arguments{
-  \item{x}{ A data frame containing the variables to sort according to their abundance in columns }
-  \item{f}{ Weight given to the number of individuals criterium (strictly included between 0 and 1; weight for the non-null values is \code{1-f}. The default value, \code{f=0.2}, gives enough weight to the number of non-null values to get abundant species according to this criterium first, but allowing to get at the other extreme rare, but locally abundant species }
-  \item{abd}{ An 'abund' object returned by \code{abund} }
+  \item{x}{ A data frame containing, in columns, the variables to sort
+    according to their abundance (for \code{abund}), or an 'abund' object for the methods }
+  \item{f}{ Weight given to the number of individuals criterion (strictly
+    between 0 and 1); the weight for the non-null values is \code{1-f}. The
+    default value, \code{f=0.2}, gives enough weight to the number of non-null
+    values to rank abundant species according to this criterion first, while
+    still allowing rare, but locally abundant species at the other extreme }
+  \item{object}{ An 'abund' object returned by \code{abund} }
+  \item{e}{ An 'abund' object returned by \code{abund} }
   \item{n}{ The number of variables selected at left }
-  \item{lvert}{ If \code{TRUE} then a vertical line separate the n variables at left from the others }
-  \item{lvars}{ If \code{TRUE} then the x-axis labels of the n left variables are printed in a different color to emphasize them }
-  \item{lcol}{ The color to use to draw the vertical line (\code{lvert=TRUE}) and the variables labels (\code{lvars=TRUE}) at left af the nth variable. By default, color 2 is used }
-  \item{llty}{ The style used to draw the vertical line (\code{lvert=TRUE}). By default, a dashed line is used }
-  \item{all}{ If \code{TRUE} then all lines are drawn (cumsum, \%log(ind.) and \%non-null). If \code{FALSE}, only the cumsum line is drawn }
+  \item{type}{ the type of graph to plot. By default, \code{type="l"} (lines) }
+  \item{lvert}{ If \code{TRUE} then a vertical line separates the n variables
+    at left from the others }
+  \item{lvars}{ If \code{TRUE} then the x-axis labels of the n left variables
+    are printed in a different color to emphasize them }
+  \item{lcol}{ The color to use to draw the vertical line (\code{lvert=TRUE})
+    and the variable labels (\code{lvars=TRUE}) at the left of the nth variable.
+    By default, color 2 is used }
+  \item{llty}{ The style used to draw the vertical line (\code{lvert=TRUE}).
+    By default, a dashed line is used }
+  \item{xlab}{ the label of the x-axis }
+  \item{ylab}{ the label of the y-axis }
+  \item{main}{ the main title of the graph}
+  \item{all}{ If \code{TRUE} then all lines are drawn (cumsum, \%log(ind.) and
+    \%non-null). If \code{FALSE}, only the cumsum line is drawn }
   \item{dlab}{ The legend labels }
   \item{dcol}{ Colors to use for drawing the various curves on the graph }
   \item{dlty}{ The line style to use for drawing the various curves on the graph }
-  \item{dpos}{ The position of the legend box on the graph (coordinates of its top-left corner). A legend box is drawn only if \code{all=TRUE} }
-  \item{\dots}{ additional graph parameters }
-  \item{label.pts}{ Do we have to label points on the graph or to chose an extraction level with the \code{identify()} method? }
-  \item{left}{ If \code{TRUE}, the n variables at left are extracted. Otherwise, the total-n variables at right are extracted }
+  \item{dpos}{ The position of the legend box on the graph (coordinates of its
+    top-left corner). A legend box is drawn only if \code{all=TRUE} }
+  \item{col}{ The color to use to draw lines }
+  \item{lty}{ The style used to draw lines }
+  \item{\dots}{ additional parameters }
+  \item{label.pts}{ Should points be labelled on the graph, or should an
+    extraction level be chosen with the \code{identify()} method? }
+  \item{left}{ If \code{TRUE}, the n variables at left are extracted. Otherwise,
+    the remaining (total - n) variables at right are extracted }
 }
 \details{
-  Successive sorts can be applied. For instance, a first sort with \code{f = 0.2}, followed by an extraction of rare species and another sort with \code{f = 1} allows to collect only rare but locally abundant species.
+  Successive sorts can be applied. For instance, a first sort with
+  \code{f = 0.2}, followed by an extraction of rare species and another sort
+  with \code{f = 1}, allows collecting only rare but locally abundant species.
 }
 \value{
-  An object of type 'abund' is returned. It has methods \code{print()}, \code{summary()}, \code{plot()}, \code{lines()}, \code{identify()}, \code{extract()}.
+  An object of type 'abund' is returned. It has methods \code{print()},
+  \code{summary()}, \code{plot()}, \code{lines()}, \code{identify()}, \code{extract()}.
 }
 \references{
-Ibanez, F., J.-C. Dauvin & M. Etienne, 1993. \emph{Comparaison des évolutions à long terme (1977-1990) de deux peuplements macrobenthiques de la baie de Morlaix (Manche occidentale): relations avec les facteurs hydroclimatiques.} J. Exp. Mar. Biol. Ecol., 169:181-214.
+  Ibanez, F., J.-C. Dauvin & M. Etienne, 1993. \emph{Comparaison des évolutions
+  à long terme (1977-1990) de deux peuplements macrobenthiques de la baie de
+  Morlaix (Manche occidentale): relations avec les facteurs hydroclimatiques.}
+  J. Exp. Mar. Biol. Ecol., 169:181-214.
 }
-\author{ Philippe Grosjean (\email{phgrosjean at sciviews.org}), Frédéric Ibanez (\email{ibanez at obs-vlfr.fr}) }
+\author{ Philippe Grosjean (\email{phgrosjean at sciviews.org}),
+  Frédéric Ibanez (\email{ibanez at obs-vlfr.fr}) }
 
 \seealso{ \code{\link{escouf}} }
 
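For reference, a minimal sketch of the documented abund() workflow (assuming the bnr species dataset shipped with pastecs; n = 26 is purely illustrative):

    library(pastecs)
    data(bnr)                       # species x stations data frame from pastecs
    bnr.abd <- abund(bnr, f = 0.2)  # weight 0.2 on log(individuals), 0.8 on non-null values
    summary(bnr.abd)
    plot(bnr.abd)                   # cumsum, % log(ind.) and % non-zero curves
    bnr.abd$n <- 26                 # keep the 26 leftmost (most abundant) variables
    bnr2 <- extract(bnr.abd)        # data frame reduced to those variables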

Modified: pkg/man/daystoyears.Rd
===================================================================
--- pkg/man/daystoyears.Rd	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/daystoyears.Rd	2013-07-14 12:07:33 UTC (rev 7)
@@ -8,10 +8,7 @@
 \description{
   Convert time scales. The time scale "days" corresponds to 1 unit per day. The time scale "years" uses 1 unit for 1 year. It is used in any analysis that requires seasonal decomposition and/or elimination. 
 }
-\synopsis{
-daystoyears(x, datemin=NULL, dateformat="m/d/Y")
-yearstodays(x, xmin=NULL)
-}
+
 \usage{
 daystoyears(x, datemin=NULL, dateformat="m/d/Y")
 yearstodays(x, xmin=NULL)

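A minimal sketch of the conversion, assuming datemin accepts a date string in the format given by dateformat (as the signatures suggest; all values are illustrative):

    library(pastecs)
    days <- seq(0, 730, by = 14)        # hypothetical "days" time scale
    yrs <- daystoyears(days, datemin = "01/01/2000", dateformat = "m/d/Y")
    range(yrs)                          # the same axis, now in years
    back <- yearstodays(yrs, xmin = 0)  # convert back, origin reset to 0
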
Modified: pkg/man/escouf.Rd
===================================================================
--- pkg/man/escouf.Rd	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/escouf.Rd	2013-07-14 12:07:33 UTC (rev 7)
@@ -14,33 +14,28 @@
 \description{
  Calculate equivalent vectors sensu Escoufier, that is, the most significant variables from a multivariate data frame according to a principal component analysis (variables that are most correlated with the principal axes). This method is useful mainly for physical or chemical data where simply summarizing them with a PCA does not always give easily interpretable principal axes.
 }
-\synopsis{
-escouf(x, level=1, verbose=TRUE)
-extract.escouf(e, n, level=e$level, ...)
-identify.escouf(x, lhorz=TRUE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...)
-lines.escouf(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...)
-plot.escouf(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, lcol=2, llty=2, diff=TRUE, dlab="RV' (units not shown)", dcol=4, dlty=par("lty"), dpos=0.8, type="s", xlab="variables", ylab="RV", main=paste("Escoufier's equivalent vectors for:",x$data), ...)
-print.escouf(x, ...)
-print.summary.escouf(x, ...)
-summary.escouf(object, ...)  
-}
+
 \usage{
 escouf(x, level=1, verbose=TRUE)
-\method{summary}{escouf}(esc)
-\method{plot}{escouf}(esc, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE,
+\method{print}{escouf}(x, \dots)
+\method{summary}{escouf}(object, \dots)
+\method{print}{summary.escouf}(x, \dots)
+\method{plot}{escouf}(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE,
         lcol=2, llty=2, diff=TRUE, dlab="RV' (units not shown)", dcol=4,
-        dlty=par("lty"), dpos=0.8, \dots)
-\method{lines}{escouf}(esc, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE,
-        lcol=2, llty=2, \dots)
-\method{identify}{escouf}(esc, lhorz=TRUE, lvert=TRUE, lvars=TRUE, lcol=2,
-        llty=2, \dots)
-\method{extract}{escouf}(esc, n=NULL, level=e$level)
+        dlty=par("lty"), dpos=0.8, type="s", xlab="variables", ylab="RV",
+        main=paste("Escoufier's equivalent vectors for:",x$data), \dots)
+\method{lines}{escouf}(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE,
+        col=2, lty=2, \dots)
+\method{identify}{escouf}(x, lhorz=TRUE, lvert=TRUE, lvars=TRUE, col=2,
+        lty=2, \dots)
+\method{extract}{escouf}(e, n, level=e$level, \dots)
 }
 \arguments{
-  \item{x}{ A data frame containing the variables to sort according to the Escoufier's method }
+  \item{x}{ For \code{escouf()}, a data frame containing the variables to sort according to Escoufier's method. For the other functions, an 'escouf' object }
  \item{level}{ The level of correlation at which to stop calculation. By default \code{level=1}, the highest value, and all variables are sorted. Specify a value lower than one to speed up calculation. If you specify too low a value, you will not be able to extract all significant variables (the extraction level must be lower than the calculation level). We advise keeping 0.95 < level < 1 }
  \item{verbose}{ Print calculation steps. This allows monitoring the percentage of the calculation already achieved when computation takes a long time (that is, with many variables to sort) }
-  \item{esc}{ An 'escouf' object returned by \code{escouf}}
+  \item{object}{ An 'escouf' object returned by \code{escouf}}
+  \item{e}{ An 'escouf' object returned by \code{escouf}}
  \item{lhorz}{ If \code{TRUE} then a horizontal line indicating the extraction level is drawn }
  \item{lvert}{ If \code{TRUE} then a vertical line separates the n extracted variables at left from the rest }
   \item{lvars}{ If \code{TRUE} then the x-axis labels of the n extracted variables at left are printed in a different color to emphasize them }
@@ -49,10 +44,16 @@
   \item{diff}{ If \code{TRUE} then the RV' curve is also plotted (by default) }
   \item{dlab}{ The label to use for the RV' curve. By default: \code{"RV' (units not shown)"} }
   \item{dcol}{ The color to use for the RV' curve (by default, color 4 is used) }
+  \item{type}{ The type of graph to plot }
+  \item{xlab}{ the label of the x-axis }
+  \item{ylab}{ the label of the y-axis }
+  \item{main}{ the main title of the graph}
   \item{dlty}{ The style for the RV' curve }
+  \item{col}{ The color to use to draw the lines (\code{lhorz=TRUE} and \code{lvert=TRUE}) and the variables labels (\code{lvars=TRUE}) of the n extracted variables. By default, color 2 is used }
+  \item{lty}{ The style used to draw the lines (\code{lhorz=TRUE} and \code{lvert=TRUE}). By default, lines are dashed }
  \item{dpos}{ The relative horizontal position of the label for the RV' curve. The default value of 0.8 means that the label is placed at 80\% of the horizontal axis. The vertical position of the label is automatically determined }
-  \item{\dots}{ additional graph parameters }
  \item{n}{ The number of variables to extract. If a value is given, it takes priority over \code{level} }
+  \item{\dots}{ additional parameters }
 }
 
 \value{

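A minimal sketch of the sort-then-extract workflow (assuming the marbio dataset shipped with pastecs; the extraction level is illustrative):

    library(pastecs)
    data(marbio)                    # multivariate data frame from pastecs
    marbio.esc <- escouf(marbio)    # sort variables by Escoufier's method
    plot(marbio.esc)                # RV curve, plus RV' since diff=TRUE
    marbio.esc$level <- 0.90        # illustrative extraction level
    marbio2 <- extract(marbio.esc)  # keep only the equivalent vectors
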
Modified: pkg/man/local.trend.Rd
===================================================================
--- pkg/man/local.trend.Rd	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/local.trend.Rd	2013-07-14 12:07:33 UTC (rev 7)
@@ -8,20 +8,20 @@
 \description{
  A simple method using cumulated sums that allows detecting changes in the tendency of a time series
 }
-\synopsis{
-  local.trend(x, k=mean(x), plotit=TRUE, type="l", cols=1:2, ltys=2:1, xlab="Time", ylab="cusum", ...)
-  identify.local.trend(x, ...)
-}
 \usage{
-local.trend(x, k=mean(x), plotit=TRUE, \dots)
-\method{identify}{local.trend}(loctrd)
+local.trend(x, k = mean(x), plotit = TRUE, type="l", cols=1:2, ltys=2:1, xlab="Time", ylab="cusum", \dots)
+\method{identify}{local.trend}(x, \dots)
 }
 \arguments{
-  \item{x}{ a regular time series (a 'rts' object under S+ or a 'ts' object under \R) }
+  \item{x}{ a regular time series (a 'ts' object) for \code{local.trend()} or a 'local.trend' object for \code{identify()} }
  \item{k}{ the reference value to subtract from the cumulated sums. By default, it is the mean of all observations in the series }
  \item{plotit}{ if \code{plotit=TRUE} (by default), a graph with the cumsum curve superimposed on the original series is plotted }
+  \item{type}{ the type of plot (with the usual meaning of this argument) }
+  \item{cols}{ colors to use for original data and for the trend line }
+  \item{ltys}{ line types to use for original data and the trend line }
+  \item{xlab}{ label of the x-axis }
+  \item{ylab}{ label of the y-axis }
   \item{\dots}{ additional arguments for the graph }
-  \item{loctrd}{ a 'local.trend' object, as returned by the function \code{local.trend()} }
 }
 \details{
   With \code{local.trend()}, you can:

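A minimal sketch on synthetic data (the series and its mean shift are made up for illustration):

    library(pastecs)
    set.seed(42)
    x <- ts(rnorm(120) + rep(c(0, 2), each = 60))  # series with a mean shift
    lt <- local.trend(x)   # cusum against k = mean(x); slope changes reveal trends
    # identify(lt)         # interactively delimit periods of stable tendency
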
Modified: pkg/man/regul.Rd
===================================================================
--- pkg/man/regul.Rd	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/regul.Rd	2013-07-14 12:07:33 UTC (rev 7)
@@ -17,36 +17,32 @@
 \description{
  Regulate irregular time series or regular time series with gaps. Create a \code{regul} object from which one or several regular time series can be extracted using \code{extract()} or \code{tseries()}. This is the function to apply, most of the time, to create regular time series ('rts' objects in Splus or 'ts' objects in \R) that will be further analyzed by other functions that apply to regular time series.
 }
-\synopsis{
-regul(x, y=NULL, xmin=min(x), n=length(x), units="days", frequency=NULL, deltat=1/frequency, datemin=NULL, dateformat="m/d/Y", tol=NULL, tol.type="both", methods="linear", rule=1, f=0, periodic=FALSE, window=(max(x) - min(x))/(n - 1), split=100, specs=NULL) 
-extract.regul(e, n, series=NULL, ...)
-hist.regul(x, nclass=30, col=c(4, 5, 2), xlab=paste("Time distance in", x$units, "with start =", min(x$x), , ylab=paste("Frequency, tol =", x$specs$tol), main="Number of matching observations", plotit=TRUE, ...)
-identify.regul(x, series=1, col=3, label="#", ...)
-lines.regul(x, series=1, col=3, lty=1, plot.pts=TRUE, ...)
-plot.regul(x, series=1, col=c(1, 2), lty=c(par("lty"), par("lty")), plot.pts=TRUE, leg=FALSE, llab=c("initial", x$specs$methods[series]), lpos=c(1.5, 10), xlab=paste("Time (", x$units, ")", sep = ""), ylab="Series", main=paste("Regulation of", names(x$y)[series]), ...)
-print.regul(x, ...)
-print.specs.regul(x, ...)
-print.summary.regul(x, ...)
-specs.regul(x, ...)
-summary.regul(object, ...)
-}
+
 \usage{
 regul(x, y=NULL, xmin=min(x), n=length(x), units="days", frequency=NULL,
         deltat=1/frequency, datemin=NULL, dateformat="m/d/Y", tol=NULL,
         tol.type="both", methods="linear", rule=1, f=0, periodic=FALSE,
         window=(max(x) - min(x))/(n - 1), split=100, specs=NULL)
-\method{summary}{regul}(reg)
-\method{plot}{regul}(reg, series=1, col=c(1, 2), lty, plot.pts=TRUE,
-        leg=FALSE, llab=c("initial", x$specs$methods[series]),
-        lpos=c(1.5, 10), \dots)
-\method{lines}{regul}(reg, series=1, col=3, lty=1, plot.pts=TRUE, \dots)
-\method{identify}{regul}(reg, series=1, col=3, label="#", \dots)
-\method{hist}{regul}(reg, nclass=30, col=c(4, 5, 2), plotit=TRUE, \dots)
-\method{extract}{regul}(reg, n=ncol(reg$y), series=NULL)
-\method{specs}{regul}(reg)
+\method{print}{regul}(x, \dots)
+\method{summary}{regul}(object, \dots)
+\method{print}{summary.regul}(x, \dots)
+\method{plot}{regul}(x, series=1, col=c(1, 2), lty=c(par("lty"), par("lty")), plot.pts=TRUE,
+        leg=FALSE, llab=c("initial", x$specs$methods[series]), lpos=c(1.5, 10),
+        xlab=paste("Time (", x$units, ")", sep = ""), ylab="Series",
+        main=paste("Regulation of", names(x$y)[series]), \dots)
+\method{lines}{regul}(x, series=1, col=3, lty=1, plot.pts=TRUE, \dots)
+\method{identify}{regul}(x, series=1, col=3, label="#", \dots)
+\method{hist}{regul}(x, nclass=30, col=c(4, 5, 2),
+        xlab=paste("Time distance in", x$units, "with start =", min(x$x),
+        ", n = ", length(x$x), ", deltat =", x$tspar$deltat),
+        ylab=paste("Frequency, tol =", x$specs$tol),
+        main="Number of matching observations", plotit=TRUE, \dots)
+\method{extract}{regul}(e, n, series=NULL, \dots)
+\method{specs}{regul}(x, \dots)
+\method{print}{specs.regul}(x, \dots)
 }
 \arguments{
-  \item{x}{ a vector containing times at which observations are sampled in the initial irregular time series. It can be expressed in any unit ("years", "days", "weeks", "hours", "min", "sec",...) as defined by the argument \code{units}. It is often expressed in "days" and the decimal part represents the part of the day, that is the time in hour:min:sec (dates coming from Excel, or even standard dates in S+ or \R are expressed like that) }
+  \item{x}{ for \code{regul()}: a vector containing times at which observations are sampled in the initial irregular time series. It can be expressed in any unit ("years", "days", "weeks", "hours", "min", "sec",...) as defined by the argument \code{units}. It is often expressed in "days" and the decimal part represents the part of the day, that is the time in hour:min:sec (dates coming from Excel, or even standard dates in S+ or \R are expressed like that). For the methods, a 'regul' object }
   \item{y}{ a vector (single series) or a matrix/data frame whose columns correspond to the various irregular time series to regulate. Rows are observations made at corresponding times in \code{x}. The number of rows must thus match the length of vector \code{x} }
   \item{xmin}{ allows to respecify the origin of time in \code{x}. By default, the origin is not redefined and thus, the smallest value in \code{x} is used }
  \item{n}{ the number of observations in the regular time series. By default, it is the same number as in the original irregular time series (i.e., \code{length(x)}) }
@@ -64,7 +60,8 @@
   \item{window}{ parameter for the \code{"area"} regulation method. Size of the window to consider (see \code{regarea()}). By default, the mean interval between observations in the initial irregular time series is used. Give the same value as for deltat for working with adjacent windows }
  \item{split}{ another parameter for the \code{"area"} method. To optimise calculation time and to avoid saturating memory, very long time series are split into smaller subunits (see \code{regarea()}). This is transparent for the user. The default value of \code{split=100} should rarely be changed. Give a lower value if the program fails and reports a memory problem during calculation }
  \item{specs}{ a \code{specs.regul} object returned by the function \code{specs()} applied to a \code{regul} object. Allows collecting the parameterization of the \code{regul()} function and applying it to another regulation }
-  \item{reg}{ A \code{regul} object as obtained after using the \code{regul()} function }
+  \item{object}{ A \code{regul} object as obtained after using the \code{regul()} function }
+  \item{e}{ A \code{regul} object as obtained after using the \code{regul()} function }
   \item{series}{ the series to plot. By default, \code{series=1}, corresponding to the first (or possibly the unique) series in the \code{regul} object }
  \item{col}{ (1) for \code{plot()}: the two colors to use to draw respectively the initial irregular series and the final regulated series. \code{col=c(1,2)} by default. (2) for \code{hist()}: the three colors to use to represent respectively the first bar (exact coincidence), the middle bars (coincidence within a certain tolerance window) and the last bar (values always interpolated). By default, \code{col=c(4,5,2)} }
   \item{lty}{ the style to use to draw lines for the initial series and the regulated series, respectively. The default style is used for both lines if this argument is not provided }
@@ -72,10 +69,13 @@
   \item{leg}{ do we add a legend to the graph? By default, \code{leg=FALSE}, no legend is added }
   \item{llab}{ the labels to use for the initial irregular and the final regulated series, respectively. By default, it is \code{"initial"} for the first one and the name of the regulation method used for the second one (see \code{methods} argument) }
   \item{lpos}{ the position of the top-left corner of the legend box (x,y), in the graph coordinates }
-  \item{\dots}{ additional graph parameters }
+  \item{xlab}{ the label of the x-axis }
+  \item{ylab}{ the label of the y-axis }
+  \item{main}{ the main title of the graph}
   \item{label}{ the character to use to mark points interactively selected on the graph. By default, \code{label="#"} }
   \item{nclass}{ the number of classes to calculate in the histogram. This is indicative and this value is automatically adjusted to obtain a nicely-formatted histogram. By default, \code{nclass=30} }
   \item{plotit}{ If \code{plotit=TRUE} then the histogram is plotted. Otherwise, it is only calculated }
+  \item{\dots}{ additional parameters }
 }
 \details{
  Several irregular time series (for instance, contained in a data frame) can be treated at once. Specify a vector with \code{"constant"}, \code{"linear"}, \code{"spline"} or \code{"area"} for the argument \code{methods} to use a different regulation method for each series. See the corresponding functions (\code{regconst()}, \code{reglin()}, \code{regspline()} and \code{regarea()}), respectively, for more details on these methods. Arguments can be saved in a \code{specs} object and reused for other similar regulation processes. The functions \code{regul.screen()} and \code{regul.adj()} are useful to choose the best time interval in the computed regular time series. If you want to work on seasonal effects in the time series, it is better to use a "years" time-scale (1 unit = 1 year), or to convert into such a scale. If the initial time unit is "days" (1 unit = 1 day), a conversion can be performed at the same time as the regulation by specifying \code{units="daystoyears"}.

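A minimal sketch of a regulation run (assuming the releve dataset shipped with pastecs and its Day column; the grid parameters are illustrative):

    library(pastecs)
    data(releve)                 # irregularly sampled series from pastecs
    rel.reg <- regul(releve$Day, releve[3:8], xmin = 9, n = 63,
                     deltat = 21, tol = 1.05, methods = "linear")
    plot(rel.reg, series = 1)    # initial vs. regulated series
    rel.ts <- tseries(rel.reg)   # extract regular 'ts' objects
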
Modified: pkg/man/regul.adj.Rd
===================================================================
--- pkg/man/regul.adj.Rd	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/regul.adj.Rd	2013-07-14 12:07:33 UTC (rev 7)
@@ -7,12 +7,13 @@
 \description{
  Calculate and plot a histogram of the distances between the interpolated observations in a regulated time series and the closest observations in the initial irregular time series. This allows optimising the \code{tol} parameter
 }
-\synopsis{
-regul.adj(x, xmin=min(x), frequency=NULL, deltat=(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))/(length(x) - 1), tol=deltat, tol.type="both", nclass=50, col=c(4, 5, 2), xlab=paste("Time distance"), ylab=paste("Frequency"), main="Number of matching observations", plotit=TRUE, ...)
-}
+
 \usage{
-regul.adj(x, xmin=min(x), frequency=NULL, deltat, tol=deltat,
-        tol.type="both", nclass=50, col=c(4, 5, 2), plotit=TRUE, \dots)
+regul.adj(x, xmin=min(x), frequency=NULL,
+     deltat=(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))/(length(x) - 1),
+     tol=deltat, tol.type="both", nclass=50, col=c(4, 5, 2),
+     xlab=paste("Time distance"), ylab=paste("Frequency"),
+     main="Number of matching observations", plotit=TRUE, \dots)
 }
 \arguments{
   \item{x}{ a vector with times corresponding to the observations in the irregular initial time series }
@@ -23,6 +24,9 @@
  \item{tol.type}{ the type of window to use for the time-tolerance: \code{"left"}, \code{"right"}, \code{"both"} (by default) or \code{"none"}. If \code{tol.type="left"}, corresponding \code{x} values are sought in a window ]xregul-tol, xregul]. If \code{tol.type="right"}, they are sought in the window [xregul, xregul+tol[. If \code{tol.type="both"}, then they are sought in the window ]xregul-tol, xregul+tol]. If several observations are in this window, the closest one is used. Finally, if \code{tol.type="none"}, then \emph{all} observations in the regulated time series are interpolated (even if exactly matching observations exist!) }
   \item{nclass}{ the number of classes to compute in the histogram. This is indicative, and will be adjusted by the algorithm to produce a nicely-formatted histogram. The default value is \code{nclass=50}. It is acceptable in many cases, but if the histogram is not correct, try a larger value }
  \item{col}{ the three colors to use to represent respectively the first bar (exact coincidence), the middle bars (coincidence within a certain tolerance window) and the last bar (values always interpolated). By default, \code{col=c(4,5,2)} }
+  \item{xlab}{ the label of the x-axis }
+  \item{ylab}{ the label of the y-axis }
+  \item{main}{ the main title of the graph}
   \item{plotit}{ if \code{plotit=TRUE} then the histogram is plotted. Otherwise, it is only calculated }
   \item{\dots}{ additional graph parameters for the histogram }
 }

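A minimal sketch (same assumption as above about the releve dataset; xmin and deltat are illustrative):

    library(pastecs)
    data(releve)
    # histogram of time distances between a candidate regular grid and the
    # actual sampling times, to help pick a sensible tol
    regul.adj(releve$Day, xmin = 9, deltat = 21)
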
Modified: pkg/man/regul.screen.Rd
===================================================================
--- pkg/man/regul.screen.Rd	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/regul.screen.Rd	2013-07-14 12:07:33 UTC (rev 7)
@@ -7,12 +7,11 @@
 \description{
  Seek the best combination of the number of observations, the interval between two successive observations and the position of the first observation in the regulated time series, to match as many observations of the initial series as possible
 }
-\synopsis{
-regul.screen(x, weight=NULL, xmin=min(x), frequency=NULL, deltat=(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))/(length(x) - 1), tol=deltat/5, tol.type="both")
-}
+
 \usage{
-regul.screen(x, weight=NULL, xmin=min(x), frequency=NULL, deltat,
-        tol=deltat/5, tol.type="both")
+regul.screen(x, weight=NULL, xmin=min(x), frequency=NULL,
+    deltat=(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))/(length(x) - 1),
+    tol=deltat/5, tol.type="both")
 }
 \arguments{
   \item{x}{ a vector with times corresponding to the observations in the irregular initial time series }

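A minimal sketch (again assuming the releve dataset; the ranges screened are illustrative):

    library(pastecs)
    data(releve)
    # screen candidate origins and intervals against the actual sampling times
    regul.screen(releve$Day, xmin = 0:11, deltat = 16:27, tol = 1.05)
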
Modified: pkg/man/stat.slide.Rd
===================================================================
--- pkg/man/stat.slide.Rd	2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/stat.slide.Rd	2013-07-14 12:07:33 UTC (rev 7)
@@ -10,22 +10,20 @@
 \description{
Statistical parameters are not constant along a time series: mean or variance can vary each year, or during particular intervals (radical or smooth changes due to pollution, a very cold winter, a shift in the system behaviour, etc.). Sliding statistics offer the potential to describe the series on successive blocks defined along the space-time axis
 }
-\synopsis{
-stat.slide(x, y, xcut=NULL, xmin=min(x), n=NULL, frequency=NULL, deltat=1/frequency, basic=FALSE, desc=FALSE, norm=FALSE, pen=FALSE, p=0.95)
-lines.stat.slide(x, stat="mean", col=3, lty=1, ...)
-plot.stat.slide(x, stat="mean", col=c(1, 2), lty=c(par("lty"), par("lty")), leg=FALSE, llab=c("series", stat), lpos=c(1.5, 10), xlab="time", ylab="y", main=paste("Sliding statistics"), ...)
-print.stat.slide(x, ...)
-}
+
 \usage{
 stat.slide(x, y, xcut=NULL, xmin=min(x), n=NULL, frequency=NULL,
         deltat=1/frequency, basic=FALSE, desc=FALSE, norm=FALSE,
         pen=FALSE, p=0.95)
-\method{plot}{stat.slide}(statsl, stat="mean", col=c(1, 2), lty=c(par("lty"), par("lty")),
-        leg=FALSE, llab=c("series", stat), lpos=c(1.5, 10), \dots)
-\method{lines}{stat.slide}(statsl, stat="mean", col=3, lty=1, \dots)
+\method{print}{stat.slide}(x, \dots) 
+\method{plot}{stat.slide}(x, stat="mean", col=c(1, 2), lty=c(par("lty"), par("lty")),
+        leg=FALSE, llab=c("series", stat), lpos=c(1.5, 10), xlab="time", ylab="y",
+        main=paste("Sliding statistics"), \dots)
+\method{lines}{stat.slide}(x, stat="mean", col=3, lty=1, \dots)
 }
 \arguments{
-  \item{x}{ a vector with time data }
+  \item{x}{ a vector with time data for \code{stat.slide()}, or a 'stat.slide' object
+    for the methods }
  \item{y}{ a vector with observations at corresponding times }
  \item{xcut}{ a vector with the positions in time of the breaks between successive blocks. \code{xcut=NULL} by default. In the latter case, a vector of equally spaced blocks is constructed using \code{xmin}, \code{n} and \code{frequency} or \code{deltat}. If a value is provided for \code{xcut}, then it supersedes all these other parameters }
   \item{xmin}{ the minimal value in the time-scale to use for constructing a vector of equally spaced breaks }
@@ -37,14 +35,16 @@
  \item{norm}{ do we have to return normal distribution statistics (by default, it is FALSE)? the skewness coefficient g1 (skewness), its significance criterion (skew.2SE, that is, g1/2.SEg1; if skew.2SE > 1, then skewness is significantly different from zero), the kurtosis coefficient g2 (kurtosis), its significance criterion (kurt.2SE, same remark as for skew.2SE), the statistic of a Shapiro-Wilk test of normality (normtest.W) and its associated probability (normtest.p) }
  \item{pen}{ do we have to return Pennington and other associated statistics (by default, it is FALSE)? pos.median, pos.mean, pos.var, pos.std.dev, respectively the median, the mean, the variance and the standard deviation, considering only non-null values; geo.mean, the geometric mean, that is, the exponential of the mean of the logarithm of the observations, excluding null values; pen.mean, pen.var, pen.std.dev, pen.mean.var, respectively the mean, the variance, the standard deviation and the variance of the mean after Pennington's estimators (see \code{pennington()}) }
   \item{p}{ the probability level to use to calculate the confidence interval on the mean (CI.mean). By default, \code{p=0.95} }
[TRUNCATED]

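A minimal sketch of sliding statistics (assuming the marbio dataset and its ClausocalanusA column; the block parameters are illustrative):

    library(pastecs)
    data(marbio)
    y <- marbio[, "ClausocalanusA"]
    statsl <- stat.slide(1:length(y), y, xmin = 0, n = 7, deltat = 10)
    statsl                                   # table of statistics per block
    plot(statsl, stat = "mean", leg = TRUE)  # series with sliding means overlaid
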
To get the complete diff run:
    svnlook diff /svnroot/pastecs -r 7

