From noreply at r-forge.r-project.org Sun Jul 14 14:07:33 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 14 Jul 2013 14:07:33 +0200 (CEST) Subject: [Pastecs-commits] r7 - in pkg: . man Message-ID: <20130714120733.D8EE9183D80@r-forge.r-project.org> Author: phgrosjean Date: 2013-07-14 14:07:33 +0200 (Sun, 14 Jul 2013) New Revision: 7 Modified: pkg/DESCRIPTION pkg/man/abund.Rd pkg/man/daystoyears.Rd pkg/man/escouf.Rd pkg/man/local.trend.Rd pkg/man/regul.Rd pkg/man/regul.adj.Rd pkg/man/regul.screen.Rd pkg/man/stat.slide.Rd pkg/man/tsd.Rd pkg/man/turnogram.Rd pkg/man/turnpoints.Rd Log: Elimination of synopsis\ from Rd files Modified: pkg/DESCRIPTION =================================================================== --- pkg/DESCRIPTION 2013-01-24 15:58:51 UTC (rev 6) +++ pkg/DESCRIPTION 2013-07-14 12:07:33 UTC (rev 7) @@ -1,7 +1,7 @@ Package: pastecs Title: Package for Analysis of Space-Time Ecological Series -Version: 1.3-13 -Date: 2013-01-24 +Version: 1.3-14 +Date: 2013-07-13 Author: Frederic Ibanez , Philippe Grosjean & Michele Etienne Description: Regulation, decomposition and analysis of space-time series. The pastecs library is a PNEC-Art4 and IFREMER (Benoit Beliaeff ) initiative to bring PASSTEC 2000 (http://www.obs-vlfr.fr/~enseigne/anado/passtec/passtec.htm) functionalities to R. URL: http://www.sciviews.org/pastecs Modified: pkg/man/abund.Rd =================================================================== --- pkg/man/abund.Rd 2013-01-24 15:58:51 UTC (rev 6) +++ pkg/man/abund.Rd 2013-07-14 12:07:33 UTC (rev 7) @@ -11,57 +11,85 @@ \encoding{latin1} \title{ Sort variables by abundance } + \description{ - Sort variables (usually species in a species x stations matrix) in function of their abundance, either in number of non-null values, or in number of individuals (in log). The \code{f} coefficient allows adjusting weight given to each of these two criteria. + Sort variables (usually species in a species x stations matrix) in function of + their abundance, either in number of non-null values, or in number of + individuals (in log). The \code{f} coefficient allows adjusting weight given to each of these two criteria. } -\synopsis{ -abund(x, f=0.2) -\method{extract}{abund}(e, n, left=TRUE, ...) -\method{identify}{abund}(x, label.pts=FALSE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...) -\method{lines}{abund}(x, n=x$n, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...) -\method{plot}{abund}(x, n=x$n, lvert=TRUE, lvars=TRUE, lcol=2, llty=2, all=TRUE, dlab=c("cumsum", "\% log(ind.)", "\% non-zero"), dcol=c(1,2,4), dlty=c(par("lty"), par("lty"), par("lty")), dpos=c(1.5, 20), type="l", xlab="variables", ylab="abundance", main=paste("Abundance sorting for:",x$data, "with f =", round(x$f, 4)), ...) + +\usage{ +abund(x, f = 0.2) + +\method{extract}{abund}(e, n, left = TRUE, ...) +\method{identify}{abund}(x, label.pts = FALSE, lvert = TRUE, lvars = TRUE, col = 2, lty = 2, ...) +\method{lines}{abund}(x, n = x$n, lvert = TRUE, lvars = TRUE, col = 2, lty = 2, ...) +\method{plot}{abund}(x, n = x$n, lvert = TRUE, lvars = TRUE, lcol = 2, llty = 2, all = TRUE, + dlab = c("cumsum", "\% log(ind.)", "\% non-zero"), dcol = c(1,2,4), + dlty = c(par("lty"), par("lty"), par("lty")), dpos = c(1.5, 20), type = "l", + xlab = "variables", ylab = "abundance", + main = paste("Abundance sorting for:",x$data, "with f =", round(x$f, 4)), ...) \method{print}{abund}(x, ...) \method{print}{summary.abund}(x, ...) \method{summary}{abund}(object, ...) 
}
-\usage{
-abund(x, f=0.2)
-\method{summary}{abund}(abd)
-\method{plot}{abund}(abd, n=abd$n, lvert=TRUE, lvars=TRUE, lcol=2, llty=2,
- all=TRUE, dlab=c("cumsum", "\% log(ind.)", "\% non-zero"),
- dcol=c(1, 2, 4), dlty, dpos=c(1.5, 20), \dots)
-\method{lines}{abund}(abd, n=abd$n, lvert=TRUE, lvars=TRUE, \dots)
-\method{identify}{abund}(abd, label.pts=FALSE, lvert=TRUE, lvars=TRUE, \dots)
-\method{extract}{abund}(abd, n=abd$n, left=TRUE)
-}
+
\arguments{
- \item{x}{ A data frame containing the variables to sort according to their abundance in columns }
- \item{f}{ Weight given to the number of individuals criterion (strictly included between 0 and 1); weight for the non-null values is \code{1-f}. The default value, \code{f=0.2}, gives enough weight to the number of non-null values to get abundant species according to this criterion first, while still allowing rare but locally abundant species to be picked up at the other extreme }
- \item{abd}{ An 'abund' object returned by \code{abund} }
+ \item{x}{ A data frame containing the variables to sort according to their
+ abundance in columns for \code{abund}, or an 'abund' object for the methods }
+ \item{f}{ Weight given to the number of individuals criterion (strictly
+ included between 0 and 1); weight for the non-null values is \code{1-f}. The
+ default value, \code{f=0.2}, gives enough weight to the number of non-null
+ values to get abundant species according to this criterion first, while still
+ allowing rare but locally abundant species to be picked up at the other extreme }
+ \item{object}{ An 'abund' object returned by \code{abund} }
+ \item{e}{ An 'abund' object returned by \code{abund} }
\item{n}{ The number of variables selected at left }
- \item{lvert}{ If \code{TRUE} then a vertical line separates the n variables at left from the others }
- \item{lvars}{ If \code{TRUE} then the x-axis labels of the n left variables are printed in a different color to emphasize them }
- \item{lcol}{ The color to use to draw the vertical line (\code{lvert=TRUE}) and the variables labels (\code{lvars=TRUE}) at left of the nth variable. By default, color 2 is used }
- \item{llty}{ The style used to draw the vertical line (\code{lvert=TRUE}). By default, a dashed line is used }
- \item{all}{ If \code{TRUE} then all lines are drawn (cumsum, \%log(ind.) and \%non-null). If \code{FALSE}, only the cumsum line is drawn }
+ \item{type}{ the type of graph to plot. By default, lines with 'l' }
+ \item{lvert}{ If \code{TRUE} then a vertical line separates the n variables at
+ left from the others }
+ \item{lvars}{ If \code{TRUE} then the x-axis labels of the n left variables
+ are printed in a different color to emphasize them }
+ \item{lcol}{ The color to use to draw the vertical line (\code{lvert=TRUE})
+ and the variables labels (\code{lvars=TRUE}) at left of the nth variable.
+ By default, color 2 is used }
+ \item{llty}{ The style used to draw the vertical line (\code{lvert=TRUE}).
+ By default, a dashed line is used }
+ \item{xlab}{ the label of the x-axis }
+ \item{ylab}{ the label of the y-axis }
+ \item{main}{ the main title of the graph }
+ \item{all}{ If \code{TRUE} then all lines are drawn (cumsum, \%log(ind.) and
+ \%non-null). If \code{FALSE}, only the cumsum line is drawn }
\item{dlab}{ The legend labels }
\item{dcol}{ Colors to use for drawing the various curves on the graph }
\item{dlty}{ The line style to use for drawing the various curves on the graph }
- \item{dpos}{ The position of the legend box on the graph (coordinates of its
top-left corner). A legend box is drawn only if \code{all=TRUE} }
- \item{\dots}{ additional graph parameters }
- \item{label.pts}{ Do we have to label points on the graph or to choose an extraction level with the \code{identify()} method? }
- \item{left}{ If \code{TRUE}, the n variables at left are extracted. Otherwise, the total-n variables at right are extracted }
+ \item{dpos}{ The position of the legend box on the graph (coordinates of its
+ top-left corner). A legend box is drawn only if \code{all=TRUE} }
+ \item{col}{ The color to use to draw lines }
+ \item{lty}{ The style used to draw lines }
+ \item{\dots}{ additional parameters }
+ \item{label.pts}{ Do we have to label points on the graph or to choose an
+ extraction level with the \code{identify()} method? }
+ \item{left}{ If \code{TRUE}, the n variables at left are extracted. Otherwise,
+ the total-n variables at right are extracted }
}
\details{
- Successive sorts can be applied. For instance, a first sort with \code{f = 0.2}, followed by an extraction of rare species and another sort with \code{f = 1}, allows collecting only rare but locally abundant species.
+ Successive sorts can be applied. For instance, a first sort with
+ \code{f = 0.2}, followed by an extraction of rare species and another sort
+ with \code{f = 1}, allows collecting only rare but locally abundant species.
}
\value{
- An object of type 'abund' is returned. It has methods \code{print()}, \code{summary()}, \code{plot()}, \code{lines()}, \code{identify()}, \code{extract()}.
+ An object of type 'abund' is returned. It has methods \code{print()},
+ \code{summary()}, \code{plot()}, \code{lines()}, \code{identify()}, \code{extract()}.
}
\references{
-Ibanez, F., J.-C. Dauvin & M. Etienne, 1993. \emph{Comparaison des évolutions à long terme (1977-1990) de deux peuplements macrobenthiques de la baie de Morlaix (Manche occidentale): relations avec les facteurs hydroclimatiques.} J. Exp. Mar. Biol. Ecol., 169:181-214.
+ Ibanez, F., J.-C. Dauvin & M. Etienne, 1993. \emph{Comparaison des évolutions
+ à long terme (1977-1990) de deux peuplements macrobenthiques de la baie de
+ Morlaix (Manche occidentale): relations avec les facteurs hydroclimatiques.}
+ J. Exp. Mar. Biol. Ecol., 169:181-214.
}
-\author{ Philippe Grosjean (\email{phgrosjean at sciviews.org}), Frédéric Ibanez (\email{ibanez at obs-vlfr.fr}) }
+\author{ Philippe Grosjean (\email{phgrosjean at sciviews.org}),
+ Frédéric Ibanez (\email{ibanez at obs-vlfr.fr}) }
\seealso{ \code{\link{escouf}} }

Modified: pkg/man/daystoyears.Rd
===================================================================
--- pkg/man/daystoyears.Rd 2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/daystoyears.Rd 2013-07-14 12:07:33 UTC (rev 7)
@@ -8,10 +8,7 @@
\description{
Convert time scales. The time scale "days" corresponds to 1 unit per day. The time scale "years" uses 1 unit for 1 year. It is used in any analysis that requires seasonal decomposition and/or elimination.
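A minimal sketch of this round-trip conversion, assuming the pastecs package is attached; the sample vector and the anchor date below are invented for illustration:

library(pastecs)
days <- c(1, 10, 100, 366)    # observation times on a "days" scale
# Anchor day 1 on a calendar date (hypothetical origin) and convert to "years"
years <- daystoyears(days, datemin="21/03/1989", dateformat="d/m/Y")
years
# Back-convert to a "days" scale, resetting the origin to day 1
yearstodays(years, xmin=1)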
} -\synopsis{ -daystoyears(x, datemin=NULL, dateformat="m/d/Y") -yearstodays(x, xmin=NULL) -} + \usage{ daystoyears(x, datemin=NULL, dateformat="m/d/Y") yearstodays(x, xmin=NULL) Modified: pkg/man/escouf.Rd =================================================================== --- pkg/man/escouf.Rd 2013-01-24 15:58:51 UTC (rev 6) +++ pkg/man/escouf.Rd 2013-07-14 12:07:33 UTC (rev 7) @@ -14,33 +14,28 @@ \description{ Calculate equivalent vectors sensu Escoufier, that is, most significant variables from a multivariate data frame according to a principal component analysis (variables that are most correlated with the principal axes). This method is useful mainly for physical or chemical data where simply summarizing them with a PCA does not always gives easily interpretable principal axes. } -\synopsis{ -escouf(x, level=1, verbose=TRUE) -extract.escouf(e, n, level=e$level, ...) -identify.escouf(x, lhorz=TRUE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...) -lines.escouf(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, col=2, lty=2, ...) -plot.escouf(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, lcol=2, llty=2, diff=TRUE, dlab="RV' (units not shown)", dcol=4, dlty=par("lty"), dpos=0.8, type="s", xlab="variables", ylab="RV", main=paste("Escoufier's equivalent vectors for:",x$data), ...) -print.escouf(x, ...) -print.summary.escouf(x, ...) -summary.escouf(object, ...) -} + \usage{ escouf(x, level=1, verbose=TRUE) -\method{summary}{escouf}(esc) -\method{plot}{escouf}(esc, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, +\method{print}{escouf}(x, \dots) +\method{summary}{escouf}(object, \dots) +\method{print}{summary.escouf}(x, \dots) +\method{plot}{escouf}(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, lcol=2, llty=2, diff=TRUE, dlab="RV' (units not shown)", dcol=4, - dlty=par("lty"), dpos=0.8, \dots) -\method{lines}{escouf}(esc, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, - lcol=2, llty=2, \dots) -\method{identify}{escouf}(esc, lhorz=TRUE, lvert=TRUE, lvars=TRUE, lcol=2, - llty=2, \dots) -\method{extract}{escouf}(esc, n=NULL, level=e$level) + dlty=par("lty"), dpos=0.8, type="s", xlab="variables", ylab="RV", + main=paste("Escoufier's equivalent vectors for:",x$data), \dots) +\method{lines}{escouf}(x, level=x$level, lhorz=TRUE, lvert=TRUE, lvars=TRUE, + col=2, lty=2, \dots) +\method{identify}{escouf}(x, lhorz=TRUE, lvert=TRUE, lvars=TRUE, col=2, + lty=2, \dots) +\method{extract}{escouf}(e, n, level=e$level, \dots) } \arguments{ - \item{x}{ A data frame containing the variables to sort according to the Escoufier's method } + \item{x}{ For \code{escouf()}, a data frame containing the variables to sort according to the Escoufier's method. For the other functions, an 'escouf' object } \item{level}{ The level of correlation at which to stop calculation. By default \code{level=1}, the highest value, and all variables are sorted. Specify a value lower than one to speed up calculation. If you specify a too low values you will not be able to extract all significant variables (extraction level must be lower than calculation level). We advise you keep 0.95 < level < 1 } \item{verbose}{ Print calculation steps. 
This allows to control the percentage of calculation already achieved when computation takes a long time (that is, with many variables to sort) } - \item{esc}{ An 'escouf' object returned by \code{escouf}} + \item{object}{ An 'escouf' object returned by \code{escouf}} + \item{e}{ An 'escouf' object returned by \code{escouf}} \item{lhorz}{ If \code{TRUE} then an horizontal line indicating the extraction level is drawn } \item{lvert}{ If \code{TRUE} then a vertical line separate the n extracted variables at left from the rest } \item{lvars}{ If \code{TRUE} then the x-axis labels of the n extracted variables at left are printed in a different color to emphasize them } @@ -49,10 +44,16 @@ \item{diff}{ If \code{TRUE} then the RV' curve is also plotted (by default) } \item{dlab}{ The label to use for the RV' curve. By default: \code{"RV' (units not shown)"} } \item{dcol}{ The color to use for the RV' curve (by default, color 4 is used) } + \item{type}{ The type of graph to plot } + \item{xlab}{ the label of the x-axis } + \item{ylab}{ the label of the y-axis } + \item{main}{ the main title of the graph} \item{dlty}{ The style for the RV' curve } + \item{col}{ The color to use to draw the lines (\code{lhorz=TRUE} and \code{lvert=TRUE}) and the variables labels (\code{lvars=TRUE}) of the n extracted variables. By default, color 2 is used } + \item{lty}{ The style used to draw the lines (\code{lhorz=TRUE} and \code{lvert=TRUE}). By default, lines are dashed } \item{dpos}{ The relative horizontal position of the label for the RV' curve. The default value of 0.8 means that the label is placed at 80\% of the horizontal axis.Vertical position of the label is automatically determined } - \item{\dots}{ additional graph parameters } \item{n}{ The number of variables to extract. If a value is given, it has the priority on \code{level} } + \item{\dots}{ additional parameters } } \value{ Modified: pkg/man/local.trend.Rd =================================================================== --- pkg/man/local.trend.Rd 2013-01-24 15:58:51 UTC (rev 6) +++ pkg/man/local.trend.Rd 2013-07-14 12:07:33 UTC (rev 7) @@ -8,20 +8,20 @@ \description{ A simple method using cumulated sums that allows to detect changes in the tendency in a time series } -\synopsis{ - local.trend(x, k=mean(x), plotit=TRUE, type="l", cols=1:2, ltys=2:1, xlab="Time", ylab="cusum", ...) - identify.local.trend(x, ...) -} \usage{ -local.trend(x, k=mean(x), plotit=TRUE, \dots) -\method{identify}{local.trend}(loctrd) +local.trend(x, k = mean(x), plotit = TRUE, type="l", cols=1:2, ltys=2:1, xlab="Time", ylab="cusum", \dots) +\method{identify}{local.trend}(x, \dots) } \arguments{ - \item{x}{ a regular time series (a 'rts' object under S+ or a 'ts' object under \R) } + \item{x}{ a regular time series (a 'ts' object) for \code{local.trend()} or a 'local.trend' object for \code{identify()} } \item{k}{ the reference value to substract from cumulated sums. 
By default, it is the mean of all observations in the series } \item{plotit}{ if \code{plotit=TRUE} (by default), a graph with the cumsum curve superposed to the original series is plotted } + \item{type}{ the type of plot (as usual notation for this argument) } + \item{cols}{ colors to use for original data and for the trend line } + \item{ltys}{ line types to use for original data and the trend line } + \item{xlab}{ label of the x-axis } + \item{ylab}{ label of the y-axis } \item{\dots}{ additional arguments for the graph } - \item{loctrd}{ a 'local.trend' object, as returned by the function \code{local.trend()} } } \details{ With \code{local.trend()}, you can: Modified: pkg/man/regul.Rd =================================================================== --- pkg/man/regul.Rd 2013-01-24 15:58:51 UTC (rev 6) +++ pkg/man/regul.Rd 2013-07-14 12:07:33 UTC (rev 7) @@ -17,36 +17,32 @@ \description{ Regulate irregular time series or regular time series with gaps. Create a \code{regul} object from whose one or several regular time series can be extracted using \code{extract()} or \code{tseries()}. This is the function to apply most of the time to create regular time series ('rts' objects in Splus or 'ts' objects in \R) that will be further analyzed by other functions that apply to regular time series. } -\synopsis{ -regul(x, y=NULL, xmin=min(x), n=length(x), units="days", frequency=NULL, deltat=1/frequency, datemin=NULL, dateformat="m/d/Y", tol=NULL, tol.type="both", methods="linear", rule=1, f=0, periodic=FALSE, window=(max(x) - min(x))/(n - 1), split=100, specs=NULL) -extract.regul(e, n, series=NULL, ...) -hist.regul(x, nclass=30, col=c(4, 5, 2), xlab=paste("Time distance in", x$units, "with start =", min(x$x), , ylab=paste("Frequency, tol =", x$specs$tol), main="Number of matching observations", plotit=TRUE, ...) -identify.regul(x, series=1, col=3, label="#", ...) -lines.regul(x, series=1, col=3, lty=1, plot.pts=TRUE, ...) -plot.regul(x, series=1, col=c(1, 2), lty=c(par("lty"), par("lty")), plot.pts=TRUE, leg=FALSE, llab=c("initial", x$specs$methods[series]), lpos=c(1.5, 10), xlab=paste("Time (", x$units, ")", sep = ""), ylab="Series", main=paste("Regulation of", names(x$y)[series]), ...) -print.regul(x, ...) -print.specs.regul(x, ...) -print.summary.regul(x, ...) -specs.regul(x, ...) -summary.regul(object, ...) 
-} + \usage{ regul(x, y=NULL, xmin=min(x), n=length(x), units="days", frequency=NULL, deltat=1/frequency, datemin=NULL, dateformat="m/d/Y", tol=NULL, tol.type="both", methods="linear", rule=1, f=0, periodic=FALSE, window=(max(x) - min(x))/(n - 1), split=100, specs=NULL) -\method{summary}{regul}(reg) -\method{plot}{regul}(reg, series=1, col=c(1, 2), lty, plot.pts=TRUE, - leg=FALSE, llab=c("initial", x$specs$methods[series]), - lpos=c(1.5, 10), \dots) -\method{lines}{regul}(reg, series=1, col=3, lty=1, plot.pts=TRUE, \dots) -\method{identify}{regul}(reg, series=1, col=3, label="#", \dots) -\method{hist}{regul}(reg, nclass=30, col=c(4, 5, 2), plotit=TRUE, \dots) -\method{extract}{regul}(reg, n=ncol(reg$y), series=NULL) -\method{specs}{regul}(reg) +\method{print}{regul}(x, \dots) +\method{summary}{regul}(object, \dots) +\method{print}{summary.regul}(x, \dots) +\method{plot}{regul}(x, series=1, col=c(1, 2), lty=c(par("lty"), par("lty")), plot.pts=TRUE, + leg=FALSE, llab=c("initial", x$specs$methods[series]), lpos=c(1.5, 10), + xlab=paste("Time (", x$units, ")", sep = ""), ylab="Series", + main=paste("Regulation of", names(x$y)[series]), \dots) +\method{lines}{regul}(x, series=1, col=3, lty=1, plot.pts=TRUE, \dots) +\method{identify}{regul}(x, series=1, col=3, label="#", \dots) +\method{hist}{regul}(x, nclass=30, col=c(4, 5, 2), + xlab=paste("Time distance in", x$units, "with start =", min(x$x), + ", n = ", length(x$x), ", deltat =", x$tspar$deltat), + ylab=paste("Frequency, tol =", x$specs$tol), + main="Number of matching observations", plotit=TRUE, \dots) +\method{extract}{regul}(e, n, series=NULL, \dots) +\method{specs}{regul}(x, \dots) +\method{print}{specs.regul}(x, \dots) } \arguments{ - \item{x}{ a vector containing times at which observations are sampled in the initial irregular time series. It can be expressed in any unit ("years", "days", "weeks", "hours", "min", "sec",...) as defined by the argument \code{units}. It is often expressed in "days" and the decimal part represents the part of the day, that is the time in hour:min:sec (dates coming from Excel, or even standard dates in S+ or \R are expressed like that) } + \item{x}{ for regul: a vector containing times at which observations are sampled in the initial irregular time series. It can be expressed in any unit ("years", "days", "weeks", "hours", "min", "sec",...) as defined by the argument \code{units}. It is often expressed in "days" and the decimal part represents the part of the day, that is the time in hour:min:sec (dates coming from Excel, or even standard dates in S+ or \R are expressed like that). For the methods, a 'tsd' object } \item{y}{ a vector (single series) or a matrix/data frame whose columns correspond to the various irregular time series to regulate. Rows are observations made at corresponding times in \code{x}. The number of rows must thus match the length of vector \code{x} } \item{xmin}{ allows to respecify the origin of time in \code{x}. By default, the origin is not redefined and thus, the smallest value in \code{x} is used } \item{n}{ the number of observations in the regular time series. By default, it is the same number than in the original irregular time series (i.e., \code{length(x)} } @@ -64,7 +60,8 @@ \item{window}{ parameter for the \code{"area"} regulation method. Size of the window to consider (see \code{regarea()}). By default, the mean interval between observations in the initial irregular time series is used. 
Give the same value as for deltat for working with adjacent windows }
\item{split}{ other parameter for the \code{"area"} method. To optimise calculation time and avoid saturating memory, very long time series are split into smaller subunits (see \code{regarea()}). This is transparent for the user. The default value of \code{split=100} should rarely be changed. Give a lower value if the program fails and reports a memory problem during calculation }
\item{specs}{ a \code{specs.regul} object returned by the function \code{specs()} applied to a \code{regul} object. Allows collecting the parameterization of the \code{regul()} function and applying it to another regulation }
- \item{reg}{ A \code{regul} object as obtained after using the \code{regul()} function }
+ \item{object}{ A \code{regul} object as obtained after using the \code{regul()} function }
+ \item{e}{ A \code{regul} object as obtained after using the \code{regul()} function }
\item{series}{ the series to plot. By default, \code{series=1}, corresponding to the first (or possibly the unique) series in the \code{regul} object }
\item{col}{ (1) for \code{plot()}: the two colors to use to draw respectively the initial irregular series and the final regulated series. \code{col=c(1,2)} by default. (2) for \code{hist()}: the three colors to use to represent respectively the first bar (exact coincidence), the middle bars (coincidence in a certain tolerance window) and the last bar (values always interpolated). By default, \code{col=c(4,5,2)} }
\item{lty}{ the style to use to draw lines for the initial series and the regulated series, respectively. The default style is used for both lines if this argument is not provided }
@@ -72,10 +69,13 @@
\item{leg}{ do we add a legend to the graph? By default, \code{leg=FALSE}, no legend is added }
\item{llab}{ the labels to use for the initial irregular and the final regulated series, respectively. By default, it is \code{"initial"} for the first one and the name of the regulation method used for the second one (see \code{methods} argument) }
\item{lpos}{ the position of the top-left corner of the legend box (x,y), in the graph coordinates }
- \item{\dots}{ additional graph parameters }
+ \item{xlab}{ the label of the x-axis }
+ \item{ylab}{ the label of the y-axis }
+ \item{main}{ the main title of the graph }
\item{label}{ the character to use to mark points interactively selected on the graph. By default, \code{label="#"} }
\item{nclass}{ the number of classes to calculate in the histogram. This is indicative and this value is automatically adjusted to obtain a nicely-formatted histogram. By default, \code{nclass=30} }
\item{plotit}{ If \code{plotit=TRUE} then the histogram is plotted. Otherwise, it is only calculated }
+ \item{\dots}{ additional parameters }
}
\details{
Several irregular time series (for instance, contained in a data frame) can be treated at once. Specify a vector with \code{"constant"}, \code{"linear"}, \code{"spline"} or \code{"area"} for the argument \code{methods} to use a different regulation method for each series. See the corresponding functions (\code{regconst()}, \code{reglin()}, \code{regspline()} and \code{regarea()}), respectively, for more details on these methods. Arguments can be saved in a \code{specs} object and reused for other similar regulation processes. Functions \code{regul.screen()} and \code{regul.adj()} are useful to choose the best time interval in the computed regular time series.
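A sketch of the workflow just described; the regul() call below reuses, verbatim, the parameter values from the deccensus.Rd example later in this digest, and 'releve' is a dataset shipped with pastecs:

library(pastecs)
data(releve)
# Regulate six irregular series at once, converting the "days" scale to "years"
rel.regy <- regul(releve$Day, releve[3:8], xmin=6, n=87, units="daystoyears",
    frequency=24, tol=2.2, methods="linear", datemin="21/03/1989",
    dateformat="d/m/Y")
plot(rel.regy, series=1)      # compare the initial and the regulated series
rel.ts <- tseries(rel.regy)   # extract regular 'ts' objects for further analysis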
If you want to work on seasonal effects in the time series, it is better to use a "years" time-scale (1 unit = 1 year), or to convert into such a scale. If the initial time unit is "days" (1 unit = 1 day), the conversion can be performed at the same time as the regulation by specifying \code{units="daystoyears"}.

Modified: pkg/man/regul.adj.Rd
===================================================================
--- pkg/man/regul.adj.Rd 2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/regul.adj.Rd 2013-07-14 12:07:33 UTC (rev 7)
@@ -7,12 +7,13 @@
\description{
Calculate and plot a histogram of the distances between interpolated observations in a regulated time series and the closest observations in the initial irregular time series. This allows optimising the \code{tol} parameter }
-\synopsis{
-regul.adj(x, xmin=min(x), frequency=NULL, deltat=(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))/(length(x) - 1), tol=deltat, tol.type="both", nclass=50, col=c(4, 5, 2), xlab=paste("Time distance"), ylab=paste("Frequency"), main="Number of matching observations", plotit=TRUE, ...)
-}
+
\usage{
-regul.adj(x, xmin=min(x), frequency=NULL, deltat, tol=deltat,
- tol.type="both", nclass=50, col=c(4, 5, 2), plotit=TRUE, \dots)
+regul.adj(x, xmin=min(x), frequency=NULL,
+ deltat=(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))/(length(x) - 1),
+ tol=deltat, tol.type="both", nclass=50, col=c(4, 5, 2),
+ xlab=paste("Time distance"), ylab=paste("Frequency"),
+ main="Number of matching observations", plotit=TRUE, \dots)
}
\arguments{
\item{x}{ a vector with times corresponding to the observations in the irregular initial time series }
@@ -23,6 +24,9 @@
\item{tol.type}{ the type of window to use for the time-tolerance: \code{"left"}, \code{"right"}, \code{"both"} (by default) or \code{"none"}. If \code{tol.type="left"}, corresponding \code{x} values are sought in a window ]xregul-tol, xregul]. If \code{tol.type="right"}, they are sought in the window [xregul, xregul+tol[. If \code{tol.type="both"}, then they are sought in the window ]xregul-tol, xregul+tol]. If several observations are in this window, the closest one is used. Finally, if \code{tol.type="none"}, then \emph{all} observations in the regulated time series are interpolated (even if exactly matching observations exist!) }
\item{nclass}{ the number of classes to compute in the histogram. This is indicative, and will be adjusted by the algorithm to produce a nicely-formatted histogram. The default value is \code{nclass=50}. It is acceptable in many cases, but if the histogram is not correct, try a larger value }
\item{col}{ the three colors to use to represent respectively the first bar (exact coincidence), the middle bars (coincidence in a certain tolerance window) and the last bar (values always interpolated). By default, \code{col=c(4,5,2)} }
+ \item{xlab}{ the label of the x-axis }
+ \item{ylab}{ the label of the y-axis }
+ \item{main}{ the main title of the graph }
\item{plotit}{ if \code{plotit=TRUE} then the histogram is plotted.
Otherwise, it is only calculated }
\item{\dots}{ additional graph parameters for the histogram }
}

Modified: pkg/man/regul.screen.Rd
===================================================================
--- pkg/man/regul.screen.Rd 2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/regul.screen.Rd 2013-07-14 12:07:33 UTC (rev 7)
@@ -7,12 +7,11 @@
\description{
Seek the best combination of the number of observations, the interval between two successive observations and the position of the first observation in the regulated time series, to match as many observations of the initial series as possible }
-\synopsis{
-regul.screen(x, weight=NULL, xmin=min(x), frequency=NULL, deltat=(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))/(length(x) - 1), tol=deltat/5, tol.type="both")
-}
+
\usage{
-regul.screen(x, weight=NULL, xmin=min(x), frequency=NULL, deltat,
- tol=deltat/5, tol.type="both")
+regul.screen(x, weight=NULL, xmin=min(x), frequency=NULL,
+ deltat=(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))/(length(x) - 1),
+ tol=deltat/5, tol.type="both")
}
\arguments{
\item{x}{ a vector with times corresponding to the observations in the irregular initial time series }

Modified: pkg/man/stat.slide.Rd
===================================================================
--- pkg/man/stat.slide.Rd 2013-01-24 15:58:51 UTC (rev 6)
+++ pkg/man/stat.slide.Rd 2013-07-14 12:07:33 UTC (rev 7)
@@ -10,22 +10,20 @@
\description{
Statistical parameters are not constant along a time series: mean or variance can vary each year, or during particular intervals (radical or smooth changes due to pollution, a very cold winter, a shift in the system behaviour, etc.). Sliding statistics offer the potential to describe series on successive blocs defined along the space-time axis }
-\synopsis{
-stat.slide(x, y, xcut=NULL, xmin=min(x), n=NULL, frequency=NULL, deltat=1/frequency, basic=FALSE, desc=FALSE, norm=FALSE, pen=FALSE, p=0.95)
-lines.stat.slide(x, stat="mean", col=3, lty=1, ...)
-plot.stat.slide(x, stat="mean", col=c(1, 2), lty=c(par("lty"), par("lty")), leg=FALSE, llab=c("series", stat), lpos=c(1.5, 10), xlab="time", ylab="y", main=paste("Sliding statistics"), ...)
-print.stat.slide(x, ...)
-}
+
\usage{
stat.slide(x, y, xcut=NULL, xmin=min(x), n=NULL, frequency=NULL, deltat=1/frequency, basic=FALSE, desc=FALSE, norm=FALSE, pen=FALSE, p=0.95)
-\method{plot}{stat.slide}(statsl, stat="mean", col=c(1, 2), lty=c(par("lty"), par("lty")),
- leg=FALSE, llab=c("series", stat), lpos=c(1.5, 10), \dots)
-\method{lines}{stat.slide}(statsl, stat="mean", col=3, lty=1, \dots)
+\method{print}{stat.slide}(x, \dots)
+\method{plot}{stat.slide}(x, stat="mean", col=c(1, 2), lty=c(par("lty"), par("lty")),
+ leg=FALSE, llab=c("series", stat), lpos=c(1.5, 10), xlab="time", ylab="y",
+ main=paste("Sliding statistics"), \dots)
+\method{lines}{stat.slide}(x, stat="mean", col=3, lty=1, \dots)
}
\arguments{
- \item{x}{ a vector with time data }
+ \item{x}{ a vector with time data for \code{stat.slide()}, or a 'stat.slide' object
+ for the methods }
\item{y}{ a vector with observations at corresponding times }
\item{xcut}{ a vector with the position in time of the breaks between successive blocs. \code{xcut=NULL} by default. In the latter case, a vector with equally spaced blocs is constructed using \code{xmin}, \code{n} and \code{frequency} or \code{deltat}.
If a value is provided for \code{xcut}, then it supersedes all these other parameters } \item{xmin}{ the minimal value in the time-scale to use for constructing a vector of equally spaced breaks } @@ -37,14 +35,16 @@ \item{norm}{ do we have to return normal distribution statistics (by default, it is FALSE)? the skewness coefficient g1 (skewness), its significant criterium (skew.2SE, that is, g1/2.SEg1; if skew.2SE > 1, then skewness is significantly different than zero), kurtosis coefficient g2 (kurtosis), its significant criterium (kurt.2SE, same remark than for skew.2SE), the statistic of a Shapiro-Wilk test of normality (normtest.W) and its associated probability (normtest.p) } \item{pen}{ do we have to return Pennington and other associated statistics (by default, it is FALSE)? pos.median, pos.mean, pos.var, pos.std.dev, respectively the median, the mean, the standard deviation and the variance, considering only non-null values; geo.mean, the geometric mean that is, the exponential of the mean of the logarithm of the observations, excluding null values. pen.mean, pen.var, pen.std.dev, pen.mean.var, respectively the mean, the variance, the standard deviation and the variance of the mean after Pennington's estimators (see \code{pennington()}) } \item{p}{ the probability level to use to calculate the confidence interval on the mean (CI.mean). By default, \code{p=0.95} } [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/pastecs -r 7 From noreply at r-forge.r-project.org Sun Jul 14 18:35:54 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 14 Jul 2013 18:35:54 +0200 (CEST) Subject: [Pastecs-commits] r8 - in pkg: . man Message-ID: <20130714163554.7903D184F99@r-forge.r-project.org> Author: phgrosjean Date: 2013-07-14 18:35:53 +0200 (Sun, 14 Jul 2013) New Revision: 8 Modified: pkg/DESCRIPTION pkg/man/deccensus.Rd pkg/man/local.trend.Rd pkg/man/tsd.Rd Log: Further clean up of Rd files (splitting too long lines in \usage and \example Modified: pkg/DESCRIPTION =================================================================== --- pkg/DESCRIPTION 2013-07-14 12:07:33 UTC (rev 7) +++ pkg/DESCRIPTION 2013-07-14 16:35:53 UTC (rev 8) @@ -1,6 +1,6 @@ Package: pastecs Title: Package for Analysis of Space-Time Ecological Series -Version: 1.3-14 +Version: 1.3-15 Date: 2013-07-13 Author: Frederic Ibanez , Philippe Grosjean & Michele Etienne Description: Regulation, decomposition and analysis of space-time series. The pastecs library is a PNEC-Art4 and IFREMER (Benoit Beliaeff ) initiative to bring PASSTEC 2000 (http://www.obs-vlfr.fr/~enseigne/anado/passtec/passtec.htm) functionalities to R. 
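A minimal usage sketch of stat.slide() as documented in the stat.slide.Rd hunk above; the 'marbio' dataset and the block layout (seven blocks of width 10, starting at x=0) are illustrative assumptions, not taken from that page:

library(pastecs)
data(marbio)
# Sliding statistics computed on successive blocks along the sampling transect
statsl <- stat.slide(1:68, marbio[, "ClausocalanusA"], xmin=0, n=7, deltat=10)
statsl                        # one column of statistics per block
plot(statsl, stat="mean", xlab="Station", ylab="Counts")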
Modified: pkg/man/deccensus.Rd
===================================================================
--- pkg/man/deccensus.Rd 2013-07-14 12:07:33 UTC (rev 7)
+++ pkg/man/deccensus.Rd 2013-07-14 16:35:53 UTC (rev 8)
@@ -42,7 +42,8 @@
\examples{
data(releve)
# Get regulated time series with a 'years' time-scale
-rel.regy <- regul(releve$Day, releve[3:8], xmin=6, n=87, units="daystoyears", frequency=24, tol=2.2, methods="linear", datemin="21/03/1989", dateformat="d/m/Y")
+rel.regy <- regul(releve$Day, releve[3:8], xmin=6, n=87, units="daystoyears",
+ frequency=24, tol=2.2, methods="linear", datemin="21/03/1989", dateformat="d/m/Y")
rel.ts <- tseries(rel.regy)
# We must have complete cycles to allow using deccensus()
start(rel.ts)

Modified: pkg/man/local.trend.Rd
===================================================================
--- pkg/man/local.trend.Rd 2013-07-14 12:07:33 UTC (rev 7)
+++ pkg/man/local.trend.Rd 2013-07-14 16:35:53 UTC (rev 8)
@@ -6,16 +6,21 @@
\title{ Calculate local trends using cumsum }
\description{
- A simple method using cumulated sums that allows detecting changes in the tendency of a time series
+ A simple method using cumulated sums that allows detecting changes in the
+ tendency of a time series
}
\usage{
-local.trend(x, k = mean(x), plotit = TRUE, type="l", cols=1:2, ltys=2:1, xlab="Time", ylab="cusum", \dots)
+local.trend(x, k=mean(x), plotit=TRUE, type="l", cols=1:2, ltys=2:1,
+ xlab="Time", ylab="cusum", \dots)
\method{identify}{local.trend}(x, \dots)
}
\arguments{
- \item{x}{ a regular time series (a 'ts' object) for \code{local.trend()} or a 'local.trend' object for \code{identify()} }
- \item{k}{ the reference value to subtract from cumulated sums. By default, it is the mean of all observations in the series }
- \item{plotit}{ if \code{plotit=TRUE} (by default), a graph with the cumsum curve superposed on the original series is plotted }
+ \item{x}{ a regular time series (a 'ts' object) for \code{local.trend()} or a
+ 'local.trend' object for \code{identify()} }
+ \item{k}{ the reference value to subtract from cumulated sums. By default, it
+ is the mean of all observations in the series }
+ \item{plotit}{ if \code{plotit=TRUE} (by default), a graph with the cumsum
+ curve superposed on the original series is plotted }
\item{type}{ the type of plot (as usual notation for this argument) }
\item{cols}{ colors to use for original data and for the trend line }
\item{ltys}{ line types to use for original data and the trend line }
@@ -36,13 +41,22 @@
a 'local.trend' object is returned. It has the method \code{identify()} }
\references{
-Ibanez, F., J.M. Fromentin & J. Castel, 1993. \emph{Application de la méthode des sommes cumulées à l'analyse des séries chronologiques océanographiques.} C. R. Acad. Sci. Paris, Life Sciences, 316:745-748.
+Ibanez, F., J.M. Fromentin & J. Castel, 1993. \emph{Application de la méthode
+ des sommes cumulées à l'analyse des séries chronologiques océanographiques.}
+ C. R. Acad. Sci. Paris, Life Sciences, 316:745-748.
}
\author{ Frédéric Ibanez (\email{ibanez at obs-vlfr.fr}), Philippe Grosjean (\email{phgrosjean at sciviews.org}) }
-\note{ Once transitions are identified with this method, you can use \code{stat.slide()} to get more detailed information on each phase. A smoothing of the series using running medians (see \code{decmedian()}) also allows detecting various levels in a time series, but according to the median statistic.
Under \R, see also the 'strucchange' package for a more complete, but more complex, implementation of cumsum applied to time series. } +\note{ + Once transitions are identified with this method, you can use + \code{stat.slide()} to get more detailed information on each phase. A + smoothing of the series using running medians (see \code{decmedian()}) allows + also to detect various levels in a time series, but according to the median + statistic. Under \R, see also the 'strucchange' package for a more complete, + but more complex, implementation of cumsum applied to time series. } -\seealso{ \code{\link{trend.test}}, \code{\link{stat.slide}}, \code{\link{decmedian}} } +\seealso{ \code{\link{trend.test}}, \code{\link{stat.slide}}, + \code{\link{decmedian}} } \examples{ data(bnr) Modified: pkg/man/tsd.Rd =================================================================== --- pkg/man/tsd.Rd 2013-07-14 12:07:33 UTC (rev 7) +++ pkg/man/tsd.Rd 2013-07-14 16:35:53 UTC (rev 8) @@ -12,84 +12,178 @@ \title{ Decomposition of one or several regular time series using various methods } \description{ - Use a decomposition method to split the series into two or more components. Decomposition methods are either series filtering/smoothing (difference, average, median, evf), deseasoning (loess) or model-based decomposition (reg, i.e., regression). + Use a decomposition method to split the series into two or more components. + Decomposition methods are either series filtering/smoothing (difference, + average, median, evf), deseasoning (loess) or model-based decomposition (reg, + i.e., regression). } \usage{ -tsd(x, specs=NULL, method="loess", type=if (method == "census") "multiplicative" else "additive", +tsd(x, specs=NULL, method="loess", + type=if (method == "census") "multiplicative" else "additive", lag=1, axes=1:5, order=1, times=1, sides=2, ends="fill", weights=NULL, s.window=NULL, s.degree=0, t.window=NULL, t.degree=2, robust=FALSE, trend=FALSE, xreg=NULL) \method{print}{tsd}(x, \dots) \method{summary}{tsd}(object, \dots) \method{print}{summary.tsd}(x, \dots) -\method{plot}{tsd}(x, series=1, stack=TRUE, resid=TRUE, col=par("col"), lty=par("lty"), - labels=dimnames(X)[[2]], leg=TRUE, lpos=c(0, 0), xlab="time", ylab="series", - main=paste("Series decomposition by", x$specs$method, "-", x$specs$type), \dots) +\method{plot}{tsd}(x, series=1, stack=TRUE, resid=TRUE, col=par("col"), + lty=par("lty"), labels=dimnames(X)[[2]], leg=TRUE, lpos=c(0, 0), xlab="time", + ylab="series", main=paste("Series decomposition by", x$specs$method, "-", + x$specs$type), \dots) \method{extract}{tsd}(e, n, series=NULL, components=NULL, \dots) \method{specs}{tsd}(x, \dots) \method{print}{specs.tsd}(x, \dots) } + \arguments{ - \item{x}{ an univariate or multivariate regular time series ('ts' object) to be - decomposed for \code{tsd()}, or a 'tsd' object for the methods } - \item{specs}{ specifications are collected from a 'tsd' object, using the \code{specs} method. This allows for reusing parameters issued from a previous similar analysis } - \item{method}{ the method to use to decompose the time series. Currently, possible values are: \code{"diff"}, \code{"average"}, \code{"median"}, \code{"evf"}, \code{"reg"}, \code{"loess"} (by default) or \code{"census"}. The corresponding function \code{decXXXX()} is applied to each of the series in \code{x} } - \item{type}{ the type of model to use: either \code{"additive"} (by default) or \code{"multiplicative"}. 
In the additive model, all components must be added to reconstruct the initial series. In the multiplicative model, they must be multiplied (one components has the same unit as the original series, and the other ones are dimensionless multiplicative factors) } - \item{lag}{ The lag between the two observations used to calculate differences. By default, \code{lag=1} } + \item{x}{ an univariate or multivariate regular time series ('ts' object) to + be decomposed for \code{tsd()}, or a 'tsd' object for the methods } + \item{specs}{ specifications are collected from a 'tsd' object, using the + \code{specs} method. This allows for reusing parameters issued from a + previous similar analysis } + \item{method}{ the method to use to decompose the time series. Currently, + possible values are: \code{"diff"}, \code{"average"}, \code{"median"}, + \code{"evf"}, \code{"reg"}, \code{"loess"} (by default) or \code{"census"}. + The corresponding function \code{decXXXX()} is applied to each of the series + in \code{x} } + \item{type}{ the type of model to use: either \code{"additive"} (by default) + or \code{"multiplicative"}. In the additive model, all components must be + added to reconstruct the initial series. In the multiplicative model, they + must be multiplied (one components has the same unit as the original series, + and the other ones are dimensionless multiplicative factors) } + \item{lag}{ The lag between the two observations used to calculate differences. + By default, \code{lag=1} } \item{axes}{ the number of axes to show in the plot } - \item{order}{ (1) for the method 'difference': the order of the difference corresponds to the number of times it is applied, by default \code{order=1}, - (2) for the method 'average': the order of the moving average (the window of the average being 2*order+1), centered around the current observation or at left of this observation depending upon the value of the \code{sides} argument. Weights are the same for all observations within the window. However, if the argument \code{weights} is provided, it supersedes \code{order}. One can also use \code{order="periodic"}. In this case, a deseasoning filter is calculated according to the value of \code{frequency}} + \item{order}{ (1) for the method 'difference': the order of the difference + corresponds to the number of times it is applied, by default \code{order=1}, + (2) for the method 'average': the order of the moving average (the window of + the average being 2*order+1), centered around the current observation or at + left of this observation depending upon the value of the \code{sides} + argument. Weights are the same for all observations within the window. + However, if the argument \code{weights} is provided, it supersedes + \code{order}. One can also use \code{order="periodic"}. In this case, a + deseasoning filter is calculated according to the value of \code{frequency}} \item{times}{ The number of times to apply the method (by default, once) } - \item{sides}{ If 2 (by default), the window is centered around the current observation. 
If 1, the window is at left of the current observation (including it) } - \item{ends}{ either "NAs" (fill first and last values that are not calculable with NAs), or "fill" (fill them with the average of observations before applying the filter, by default), or "circular" (use last values for estimating first ones and vice versa), or "periodic" (use entire periods of contiguous cycles, deseasoning) } - \item{weights}{ a vector indicating weight to give to all observations in the window. This argument has the priority over \code{order} } - \item{s.window}{ the width of the window used to extract the seasonal component. Use an odd value equal or just larger than the number of annual values (frequency of the time series). Use another value to extract other cycles (circadian, lunar,...). Using \code{s.window="periodic"} ensures a correct value for extracting a seasonal component when the time scale is in years units } - \item{s.degree}{ the order of the polynome to use to extract the seasonal component (0 or 1). By default \code{s.degree=0} } - \item{t.window}{ the width of the window to use to extract the general trend when \code{trend=TRUE} (indicate an odd value). If this parameter is not provided, a reasonable value is first calculated, and then used by the algorithm. } - \item{t.degree}{ the order of the polynome to use to extract the general trend (0, 1 or 2). By default \code{t.degree=2} } - \item{robust}{ if \code{TRUE} a robust regression method is used. Otherwise (\code{FALSE}), by default, a classical least-square regression is used } - \item{trend}{ If \code{TRUE} a trend is calculated (under R only). Otherwise, the series is decomposed into a seasonal component and residuals only } - \item{xreg}{ a second regular time series or a vector of the same length as \code{x} with corresponding values from the regression model } - \item{object}{ a 'tsd' object as returned by the function \code{tsd()}, or any of the \code{decXXXX()} functions } - \item{e}{ a 'tsd' object as returned by the function \code{tsd()}, or any of the \code{decXXXX()} functions } - \item{series}{ (1) for \code{plot()}: the series to plot. By default, \code{series=1}, the first (or possibly unique) series in the 'tsd' object is plotted. (2) for \code{extract}: the name or the index of the series to extract. If \code{series} is provided, then \code{n} is ignored. By default, \code{series=NULL}. It is also possible to use negative indices. In this case, all series are extracted, except those ones } - \item{stack}{ graphs of each component are either stacked (\code{stack=TRUE}, by default), or superposed on the same graph \code{stack=FALSE} } - \item{resid}{ do we have to plot also the "residuals" components (\code{resid=TRUE}, by default) or not? Usually, in a stacked graph, you would like to plot the residuals, while in a superposed graph, you would not } + \item{sides}{ If 2 (by default), the window is centered around the current + observation. If 1, the window is at left of the current observation + (including it) } + \item{ends}{ either "NAs" (fill first and last values that are not calculable + with NAs), or "fill" (fill them with the average of observations before + applying the filter, by default), or "circular" (use last values for + estimating first ones and vice versa), or "periodic" (use entire periods of + contiguous cycles, deseasoning) } + \item{weights}{ a vector indicating weight to give to all observations in the + window. 
This argument has the priority over \code{order} } + \item{s.window}{ the width of the window used to extract the seasonal + component. Use an odd value equal or just larger than the number of annual + values (frequency of the time series). Use another value to extract other + cycles (circadian, lunar,...). Using \code{s.window="periodic"} ensures a + correct value for extracting a seasonal component when the time scale is in + years units } + \item{s.degree}{ the order of the polynome to use to extract the seasonal + component (0 or 1). By default \code{s.degree=0} } + \item{t.window}{ the width of the window to use to extract the general trend + when \code{trend=TRUE} (indicate an odd value). If this parameter is not + provided, a reasonable value is first calculated, and then used by the + algorithm. } + \item{t.degree}{ the order of the polynome to use to extract the general trend + (0, 1 or 2). By default \code{t.degree=2} } + \item{robust}{ if \code{TRUE} a robust regression method is used. Otherwise + (\code{FALSE}), by default, a classical least-square regression is used } + \item{trend}{ If \code{TRUE} a trend is calculated (under R only). Otherwise, + the series is decomposed into a seasonal component and residuals only } + \item{xreg}{ a second regular time series or a vector of the same length as + \code{x} with corresponding values from the regression model } + \item{object}{ a 'tsd' object as returned by the function \code{tsd()}, or any + of the \code{decXXXX()} functions } + \item{e}{ a 'tsd' object as returned by the function \code{tsd()}, or any of + the \code{decXXXX()} functions } + \item{series}{ (1) for \code{plot()}: the series to plot. By default, + \code{series=1}, the first (or possibly unique) series in the 'tsd' object + is plotted. (2) for \code{extract}: the name or the index of the series to + extract. If \code{series} is provided, then \code{n} is ignored. By default, + \code{series=NULL}. It is also possible to use negative indices. In this + case, all series are extracted, except those ones } + \item{stack}{ graphs of each component are either stacked (\code{stack=TRUE}, + by default), or superposed on the same graph \code{stack=FALSE} } + \item{resid}{ do we have to plot also the "residuals" components + (\code{resid=TRUE}, by default) or not? Usually, in a stacked graph, you + would like to plot the residuals, while in a superposed graph, you would not } \item{col}{ color of the plot } \item{lty}{ line type for the plot } - \item{labels}{ the labels to use for all y-axes in a stacked graph, or in the legend for a superposed graph. By default, the names of the components ("trend", "seasonal", "deseasoned", "filtered", "residuals", ...) are used } - \item{leg}{ only used when \code{stack=FALSE}. Do we plot a legend (\code{leg=TRUE} or not? } - \item{lpos}{ position of the upper-left corner of the legend box in the graph coordinates (x,y). By default, \code{leg=c(0,0)} } + \item{labels}{ the labels to use for all y-axes in a stacked graph, or in the + legend for a superposed graph. By default, the names of the components + ("trend", "seasonal", "deseasoned", "filtered", "residuals", ...) are used } + \item{leg}{ only used when \code{stack=FALSE}. Do we plot a legend + (\code{leg=TRUE} or not? } + \item{lpos}{ position of the upper-left corner of the legend box in the graph + coordinates (x,y). 
By default, \code{leg=c(0,0)} } \item{xlab}{ the label of the x-axis } \item{ylab}{ the label of the y-axis } \item{main}{ the main title of the graph} - \item{n}{ the number of series to extract (from series 1 to series n). By default, n equals the number of series in the 'tsd' object. If both \code{series} and \code{components} arguments are NULL, all series and components are extracted and this method has exactly the same effect as \code{tseries} } - \item{components}{ the names or indices of the components to extract. If \code{components=NULL} (by default), then all components of the selected series are extracted. It is also possible to specify negative indices. In this case, all components are extracted, except those ones } - \item{\dots}{ (1) for \code{tsd()}: further arguments to pass to the corresponding \code{decXXXX()} function. (2) for \code{plot()}: further graphical arguments, (3) unused for the other functions or methods } + \item{n}{ the number of series to extract (from series 1 to series n). By + default, n equals the number of series in the 'tsd' object. If both + \code{series} and \code{components} arguments are NULL, all series and + components are extracted and this method has exactly the same effect as + \code{tseries} } + \item{components}{ the names or indices of the components to extract. If + \code{components=NULL} (by default), then all components of the selected + series are extracted. It is also possible to specify negative indices. In + this case, all components are extracted, except those ones } + \item{\dots}{ (1) for \code{tsd()}: further arguments to pass to the + corresponding \code{decXXXX()} function. (2) for \code{plot()}: further + graphical arguments, (3) unused for the other functions or methods } } + \details{ - To eliminate trend from a series, use "diff" or use "loess" with \code{trend=TRUE}. If you know the shape of the trend (linear, exponential, periodic, etc.), you can also use it with the "reg" (regression) method. To eliminate or extract seasonal components, you can use "loess" if the seasonal component is additive, or "census" if it is multiplicative. You can also use "average" with argument \code{order="periodic"} and with either an additive or a multiplicative model, although the later method is often less powerful than "loess" or "census". If you want to extract a seasonal cycle with a given shape (for instance, a sinusoid), use the "reg" method with a fitted sinusoidal equation. If you want to identify levels in the series, use the "median" method. To smooth the series, you can use preferably the "evf" (eigenvector filtering), or the "average" methods, but you can also use "median". To extract most important components from the series (no matter if they are cycles -seasonal or not-, or long-term trends), you should use the "evf" method. For more information on each of these methods, see online help of the corresponding \code{decXXXX()} functions. + To eliminate trend from a series, use "diff" or use "loess" with + \code{trend=TRUE}. If you know the shape of the trend (linear, exponential, + periodic, etc.), you can also use it with the "reg" (regression) method. To + eliminate or extract seasonal components, you can use "loess" if the seasonal + component is additive, or "census" if it is multiplicative. You can also use + "average" with argument \code{order="periodic"} and with either an additive or + a multiplicative model, although the later method is often less powerful than + "loess" or "census". 
If you want to extract a seasonal cycle with a given
+ shape (for instance, a sinusoid), use the "reg" method with a fitted
+ sinusoidal equation. If you want to identify levels in the series, use the
+ "median" method. To smooth the series, you can preferably use the "evf"
+ (eigenvector filtering) or the "average" methods, but you can also use
+ "median". To extract the most important components from the series (no matter if
+ they are cycles (seasonal or not) or long-term trends), you should use the
+ "evf" method. For more information on each of these methods, see the online help
+ of the corresponding \code{decXXXX()} functions.
}
\value{
- An object of type 'tsd' is returned. It has methods \code{print()}, \code{summary()}, \code{plot()}, \code{extract()} and \code{specs()}.
+ An object of type 'tsd' is returned. It has methods \code{print()},
+ \code{summary()}, \code{plot()}, \code{extract()} and \code{specs()}.
}
\references{
Kendall, M., 1976. \emph{Time-series.} Charles Griffin & Co Ltd. 197 pp.

Laloire, J.C., 1972. \emph{Méthodes du traitement des chroniques.} Dunod, Paris, 194 pp.

-Legendre, L. & P. Legendre, 1984. \emph{Écologie numérique. Tome 2: La structure des données écologiques.} Masson, Paris. 335 pp.
+Legendre, L. & P. Legendre, 1984. \emph{Écologie numérique. Tome 2: La structure
+ des données écologiques.} Masson, Paris. 335 pp.

Malinvaud, E., 1978. \emph{Méthodes statistiques de l'économétrie.} Dunod, Paris. 846 pp.

-Philips, L. & R. Blomme, 1973. \emph{Analyse chronologique.} Université Catholique de Louvain. Vander ed. 339 pp.
+Philips, L. & R. Blomme, 1973. \emph{Analyse chronologique.} Université
+ Catholique de Louvain. Vander ed. 339 pp.
}
-\author{ Frédéric Ibanez (\email{ibanez at obs-vlfr.fr}), Philippe Grosjean (\email{phgrosjean at sciviews.org}) }
-\note{ If you have to decompose a single time series, you could also use the corresponding \code{decXXXX()} function directly. In the case of a multivariate regular time series, \code{tsd()} is more convenient because it decomposes all time series of a set at once! }
-\seealso{ \code{\link{tseries}}, \code{\link{decdiff}}, \code{\link{decaverage}}, \code{\link{decmedian}}, \code{\link{decevf}}, \code{\link{decreg}}, \code{\link{decloess}}, \code{\link{deccensus}} }
+\author{ Frédéric Ibanez (\email{ibanez at obs-vlfr.fr}),
+ Philippe Grosjean (\email{phgrosjean at sciviews.org}) }
+\note{ If you have to decompose a single time series, you could also use the
+ corresponding \code{decXXXX()} function directly. In the case of a multivariate
+ regular time series, \code{tsd()} is more convenient because it decomposes all
+ time series of a set at once! }
+
+\seealso{ \code{\link{tseries}}, \code{\link{decdiff}}, \code{\link{decaverage}},
+ \code{\link{decmedian}}, \code{\link{decevf}}, \code{\link{decreg}},
+ \code{\link{decloess}}, \code{\link{deccensus}} }
+
\examples{
data(releve)
# Regulate the series and extract them as a time series object
@@ -116,6 +210,7 @@
# Extract residuals from the latter decomposition
rel.res2 <- extract(rel.des.dec, components="residuals")
}
+
\keyword{ ts }
\keyword{ smooth }
\keyword{ loess }
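To close this digest, a sketch chaining the pieces documented above (regulation, extraction as 'ts' objects, then loess decomposition with tsd()); s.window="periodic" and the choice of series 3 are illustrative assumptions:

library(pastecs)
data(releve)
rel.regy <- regul(releve$Day, releve[3:8], xmin=6, n=87, units="daystoyears",
    frequency=24, tol=2.2, methods="linear", datemin="21/03/1989",
    dateformat="d/m/Y")
rel.ts <- tseries(rel.regy)   # multivariate regular time series
# Deseason all series at once with the (default) loess method, no trend
rel.dec <- tsd(rel.ts, method="loess", s.window="periodic", trend=FALSE)
plot(rel.dec, series=3)       # stacked plot of the components of one series
# Keep only the deseasoned component of that series
rel.deseas <- extract(rel.dec, series=3, components="deseasoned")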