[Vegan-commits] r2305 - pkg/vegan/man

noreply at r-forge.r-project.org
Fri Sep 28 15:17:12 CEST 2012


Author: jarioksa
Date: 2012-09-28 15:17:12 +0200 (Fri, 28 Sep 2012)
New Revision: 2305

Modified:
   pkg/vegan/man/BCI.Rd
   pkg/vegan/man/add1.cca.Rd
   pkg/vegan/man/adipart.Rd
   pkg/vegan/man/adonis.Rd
   pkg/vegan/man/anosim.Rd
   pkg/vegan/man/anova.cca.Rd
   pkg/vegan/man/beals.Rd
   pkg/vegan/man/betadisper.Rd
   pkg/vegan/man/betadiver.Rd
   pkg/vegan/man/bgdispersal.Rd
   pkg/vegan/man/bioenv.Rd
Log:
proofread Rd files a..b

Modified: pkg/vegan/man/BCI.Rd
===================================================================
--- pkg/vegan/man/BCI.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/BCI.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -21,7 +21,7 @@
   The data frame contains only the Barro Colorado Island subset of the
   original data.
 
-  The quadrats are located in a regular grid. See\code{examples} for the
+  The quadrats are located in a regular grid. See \code{examples} for the
   coordinates. 
 
 }

Modified: pkg/vegan/man/add1.cca.Rd
===================================================================
--- pkg/vegan/man/add1.cca.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/add1.cca.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -4,7 +4,7 @@
 
 \title{Add or Drop Single Terms to a Constrained Ordination Model}
 \description{
-Compute all single terms that can be added or dropped from a
+Compute all single terms that can be added to or dropped from a
 constrained ordination model.
 }
 \usage{
@@ -19,7 +19,7 @@
   \code{\link{cca}}, \code{\link{rda}} or \code{\link{capscale}}. }
   \item{scope}{ A formula giving the terms to be considered for adding
   or dropping; see \code{\link{add1}} for details.}
-  \item{test}{ Should a permutation test added using \code{\link{anova.cca}}. }
+  \item{test}{ Should a permutation test be added using \code{\link{anova.cca}}? }
   \item{pstep}{Number of permutations in one step, passed as argument
   \code{step} to \code{\link{anova.cca}}.}
   \item{perm.max}{ Maximum number of permutations in \code{\link{anova.cca}}. }
@@ -36,16 +36,16 @@
   Function \code{add1.cca} will implement a test for single term
   additions that is not directly available in \code{\link{anova.cca}}.
 
-  Functions are used implicitly in \code{\link{step}} and
-  \code{\link{ordistep}}. The \code{\link{deviance.cca}} and
-  \code{\link{deviance.rda}} used in \code{\link{step}} have no firm
-  basis, and setting argument \code{test = "permutation"} may help in
-  getting useful insight into validity of model building. Function
-  \code{\link{ordistep}} calls alternately \code{drop1.cca} and
-  \code{add1.cca} with argument \code{test = "permutation"} and
-  selects variables by their permutation \eqn{P}-values.  Meticulous
-  use of \code{add1.cca} and \code{drop1.cca} will allow more
-  judicious model building.
+  Functions are used implicitly in \code{\link{step}},
+  \code{\link{ordiR2step}} and \code{\link{ordistep}}. The
+  \code{\link{deviance.cca}} and \code{\link{deviance.rda}} used in
+  \code{\link{step}} have no firm basis, and setting argument \code{test
+  = "permutation"} may help in getting useful insight into validity of
+  model building. Function \code{\link{ordistep}} calls alternately
+  \code{drop1.cca} and \code{add1.cca} with argument \code{test =
+  "permutation"} and selects variables by their permutation
+  \eqn{P}-values.  Meticulous use of \code{add1.cca} and
+  \code{drop1.cca} will allow more judicious model building.
 
   The default \code{perm.max} is set to a low value, because
   permutation tests can take a long time. It should be sufficient to
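
As a quick illustration of the add1/drop1 workflow with ordistep
described above, a minimal R sketch (model formulas chosen only for
illustration; not part of the commit):

  library(vegan)
  data(dune)
  data(dune.env)
  m0 <- rda(dune ~ 1, dune.env)   # null model without constraints
  m1 <- rda(dune ~ ., dune.env)   # full model defines the scope
  ## single-term additions tested by permutation
  add1(m0, scope = formula(m1), test = "permutation")
  ## stepwise selection on permutation P-values
  ordistep(m0, scope = formula(m1))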

Modified: pkg/vegan/man/adipart.Rd
===================================================================
--- pkg/vegan/man/adipart.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/adipart.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -32,88 +32,122 @@
   \item{x}{A matrix with same number of rows as in \code{y}, columns
     coding the levels of sampling hierarchy. The number of groups within
     the hierarchy must decrease from left to right. If \code{x} is missing,
-    two levels are assumed: each row is a group in the first level, and
-    all rows are in the same group in the second level.}
-  \item{formula}{A two sided model formula in the form \code{y ~ x}, where \code{y} 
-    is the community data matrix with samples as rows and species as column. Right 
-    hand side (\code{x}) must grouping vaiables referring to levels of sampling hierarchy, 
-    terms from right to left will be treated as nested (first column is the lowest, 
-    last is the highest level, at least two levels specified). Interaction terms are not allowed.}
-  \item{data}{A data frame where to look for variables defined in the right hand side 
-    of \code{formula}. If missing, variables are looked in the global environment.}
+    function performs an overall decomposition into alpha, beta and
+    gamma diversities.}
+  \item{formula}{A two sided model formula in the form \code{y ~ x},
+    where \code{y} is the community data matrix with samples as rows and
+    species as columns. The right hand side (\code{x}) must be grouping
+    variables referring to levels of the sampling hierarchy; terms from
+    right to left will be treated as nested (the first column is the
+    lowest, the last is the highest level; at least two levels must be
+    specified). Interaction terms are not allowed.}
+
+  \item{data}{A data frame in which to look for variables defined on the
+    right hand side of \code{formula}. If missing, variables are looked
+    up in the global environment.}
+
   \item{index}{Character, the diversity index to be calculated (see Details).}
-  \item{weights}{Character, \code{"unif"} for uniform weights, \code{"prop"} for 
-    weighting proportional to sample abundances to use in weighted averaging of individual 
-    alpha values within strata of a given level of the sampling hierarchy.}
-  \item{relative}{Logical, if \code{TRUE} then alpha and beta diversity values are given 
-    relative to the value of gamma for function \code{adipart}.}
-  \item{nsimul}{Number of permutation to use if \code{matr} is not of class 'permat'.
-    If \code{nsimul = 0}, only the \code{FUN} argument is evaluated. It is thus possible
-    to reuse the statistic values without using a null model.}
-  \item{FUN}{A function to be used by \code{hiersimu}. This must be fully specified,
-    because currently other arguments cannot be passed to this function via \code{\dots}.}
-  \item{location}{Character, identifies which function (mean or median) is to be used to 
-    calculate location of the samples.}
-  \item{drop.highest}{Logical, to drop the highest level or not. When \code{FUN} 
-    evaluates only arrays with at least 2 dimensions, highest level should be dropped, 
-    or not selected at all.}
-  \item{\dots}{Other arguments passed to functions, e.g. base of logarithm for 
-    Shannon diversity, or \code{method}, \code{thin} or \code{burnin} arguments for
-    \code{\link{oecosimu}}.}
+
+  \item{weights}{Character, \code{"unif"} for uniform weights,
+    \code{"prop"} for weighting proportional to sample abundances to use
+    in weighted averaging of individual alpha values within strata of a
+    given level of the sampling hierarchy.}
+
+  \item{relative}{Logical, if \code{TRUE} then alpha and beta diversity
+    values are given relative to the value of gamma for function
+    \code{adipart}.}
+
+  \item{nsimul}{Number of permutations to use if \code{matr} is not of
+    class 'permat'.  If \code{nsimul = 0}, only the \code{FUN} argument
+    is evaluated. It is thus possible to reuse the statistic values
+    without using a null model.}
+
+  \item{FUN}{A function to be used by \code{hiersimu}. This must be
+    fully specified, because currently other arguments cannot be passed
+    to this function via \code{\dots}.}
+
+  \item{location}{Character, identifies which function (mean or median)
+    is to be used to calculate location of the samples.}
+
+  \item{drop.highest}{Logical, whether to drop the highest level or not.
+    When \code{FUN} evaluates only arrays with at least 2 dimensions,
+    the highest level should be dropped or not selected at all.}
+
+  \item{\dots}{Other arguments passed to functions, e.g. base of
+    logarithm for Shannon diversity, or \code{method}, \code{thin} or
+    \code{burnin} arguments for \code{\link{oecosimu}}.}
 }
+
 \details{
-Additive diversity partitioning means that mean alpha and beta diversity adds up to gamma 
-diversity, thus beta diversity is measured in the same dimensions as alpha and gamma 
-(Lande 1996). This additive procedure is than extended across multiple scales in a 
-hierarchical sampling design with \eqn{i = 1, 2, 3, \ldots, m} levels of sampling 
-(Crist et al. 2003). Samples in lower hierarchical levels are nested within higher level 
-units, thus from \eqn{i=1} to \eqn{i=m} grain size is increasing under constant survey 
-extent. At each level \eqn{i}, \eqn{\alpha_i} denotes average diversity found within samples.
 
-At the highest sampling level, the diversity components are calculated as 
-\deqn{\beta_m = \gamma - \alpha_m}{beta_m = gamma - alpha_m} 
-For each lower sampling level as
-\deqn{\beta_i = \alpha_{i+1} - \alpha_i}{beta_i = alpha_i+1 - alpha_i}
-Then, the additive partition of diversity is 
-\deqn{\gamma = \alpha_1 + \sum_{i=1}^m \beta_i}{gamma = alpha_1 + sum(beta_i)}
+  Additive diversity partitioning means that mean alpha and beta
+  diversities add up to gamma diversity, thus beta diversity is measured
+  in the same dimensions as alpha and gamma (Lande 1996). This additive
+  procedure is then extended across multiple scales in a hierarchical
+  sampling design with \eqn{i = 1, 2, 3, \ldots, m} levels of sampling
+  (Crist et al. 2003). Samples in lower hierarchical levels are nested
+  within higher level units, thus from \eqn{i=1} to \eqn{i=m} grain size
+  is increasing under constant survey extent. At each level \eqn{i},
+  \eqn{\alpha_i} denotes average diversity found within samples.
 
-Average alpha components can be weighted uniformly (\code{weight="unif"}) to calculate 
-it as simple average, or proportionally to sample abundances (\code{weight="prop"}) to 
-calculate it as weighted average as follows
-\deqn{\alpha_i = \sum_{j=1}^{n_i} D_{ij} w_{ij}}{alpha_i = sum(D_ij*w_ij)}
-where \eqn{D_{ij}} is the diversity index and \eqn{w_{ij}} is the weight calculated for 
-the \eqn{j}th sample at the \eqn{i}th sampling level.
+  At the highest sampling level, the diversity components are calculated
+  as \deqn{\beta_m = \gamma - \alpha_m}{beta_m = gamma - alpha_m} For
+  each lower sampling level as \deqn{\beta_i = \alpha_{i+1} -
+  \alpha_i}{beta_i = alpha_i+1 - alpha_i} Then, the additive partition
+  of diversity is \deqn{\gamma = \alpha_1 + \sum_{i=1}^m \beta_i}{gamma
+  = alpha_1 + sum(beta_i)}
 
-The implementation of additive diversity partitioning in \code{adipart} follows Crist et 
-al. 2003. It is based on species richness (\eqn{S}, not \eqn{S-1}), Shannon's and 
-Simpson's diversity indices stated as the \code{index} argument.
+  Average alpha components can be weighted uniformly
+  (\code{weights="unif"}) to calculate them as a simple average, or
+  proportionally to sample abundances (\code{weights="prop"}) to
+  calculate them as a weighted average as follows \deqn{\alpha_i =
+  \sum_{j=1}^{n_i} D_{ij} w_{ij}}{alpha_i = sum(D_ij*w_ij)} where
+  \eqn{D_{ij}} is the diversity index and \eqn{w_{ij}} is the weight
+  calculated for the \eqn{j}th sample at the \eqn{i}th sampling level.
 
-The expected diversity components are calculated \code{nsimul} times by individual based 
-randomisation of the community data matrix. This is done by the \code{"r2dtable"} method
-in \code{\link{oecosimu}} by default.
+  The implementation of additive diversity partitioning in
+  \code{adipart} follows Crist et al. (2003). It is based on species
+  richness (\eqn{S}, not \eqn{S-1}), and on Shannon's and Simpson's
+  diversity indices, as specified by the \code{index} argument.
 
-\code{hiersimu} works almost the same as \code{adipart}, but without comparing the actual 
-statistic values returned by \code{FUN} to the highest possible value (cf. gamma diversity). 
-This is so, because in most of the cases, it is difficult to ensure additive properties of 
-the mean statistic values along the hierarchy.
+  The expected diversity components are calculated \code{nsimul} times
+  by individual-based randomisation of the community data matrix. This
+  is done by the \code{"r2dtable"} method in \code{\link{oecosimu}} by
+  default.
+
+  \code{hiersimu} works almost in the same way as \code{adipart}, but
+  without comparing the actual statistic values returned by \code{FUN}
+  to the highest possible value (cf. gamma diversity). This is because,
+  in most cases, it is difficult to ensure additive properties of the
+  mean statistic values along the hierarchy.
+
 }
 \value{
-An object of class 'adipart' or 'hiersimu' with same structure as 'oecosimu' objects.
+
+  An object of class \code{"adipart"} or \code{"hiersimu"} with the same
+  structure as \code{\link{oecosimu}} objects.
+
 }
+
 \references{
-Crist, T.O., Veech, J.A., Gering, J.C. and Summerville,
-K.S. (2003). Partitioning species diversity across landscapes and regions:
-a hierarchical analysis of \eqn{\alpha}, \eqn{\beta}, and
-\eqn{\gamma}-diversity.
-\emph{Am. Nat.}, \bold{162}, 734--743.
 
-Lande, R. (1996). Statistics and partitioning of species
-diversity, and similarity among multiple communities.
-\emph{Oikos}, \bold{76}, 5--13.
+  Crist, T.O., Veech, J.A., Gering, J.C. and Summerville, K.S. (2003).
+  Partitioning species diversity across landscapes and regions: a
+  hierarchical analysis of \eqn{\alpha}, \eqn{\beta}, and
+  \eqn{\gamma}-diversity. \emph{Am. Nat.}, \bold{162}, 734--743.
+
+  Lande, R.  (1996). Statistics and partitioning of species diversity,
+  and similarity among multiple communities.  \emph{Oikos}, \bold{76},
+  5--13.
+
 }
 
-\author{\enc{Péter Sólymos}{Peter Solymos}, \email{solymos at ualberta.ca}}
-\seealso{See \code{\link{oecosimu}} for permutation settings and calculating \eqn{p}-values.}
+\author{\enc{Péter Sólymos}{Peter Solymos},
+  \email{solymos at ualberta.ca}}
+
+\seealso{See \code{\link{oecosimu}} for permutation settings and
+  calculating \eqn{p}-values.}
+
 \examples{
 ## NOTE: 'nsimul' argument usually needs to be >= 99
 ## here much lower value is used for demonstration
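
To make the additive partition above concrete, a minimal R sketch of
the default two-level case (the grouping columns are constructed here
only for illustration; not part of the commit):

  library(vegan)
  data(mite)
  ## each sample is its own lowest-level unit; all samples together
  ## form the single highest-level unit, i.e. gamma
  levsm <- data.frame(sample = as.factor(seq_len(nrow(mite))),
                      all    = as.factor(rep(1, nrow(mite))))
  ## gamma = alpha_1 + sum(beta_i); small nsimul for speed only
  adipart(mite, levsm, index = "richness", nsimul = 19)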

Modified: pkg/vegan/man/adonis.Rd
===================================================================
--- pkg/vegan/man/adonis.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/adonis.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -7,7 +7,7 @@
 \description{Analysis of variance using distance matrices --- for
   partitioning distance matrices among sources of variation and fitting
   linear models (e.g., factors, polynomial regression) to distance 
-  matrices; uses a permutation test with pseudo-F ratios.}
+  matrices; uses a permutation test with pseudo-\eqn{F} ratios.}
 
 \usage{
 adonis(formula, data, permutations = 999, method = "bray",
@@ -65,7 +65,7 @@
 and nested factors.
 
 If the experimental design has nestedness, then use \code{strata} to
-test hypotheses. For instance, imagine we are testing the whether a
+test hypotheses. For instance, imagine we are testing whether a
 plant community is influenced by nitrate amendments, and we have two
 replicate plots at each of two levels of nitrate (0, 10 ppm). We have
 replicated the experiment in three fields with (perhaps) different
@@ -121,8 +121,8 @@
 
   \item{aov.tab}{Typical AOV table showing sources of variation,
     degrees of freedom, sequential sums of squares, mean squares,
-    \eqn{F} statistics, partial R-squared and \eqn{P} values, based on \eqn{N}
-    permutations. }
+    \eqn{F} statistics, partial \eqn{R^2}{R-squared} and \eqn{P}
+    values, based on \eqn{N} permutations. }
   \item{coefficients}{ matrix of coefficients of the linear model, with
     rows representing sources of variation and columns representing
     species; each column represents a fit of a species abundance to the
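
The nested nitrate/field design described above can be spelled out as
a short R sketch (the community matrix is simulated for illustration;
not part of the commit):

  library(vegan)
  ## 3 fields x 2 nitrate levels x 2 replicate plots = 12 samples
  dat <- expand.grid(rep = gl(2, 1), NO3 = factor(c(0, 10)), field = gl(3, 1))
  set.seed(42)
  comm <- matrix(rpois(12 * 20, lambda = 5), nrow = 12)  # fake counts
  ## permutations are restricted to within fields via 'strata'
  adonis(comm ~ NO3, data = dat, strata = dat$field, permutations = 199)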

Modified: pkg/vegan/man/anosim.Rd
===================================================================
--- pkg/vegan/man/anosim.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/anosim.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -22,7 +22,7 @@
   \item{permutations}{Number of permutations or a permutation matrix
     where each row gives the permuted indices.}
   \item{distance}{Choice of distance metric that measures the
-    dissimilarity between two observations . See \code{\link{vegdist}} for
+    dissimilarity between two observations. See \code{\link{vegdist}} for
     options.  This will be used if \code{dat} was not a dissimilarity
     structure or a symmetric square matrix.}  
   \item{strata}{An integer vector or factor specifying the strata for

Modified: pkg/vegan/man/anova.cca.Rd
===================================================================
--- pkg/vegan/man/anova.cca.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/anova.cca.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -14,7 +14,7 @@
 \description{
   The function performs an ANOVA like permutation test for Constrained
   Correspondence Analysis (\code{\link{cca}}), Redundancy Analysis
-  (\code{\link{rda}}) or Constrained Analysis of Principal Coordinates
+  (\code{\link{rda}}) or distance-based Redundancy Analysis
   (\code{\link{capscale}}) to assess the significance of constraints.
 }
 \usage{
@@ -70,42 +70,6 @@
   Function \code{permutest.cca} is the proper workhorse, but
   \code{anova.cca} passes all parameters to \code{permutest.cca}.
 
-  In \code{anova.cca} the number of permutations is controlled by
-  targeted \dQuote{critical} \eqn{P} value (\code{alpha}) and accepted Type
-  II or rejection error (\code{beta}).  If the results of permutations
-  differ from the targeted \code{alpha} at risk level given by
-  \code{beta}, the permutations are
-  terminated.  If the current estimate of \eqn{P} does not
-  differ significantly from \code{alpha} of the alternative hypothesis,
-  the permutations are
-  continued with \code{step} new permutations (at the first step, the
-  number of permutations is \code{step - 1}).  However, with \code{by =
-    "terms"} a fixed number of permutations will be used, and this 
-  is given by argument \code{permutations}, or if this is missing,
-  by \code{step}.  
-  
-  The function \code{permutest.cca} implements a permutation test for
-  the \dQuote{significance} of constraints in \code{\link{cca}},
-  \code{\link{rda}} or \code{\link{capscale}}.  Community data are
-  permuted with choice \code{model = "direct"}, residuals after
-  partial CCA/RDA/CAP with choice \code{model = "reduced"} (default),
-  and residuals after CCA/RDA/CAP under choice \code{model = "full"}.
-  If there is no partial CCA/RDA/CAP stage, \code{model = "reduced"}
-  simply permutes the data and is equivalent to \code{model = "direct"}. 
-  The test statistic is ``pseudo-\eqn{F}'',
-  which is the ratio of constrained and unconstrained total Inertia
-  (Chi-squares, variances or something similar), each divided by their
-  respective ranks.  If there are no conditions (\dQuote{partial} terms), the
-  sum of all eigenvalues remains constant, so that pseudo-\eqn{F} and
-  eigenvalues would give equal results.  In partial CCA/RDA/CAP, the
-  effect of conditioning variables (\dQuote{covariables}) is removed before
-  permutation, and these residuals are added to the non-permuted fitted
-  values of partial CCA (fitted values of \code{X ~ Z}).  Consequently,
-  the total Chi-square is not fixed, and test based on pseudo-\eqn{F}
-  would differ from the test based on plain eigenvalues. CCA is a
-  weighted method, and environmental data are re-weighted at each
-  permutation step using permuted weights. 
-
   The default test is for the sum of all constrained eigenvalues.
   Setting \code{first = TRUE} will perform a test for the first
   constrained eigenvalue.  Argument \code{first} can be set either in
@@ -136,6 +100,38 @@
   will start from the same \code{\link{.Random.seed}}, and the seed
   will be advanced to the value after the longest permutation at the
   exit from the function.  
+  
+  In \code{anova.cca} the number of permutations is controlled by
+  targeted \dQuote{critical} \eqn{P} value (\code{alpha}) and accepted
+  Type II or rejection error (\code{beta}).  If the results of
+  permutations differ from the targeted \code{alpha} at risk level given
+  by \code{beta}, the permutations are terminated.  If the current
+  estimate of \eqn{P} does not differ significantly from \code{alpha} of
+  the alternative hypothesis, the permutations are continued with
+  \code{step} new permutations (at the first step, the number of
+  permutations is \code{step - 1}).  However, with \code{by="terms"} a
+  fixed number of permutations will be used, and this is given by
+  argument \code{permutations}, or if this is missing, by \code{step}.
+  
+  Community data are permuted with choice \code{model = "direct"},
+  residuals after partial CCA/RDA/CAP with choice \code{model =
+  "reduced"} (default), and residuals after CCA/RDA/CAP under choice
+  \code{model = "full"}.  If there is no partial CCA/RDA/CAP stage,
+  \code{model = "reduced"} simply permutes the data and is equivalent to
+  \code{model = "direct"}.  The test statistic is ``pseudo-\eqn{F}'',
+  which is the ratio of constrained and unconstrained total Inertia
+  (Chi-squares, variances or something similar), each divided by their
+  respective ranks.  If there are no conditions (\dQuote{partial}
+  terms), the sum of all eigenvalues remains constant, so that
+  pseudo-\eqn{F} and eigenvalues would give equal results.  In partial
+  CCA/RDA/CAP, the effect of conditioning variables
+  (\dQuote{covariables}) is removed before permutation, and these
+  residuals are added to the non-permuted fitted values of partial CCA
+  (fitted values of \code{X ~ Z}).  Consequently, the total Chi-square
+  is not fixed, and a test based on pseudo-\eqn{F} would differ from a
+  test based on plain eigenvalues. CCA is a weighted method, and
+  environmental data are re-weighted at each permutation step using
+  permuted weights.
 }
 
 \value{ 
@@ -154,9 +150,9 @@
   and they may fail if data are unavailable.
  
   The default permutation \code{model} changed from \code{"direct"} to
-  \code{"reduced"} in \pkg{vegan} version 1.14-11 (release version
-  1.15-0), and you must explicitly set \code{model = "direct"} for
-  compatibility with the old version.
+  \code{"reduced"} in \pkg{vegan} version 1.15-0, and you must
+  explicitly set \code{model = "direct"} for compatibility with the old
+  version.
 
   Tests \code{by = "terms"} and \code{by = "margin"} are consistent
   only when \code{model = "direct"}.  
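
A minimal R sketch of the tests discussed above, on standard vegan
example data (the model formula is chosen only for illustration; not
part of the commit):

  library(vegan)
  data(dune)
  data(dune.env)
  mod <- cca(dune ~ Management + A1, dune.env)
  anova(mod)                        # test of all constrained eigenvalues
  anova(mod, by = "terms")          # sequential tests, fixed permutations
  anova(mod, by = "margin")         # marginal test for each term
  anova(mod, first = TRUE)          # first constrained eigenvalue only
  permutest(mod, model = "direct")  # permute the community data directly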

Modified: pkg/vegan/man/beals.Rd
===================================================================
--- pkg/vegan/man/beals.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/beals.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -63,18 +63,17 @@
   conditioned probabilities and weighted averages.
 
   Beals smoothing was originally suggested as a method of data
-  transformation to remove excessive zeros (Beals 1984, McCune
-  1994).  However, it is not a suitable method for this purpose since it
-  does not maintain the information on species presences: a species may
-  have a higher probability of occurrence at a site where it does not
-  occur than at sites where it occurs. Moreover, it regularizes data
-  too strongly. The method may be useful in identifying species that
-  belong to the species pool (Ewald 2002) or to identify suitable
-  unoccupied patches in metapopulation analysis
-  (\enc{Münzbergová}{Munzbergova} & Herben 
-  2004). In this case, the function should be called with \code{include
-  = FALSE} for cross-validation smoothing for species; argument
-  \code{species} can be used if only one species is studied.
+  transformation to remove excessive zeros (Beals 1984, McCune 1994).
+  However, it is not a suitable method for this purpose since it does
+  not maintain the information on species presences: a species may have
+  a higher probability of occurrence at a site where it does not occur
+  than at sites where it occurs. Moreover, it regularizes data too
+  strongly. The method may be useful in identifying species that belong
+  to the species pool (Ewald 2002) or in identifying suitable unoccupied
+  patches in metapopulation analysis (\enc{Münzbergová}{Munzbergova} &
+  Herben 2004). In this case, the function should be called with
+  \code{include=FALSE} for cross-validation smoothing for species;
+  argument \code{species} can be used if only one species is studied.
 
   Swan (1970) suggested replacing zero values with degrees of absence of
   a species in a community data matrix. Swan expressed the method in
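
The recommended cross-validation use of Beals smoothing mentioned
above looks like this in practice (a small sketch on the dune data;
not part of the commit):

  library(vegan)
  data(dune)
  ## occurrence probabilities with each species excluded from its own
  ## index values
  pdune <- beals(dune, include = FALSE)
  ## a single target species can be selected with 'species'
  ptrif <- beals(dune, species = "Trifrepe", include = FALSE)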

Modified: pkg/vegan/man/betadisper.Rd
===================================================================
--- pkg/vegan/man/betadisper.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/betadisper.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -49,7 +49,7 @@
   \item{group}{vector describing the group structure, usually a factor
     or an object that can be coerced to a factor using
     \code{\link[base]{as.factor}}. Can consist of a factor with a single
-    level (i.e.~one group).}
+    level (i.e., one group).}
   \item{type}{the type of analysis to perform. Use the spatial median or
     the group centroid? The spatial median is now the default.}
   \item{bias.adjust}{logical: adjust for small sample bias in beta diversity estimates?}
@@ -100,10 +100,11 @@
   \sqrt{\Delta^2(u_{ij}^+, c_i^+) - \Delta^2(u_{ij}^-, c_i^-)},}{z[ij]^c
   = sqrt(Delta^2(u[ij]^+, c[i]^+) - Delta^2(u[ij]^-, c[i]^-)),} where
   \eqn{\Delta^2}{Delta^2} is the squared Euclidean distance between
-  \eqn{u_{ij}}{u[ij]}, the principal coordinate for the \eqn{j^{th}}{jth}
-  point in the \eqn{i^{th}}{ith} group, and \eqn{c_i}{c[i]}, the
-  coordinate of the centroid for the \eqn{i^{th}}{ith} group. The
-  super-scripted \eqn{+} and \eqn{-} indicate the real and imaginary
+  \eqn{u_{ij}}{u[ij]}, the principal coordinate for the \eqn{j}th
+  point in the \eqn{i}th group, and \eqn{c_i}{c[i]}, the
+  coordinate of the centroid for the \eqn{i}th group. The
+  super-scripted \sQuote{\eqn{+}} and \sQuote{\eqn{-}} indicate
+  the real and imaginary
   parts respectively. This is equation (3) in Anderson (2006). If the
   imaginary part is greater in magnitude than the real part, then we
   would be taking the square root of a negative value, resulting in
@@ -114,9 +115,9 @@
   
   To test if one or more groups is more variable than the others, ANOVA
   of the distances to group centroids can be performed and parametric
-  theory used to interpret the significance of F. An alternative is to
+  theory used to interpret the significance of \eqn{F}. An alternative is to
   use a permutation test. \code{\link{permutest.betadisper}} permutes model
-  residuals to generate a permutation distribution of F under the Null
+  residuals to generate a permutation distribution of \eqn{F} under the null
   hypothesis of no difference in dispersion between groups.
 
   Pairwise comparisons of group mean dispersions can also be performed
@@ -186,12 +187,12 @@
   prior to performing the analysis.
 }
 \section{Warning}{
-  Stewart Schultz noticed that the permutation test for \code{type =
-  "centroid"} had the wrong type I error and was anti-conservative. As
-  such, the default for \code{type} has been changed to \code{"median"},
-  which uses the spatial median as the group centroid. Tests suggests
-  that the permutation test for this type of analysis gives the correct
-  error rates.
+  Stewart Schultz noticed that the permutation test for
+  \code{type="centroid"} had the wrong type I error and was
+  anti-conservative. As such, the default for \code{type} has been
+  changed to \code{"median"}, which uses the spatial median as the group
+  centroid. Tests suggest that the permutation test for this type of
+  analysis gives the correct error rates.
 }
 \references{
   Anderson, M. J. (2001) A new method for non-parametric multivariate 
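
The parametric and permutation tests of multivariate dispersion
described above can be run as follows (the grouping is hypothetical;
not part of the commit):

  library(vegan)
  data(varespec)
  d <- vegdist(varespec)            # Bray-Curtis dissimilarities
  ## hypothetical grouping of the 24 sites
  grp <- factor(c(rep("grazed", 16), rep("ungrazed", 8)))
  mod <- betadisper(d, grp)         # spatial medians by default
  anova(mod)                        # parametric F on distances to medians
  permutest(mod)                    # permutation distribution of F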

Modified: pkg/vegan/man/betadiver.Rd
===================================================================
--- pkg/vegan/man/betadiver.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/betadiver.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -52,11 +52,10 @@
 
   Function \code{betadiver} finds all indices reviewed by Koleff et
   al. (2003). All these indices could be found with function
-  \code{\link{designdist}} which uses different notation, but the
-  current function provides a conventional shortcut. The function
-  only finds the indices. The proper analysis must be done with
-  functions such as \code{\link{betadisper}}, \code{\link{adonis}} or
-  \code{\link{mantel}}. 
+  \code{\link{designdist}}, but the current function provides a
+  conventional shortcut. The function only finds the indices. The proper
+  analysis must be done with functions such as \code{\link{betadisper}},
+  \code{\link{adonis}} or \code{\link{mantel}}.
 
   The indices are directly taken from Table 1 of Koleff et al. (2003),
   and they can be selected either by the index number or the subscript
@@ -92,11 +91,6 @@
   are two such similarity indices.
 }
 
-\note{The argument \code{method} was called \code{index} in older
-  versions of the function (upto \pkg{vegan} version
-  1.17-11). Argument \code{index} is deprecated, but still recognized
-  with a warning. }
-
 \references{
 
   Baselga, A. (2010) Partitioning the turnover and nestedness
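
A minimal R sketch of computing one of the reviewed indices and
passing it on to a proper analysis, as advised above (the grouping is
invented for illustration; not part of the commit):

  library(vegan)
  data(sipoo)                          # binary island bird data in vegan
  betadiver(help = TRUE)               # list the available indices
  z <- betadiver(sipoo, method = "z")  # Arrhenius z as dissimilarities
  grp <- factor(rep(c("A", "B"), length.out = nrow(sipoo)))  # invented groups
  anova(betadisper(z, grp))            # proper analysis of the index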

Modified: pkg/vegan/man/bgdispersal.Rd
===================================================================
--- pkg/vegan/man/bgdispersal.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/bgdispersal.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -48,22 +48,27 @@
 
 \value{
 Function \code{bgdispersal} returns a list containing the following matrices:
-\item{ DD1 }{ \eqn{DD1[j,k] = (a * (b - c))/((a + b + c)^2)} }
-\item{ DD2 }{ \eqn{DD2[j,k] = (2*a * (b - c))/((2*a + b + c) * (a + b +
+\item{ DD1 }{ \eqn{DD1_{j,k} = (a(b - c))/((a + b + c)^2)}{DD1[j,k] = (a * (b - c))/((a + b + c)^2)} }
+\item{ DD2 }{ \eqn{DD2_{j,k} = (2 a (b - c))/((2a + b + c)  (a + b +
+    c))}{DD2[j,k] = (2*a * (b - c))/((2*a + b + c) * (a + b +
     c))}
   where \eqn{a}, \eqn{b}, and \eqn{c} have the 
 same meaning as in the computation of binary 
 similarity coefficients. }
-\item{ DD3 }{ DD3[j,k] = \eqn{W*(A-B) / ((A+B-W)^2)} }
-\item{ DD4 }{ DD4[j,k] = \eqn{2*W*(A-B) / ((A+B)*(A+B-W)})
+\item{ DD3 }{ \eqn{DD3_{j,k} = W(A-B) / (A+B-W)^2}{DD3[j,k] = W*(A-B) / (A+B-W)^2} }
+\item{ DD4 }{ \eqn{DD4_{j,k} = 2W(A-B) / ((A+B)(A+B-W))}{DD4[j,k] = 2*W*(A-B) / ((A+B)*(A+B-W))}
 where \code{W = sum(pmin(vector1, vector2))}, \code{A = sum(vector1)},
 \code{B = sum(vector2)} }
+
 \item{ McNemar }{ McNemar chi-square statistic of asymmetry (Sokal and
-Rohlf 1995): \eqn{2*(b*log(b) + c*log(c) - (b+c)*log((b+c)/2)) / q}
-where \eqn{q = 1 + 1/(2*(b+c))} (Williams correction for continuity) }
+  Rohlf 1995):
+  \eqn{2(b \log(b) + c \log(c) - (b+c) \log((b+c)/2)) / q}{2*(b*log(b) + c*log(c) - (b+c)*log((b+c)/2)) / q},
+  where \eqn{q = 1 + 1/(2(b+c))}{q = 1 + 1/(2*(b+c))}
+  (Williams correction for continuity) }
 \item{ prob.McNemar }{ probabilities associated 
 with McNemar statistics, chi-square test. H0: no 
 asymmetry in \eqn{(b-c)}. }
+
 }
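
A small R sketch showing where the DD and McNemar components above end
up in the returned list (the input matrix is simulated; not part of
the commit):

  library(vegan)
  set.seed(1)
  ## 6 hypothetical sites x 30 species, presence/absence
  pa <- matrix(rbinom(6 * 30, 1, 0.5), nrow = 6)
  bg <- bgdispersal(pa)
  str(bg)      # list with the DD matrices and McNemar components above
  bg$DD1       # matrix of DD1[j,k] values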
 
 

Modified: pkg/vegan/man/bioenv.Rd
===================================================================
--- pkg/vegan/man/bioenv.Rd	2012-09-28 06:41:10 UTC (rev 2304)
+++ pkg/vegan/man/bioenv.Rd	2012-09-28 13:17:12 UTC (rev 2305)
@@ -27,7 +27,7 @@
     in \code{\link{vegdist}}. This is ignored if \code{comm} are dissimilarities.}
   \item{upto}{Maximum number of parameters in studied subsets.}
   \item{formula, data}{Model \code{\link{formula}} and data.}
+  \item{trace}{Trace the calculations.}
+  \item{trace}{Trace the calculations }
   \item{partial}{Dissimilarities partialled out when inspecting
     variables in \code{env}.}
   \item{parallel}{Number of parallel processes or a predefined socket


