[Zooimage-commits] r185 - in pkg/zooimage: . R man

noreply at r-forge.r-project.org
Mon Apr 12 09:35:33 CEST 2010


Author: phgrosjean
Date: 2010-04-12 09:35:20 +0200 (Mon, 12 Apr 2010)
New Revision: 185

Modified:
   pkg/zooimage/DESCRIPTION
   pkg/zooimage/NAMESPACE
   pkg/zooimage/R/RealTime.R
   pkg/zooimage/R/ZIClass.R
   pkg/zooimage/R/ZIRes.R
   pkg/zooimage/R/ZITrain.R
   pkg/zooimage/R/capabilities.R
   pkg/zooimage/R/catcher.R
   pkg/zooimage/R/errorHandling.R
   pkg/zooimage/R/gui.R
   pkg/zooimage/R/log.R
   pkg/zooimage/R/misc.R
   pkg/zooimage/R/programs.R
   pkg/zooimage/R/utilities.R
   pkg/zooimage/R/zic.R
   pkg/zooimage/R/zid.R
   pkg/zooimage/R/zie.R
   pkg/zooimage/R/zim.R
   pkg/zooimage/R/zip.R
   pkg/zooimage/R/zis.R
   pkg/zooimage/R/zzz.R
   pkg/zooimage/man/RealTime.Rd
   pkg/zooimage/man/ZIClass.Rd
   pkg/zooimage/man/ZIRes.Rd
   pkg/zooimage/man/ZITrain.Rd
   pkg/zooimage/man/utilities.Rd
   pkg/zooimage/man/zid.Rd
   pkg/zooimage/man/zie.Rd
   pkg/zooimage/man/zim.Rd
   pkg/zooimage/man/zip.Rd
   pkg/zooimage/man/zis.Rd
Log:
Some more cleaning up in R code

Modified: pkg/zooimage/DESCRIPTION
===================================================================
--- pkg/zooimage/DESCRIPTION	2010-04-09 16:45:04 UTC (rev 184)
+++ pkg/zooimage/DESCRIPTION	2010-04-12 07:35:20 UTC (rev 185)
@@ -5,8 +5,8 @@
 Date: 2010-04-06
 Author: Ph. Grosjean, K. Denis & R. Francois
 Maintainer: Ph. Grosjean <Philippe.Grosjean at umh.ac.be>
-Depends: R (>= 2.10.0), utils, tcltk, tcltk2, svMisc, svWidgets, svDialogs, MASS, randomForest, ipred, rpart, e1071, nnet, class, tree, RWeka, RColorBrewer, gplots
-Suggests: rJava
+Depends: R (>= 2.10.0), utils, tcltk, tcltk2, svMisc, svWidgets, svDialogs, grDevices, MASS, randomForest, ipred, rpart, e1071, nnet, class, tree, RColorBrewer, gplots
+Suggests: rJava, RWeka
 Description: ZooImage is a free (open source) solution for analyzing digital
 	images of zooplankton. In combination with ImageJ, a free image analysis
 	system, it processes digital images, measures individuals, trains for
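
Note that this change also demotes RWeka from Depends to Suggests, so any code path that still needs it has to test for the package at run time instead of assuming it is attached. A minimal sketch of such a guard, reusing the require() idiom found elsewhere in the package (the useWekaClassifier() wrapper and the J48() call are purely illustrative, not part of this commit):

    useWekaClassifier <- function (formula, data)
    {
        # RWeka is only suggested, so check for it before using it
        if (!require("RWeka", quietly = TRUE))
            stop("package 'RWeka' is required for this feature!")
        J48(formula, data = data)
    }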

Modified: pkg/zooimage/NAMESPACE
===================================================================
--- pkg/zooimage/NAMESPACE	2010-04-09 16:45:04 UTC (rev 184)
+++ pkg/zooimage/NAMESPACE	2010-04-12 07:35:20 UTC (rev 185)
@@ -4,9 +4,10 @@
  import(svMisc)
  import(svWidgets)
  import(svDialogs)
+ import(grDevices)
  import(ipred)
  import(MASS)
-#import(RandomForest)
+ import(randomForest)
  import(class)
  import(rpart)
  import(e1071)
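
With grDevices and randomForest now imported in the NAMESPACE, functions such as heat.colors() and randomForest() are resolved when the zooimage namespace is loaded, which is why the per-function require() calls are commented out in the files below. A rough before/after illustration (the fitRF() helper is hypothetical, not part of the package):

    # Before: each function attached its dependency itself
    # fitRF <- function (df) { require(randomForest); randomForest(Class ~ ., data = df) }

    # After: with import(randomForest) in NAMESPACE, the call resolves directly
    fitRF <- function (df)
        randomForest(Class ~ ., data = df)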

Modified: pkg/zooimage/R/RealTime.R
===================================================================
--- pkg/zooimage/R/RealTime.R	2010-04-09 16:45:04 UTC (rev 184)
+++ pkg/zooimage/R/RealTime.R	2010-04-12 07:35:20 UTC (rev 185)
@@ -1,3 +1,21 @@
+# Copyright (c) 2008-2010, Ph. Grosjean <phgrosjean at sciviews.org>
+#
+# This file is part of ZooImage
+# 
+# ZooImage is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+# 
+# ZooImage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
+
+
 # read.lst for both FlowCAM II and III by Kevin Denis
 read.lst <- function (x, skip = 2) {
   # Determine the version of the FlowCAM
@@ -73,21 +91,22 @@
 	if (!inherits(ZIDat, "ZIDat") && !inherits(ZIDat, "data.frame"))
 		stop("'ZIDat' must be a ZIDat object, or a data.frame!")
 	# Possibly load a specific package for prediction
-	package <- attr(object, "package")
-	if (is.null(package)) {
-        # This is for old version, we make sure to load
-        # MASS, RandomForest, class, rpart, e1071, ipred
-        # Rem: nnet has a special treatment in nnet2
-        (require(MASS) || stop("package 'MASS' is required!"))
-        (require(RandomForest) || stop("package 'RandomForest' is required!"))
-        (require(class) || stop("package 'class' is required!"))
-        (require(rpart) || stop("package 'rpart' is required!"))
-        (require(e1071) || stop("package 'e1071' is required!"))
-        (require(ipred) || stop("package 'ipred' is required!"))
-    } else {
-        # Make sure that the specific required package is loaded
-        eval(parse(text = paste("require(", package, ")", sep = "")))
-    }
+	# Note: this is done in NAMESPACE
+	#package <- attr(object, "package")
+	#if (is.null(package)) {
+    #    # This is for old version, we make sure to load
+    #    # MASS, RandomForest, class, rpart, e1071, ipred
+    #    # Rem: nnet has a special treatment in nnet2
+    #    (require(MASS) || stop("package 'MASS' is required!"))
+    #    (require(RandomForest) || stop("package 'RandomForest' is required!"))
+    #    (require(class) || stop("package 'class' is required!"))
+    #    (require(rpart) || stop("package 'rpart' is required!"))
+    #    (require(e1071) || stop("package 'e1071' is required!"))
+    #    (require(ipred) || stop("package 'ipred' is required!"))
+    #} else {
+    #    # Make sure that the specific required package is loaded
+    #    eval(parse(text = paste("require(", package, ")", sep = "")))
+    #}
 
   class(object) <- class(object)[-1]
 	data <- as.data.frame(ZIDat)
@@ -754,7 +773,8 @@
   # and give it the same name)
   # Indeed, .Tcl.callback(f) in tcltk package does the job... but it gives
   # cryptic names like R_call 0x13c7168
-  require(tcltk) || stop("Package 'tcltk' is needed!")
+  # Note: done in NAMESPACE
+  #require(tcltk) || stop("Package 'tcltk' is needed!")
   # Check that 'f' is a function with no arguments (cannot handle them, currently)
   is.function(f) || stop("'f' must be a function!")
   is.null(formals(f)) || stop("The function used cannot (yet) have arguments!")
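
In the prediction code above, as in predict.ZIClass() further down, the method drops its own class before calling predict() again, so that dispatch falls through to the predict method of the wrapped algorithm (lda, randomForest, nnet, ...) instead of recursing. A stand-alone sketch of that pattern with a toy wrapper class, not code from the package:

    predict.myWrapper <- function (object, newdata, ...)
    {
        # Remove the wrapper class so the next predict() call dispatches
        # to the underlying model's own method
        class(object) <- class(object)[-1]
        predict(object, newdata = newdata, ...)
    }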

Modified: pkg/zooimage/R/ZIClass.R
===================================================================
--- pkg/zooimage/R/ZIClass.R	2010-04-09 16:45:04 UTC (rev 184)
+++ pkg/zooimage/R/ZIClass.R	2010-04-12 07:35:20 UTC (rev 185)
@@ -1,6 +1,6 @@
-# {{{ Copyright (c) 2004, Ph. Grosjean <phgrosjean at sciviews.org>
+# Copyright (c) 2004-2010, Ph. Grosjean <phgrosjean at sciviews.org>
 #
-# This file is part of ZooImage .
+# This file is part of ZooImage
 #
 # ZooImage is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -14,98 +14,96 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
-# }}}
 
-# Version 1.2.0: check for package loading, and add a 'package' attribute to ZIClass
+# Version 1.2.0: check package loading, and add a 'package' attribute to ZIClass
 ### TODO: allow for defining parameters and use a plugin mechanism
 
-# {{{ ziclass
-# {{{ ZIClass
-#' Modifications in calculation of probabilities to accept variables selection v1.2-2
-"ZIClass" <- function(df,
-	algorithm = c("lda", "randomForest"), package = c("MASS", "randomForest"),
-	Formula = Class ~ logArea + Mean + StdDev + Mode + Min + Max + logPerim. +
-		logMajor + logMinor + Circ. + logFeret + IntDen + Elongation + CentBoxD +
-		GrayCentBoxD + CentroidsD + Range + MeanPos + SDNorm + CV,
-	calc.vars = "calc.vars", k.xval = 10, ...) {
+# Modifications in calculation of probabilities to accept variables selection v1.2-2
+"ZIClass" <- function (df, algorithm = c("lda", "randomForest"),
+package = c("MASS", "randomForest"), Formula = Class ~ logArea + Mean + StdDev +
+Mode + Min + Max + logPerim. + logMajor + logMinor + Circ. + logFeret + IntDen +
+Elongation + CentBoxD + GrayCentBoxD + CentroidsD + Range + MeanPos + SDNorm + CV,
+calc.vars = "calc.vars", k.xval = 10, ...)
+{
+	# Check package availability
+	# Note: this is supposed to be managed in the NAMESPACE
+	# package <- package[1]
+	# if (!is.null(package)) require( package, character.only = TRUE)
 
-	# check package availability
-	### TODO: add error checking in all evals!
-	require(ipred)
-	package <- package[1]
-	if (!is.null(package)){
-		require( package, character.only = TRUE )
-	}
-
-	# check calc.vars
+	# Check calc.vars
 	calc.vars <- calc.vars[1]
 	if (!is.null(calc.vars)) {
-		CV <- match.fun( calc.vars )
+		CV <- match.fun(calc.vars)
 		df <- CV(df)
 	}
 
-	# algorithm
+	# Algorithm
 	algorithm <- algorithm[1]
-	algo.fun  <- match.fun( algorithm )
+	algo.fun  <- match.fun(algorithm)
 	ZI.class <- algo.fun(Formula, data = df, ...)
-	ZI.class <- structure( ZI.class,
-		class     = c("ZIClass", class(ZI.class)),
+	ZI.class <- structure(ZI.class,
+		class = c("ZIClass", class(ZI.class)),
 		algorithm = algorithm,
-		package   = package,
+		package = package,
 		calc.vars = CV,
-		classes   = df[[ as.character(Formula)[2] ]] )
+		classes = df[[as.character(Formula)[2]]]
+	)
 
 	# Calculate predictions with full training set
-    attr(ZI.class, "predict") <- predict(ZI.class, df, calc.vars = FALSE, class.only = TRUE)
+    attr(ZI.class, "predict") <- predict(ZI.class, df, calc.vars = FALSE,
+		class.only = TRUE)
 
-	### Calculation of probabilities
+	# Calculation of probabilities
   	if (algorithm == "randomForest") {
   		# Use Formula for the probabilities v1.2-2
   		rf <- randomForest(formula = Formula, data = df)
-  		attr(ZI.class, "proba") <- predict(object = rf, newdata = df, type = "prob")
+  		attr(ZI.class, "proba") <- predict(object = rf, newdata = df,
+			type = "prob")
 	}
 
 	# Possibly make a k-fold cross-validation and check results
 	if (!is.null(k.xval)) {
 		mypredict <- if (algorithm == "lda") {
-			function(object, newdata) predict(object, newdata = newdata)$class
+			function (object, newdata)
+				predict(object, newdata = newdata)$class
 		} else {
-			function(object, newdata) predict(object, newdata = newdata, type = "class")
+			function (object, newdata)
+				predict(object, newdata = newdata, type = "class")
 		}
-    	res <- cv( attr(ZI.class, "classes" ), Formula, data = df, model = get(algorithm),
-			predict = mypredict, k = k.xval, predictions = TRUE, ...)$predictions
+    	res <- cv(attr(ZI.class, "classes"), Formula, data = df,
+			model = get(algorithm), predict = mypredict, k = k.xval,
+			predictions = TRUE, ...)$predictions
 		attr(ZI.class, "kfold.predict") <- res
 		attr(ZI.class, "k") <- k.xval
-    attr(ZI.class, "formula") <- Formula
-    attr(ZI.class, "path") <- attr(df, "path")
+		attr(ZI.class, "formula") <- Formula
+		attr(ZI.class, "path") <- attr(df, "path")
 	}
 	return(ZI.class)
 }
-#}}}
 
-# {{{ print.ZIClass
-"print.ZIClass" <- function(x, ...) {
-
+"print.ZIClass" <- function (x, ...)
+{
 	algorithm <- attr(x, "algorithm")
-	classes   <- attr(x, "classes")
-	lclasses  <- levels(classes)
-    predict   <- attr(x, "predict")
-	k         <- attr(x, "k")
+	classes <- attr(x, "classes")
+	lclasses <- levels(classes)
+    predict <- attr(x, "predict")
+	k <- attr(x, "k")
 	cat("A ZIClass object predicting for", length(lclasses), "classes:\n")
 	print(lclasses)
 	Confu <- confu(classes, predict)
-	mism <- 100 * (1 - ( sum(diag(Confu)) / sum(Confu) ) )
+	mism <- 100 * (1 - (sum(diag(Confu)) / sum(Confu)))
 
 	# Change the number of digits to display
 	oldDigits <- options(digits = 4); on.exit(options(oldDigits))
 	cat("\nAlgorithm used:", algorithm, "\n")
 	cat("Mismatch in classification: ", mism, "%\n", sep = "")
 	if (!is.null(k)) {
-    	cat("k-fold cross validation error estimation (k = ", k, "):\n", sep = "")
+    	cat("k-fold cross validation error estimation (k = ", k, "):\n",
+			sep = "")
 		kfold.predict <- attr(x, "kfold.predict")
-		prior         <- table(classes)
-		ok            <- diag(table(classes, kfold.predict))
-		err           <- 100 * (1 - (sum(ok) / sum(prior)) )
+		prior <- table(classes)
+		ok <- diag(table(classes, kfold.predict))
+		err <- 100 * (1 - (sum(ok) / sum(prior)))
 		cat(err, "%\n", sep = "")
 		cat("\nError per class:\n")
 		`Error (%)` <- sort(1 - (ok / prior)) * 100
@@ -113,77 +111,54 @@
 	}
 	return(invisible(x))
 }
-# }}}
 
-# {{{ predict.ZIClass
-"predict.ZIClass" <- function(object, ZIDat, calc.vars = TRUE, class.only = FALSE, ...) {
+"predict.ZIClass" <- function (object, ZIDat, calc.vars = TRUE,
+class.only = FALSE, ...)
+{
 
 	# Make sure we have correct objects
-	mustbe( object, "ZIClass" )
-	mustbe( ZIDat , c("ZIDat", "data.frame") )
+	mustbe(object, "ZIClass")
+	mustbe(ZIDat , c("ZIDat", "data.frame"))
 
 	# Possibly load a specific package for prediction
-	package <- attr(object, "package")
-	if (is.null(package)) {
-        # This is for old version, we make sure to load
-        # MASS, randomForest, class, rpart, e1071, ipred
-        # Rem: nnet has a special treatment in nnet2
-        require(MASS)
-        require(randomForest)
-        require(class)
-        require(rpart)
-        require(e1071)
-        require(ipred)
-    } else {
-        # Make sure that the specific required package is loaded
-        require( package, character.only = TRUE )
-    }
+	# Note: this is done in NAMESPACE
+	# package <- attr(object, "package")
+	# if (!is.null(package)) require(package, character.only = TRUE)
     class(object) <- class(object)[-1]
 	data <- as.data.frame(ZIDat)
-	if (calc.vars){
-		data <- attr(object, "calc.vars")(data)
-	}
+	if (calc.vars) data <- attr(object, "calc.vars")(data)
 	Ident <- predict(object, newdata = data, type = "class")
 
 	# Special case for prediction from an LDA (list with $class item)
-	if (inherits(Ident, "list") && "class" %in% names(Ident)){
+	if (inherits(Ident, "list") && "class" %in% names(Ident))
 		Ident <- Ident$class
-	}
 	if (!class.only) {
 		res <- cbind(ZIDat, Ident)
 		class(res) <- class(ZIDat)
-	} else {
-		res <- Ident
-	}
+	} else res <- Ident
 	return(res)
 }
-# }}}
-# }}}
 
-# {{{ confusion
-# {{{ confu
-"confu" <- function(classes1, classes2, classes.predicted = FALSE) {
-
+"confu" <- function (classes1, classes2, classes.predicted = FALSE)
+{
 	if (is.factor(classes1) || is.factor(classes2)) {
-		if (NROW(classes1) != NROW(classes2)){
+		if (NROW(classes1) != NROW(classes2))
 			stop("Not same number of items in classes1 and classes2")
-		}
 
 		# Check that levels match
-		mustmatch( levels(classes1), levels(classes2),
-			msg = "'Class' levels in the two objects do not match" )
+		mustmatch(levels(classes1), levels(classes2),
+			msg = "'Class' levels in the two objects do not match")
 		clCompa <- data.frame(Class.x = classes1, Class.y = classes2)
 	} else { # Merge two data frame according to common objects in "Id" column
 
 		# Check levels match
-		mustmatch( levels(classes1$Class), levels(classes22$Class),
+		mustmatch(levels(classes1$Class), levels(classes2$Class),
 			msg = "Levels for 'Class' in the two objects do not match")
 
 		# Are there common objects left?
 		clCompa <- merge(classes1, classes2, by = "Id")
-		if (nrow(clCompa) == 0){
+		if (nrow(clCompa) == 0)
 			stop("No common objects between the two 'classes' objects")
-		}
 	}
 
 	# How many common objects by level?
@@ -197,209 +172,201 @@
 	}
 
 	# Pourcent of common objects
-	Acc <- sum(diag(Conf)) / sum(Conf)*100
+	Acc <- sum(diag(Conf)) / sum(Conf) * 100
 
 	# Change labels to get a more compact presentation
 	colnames(Conf) <- formatC(1:ncol(Conf), digits = 1, flag = "0")
 	rownames(Conf) <- paste(colnames(Conf), rownames(Conf))
 
 	# Results
-	res <- Conf
-	attr(res, "accuracy") <- Acc
-	attr(res, "nbr.per.class") <- NbPerClass
-	return(res)
+	attr(Conf, "accuracy") <- Acc
+	attr(Conf, "nbr.per.class") <- NbPerClass
+	return(Conf)
 }
-# }}}
 
-# {{{ confu.map
-"confu.map" <- function(set1, set2, level = 1){
-
-	opar <- par(no.readonly = TRUE) ; on.exit(par = opar)
+"confu.map" <- function (set1, set2, level = 1)
+{
+	opar <- par(no.readonly = TRUE)
+	on.exit(par(opar))
     par(mar = c(5, 12, 4, 2) + 0.1)
 
 	n <- length(levels(set1))
-	image(1:n, 1:n, 1/ (t(confu(set1, set2)[n:1, 1:n])),
-		col = heat.colors(10),
+	image(1:n, 1:n, 1 / (t(confu(set1, set2)[n:1, 1:n])), col = heat.colors(10),
 		xlab = "", ylab = "", xaxt = "n", yaxt = "n")
     axis(1, at = 1:n, las = 2)
     axis(2, at = n:1, labels = paste(levels(set1), 1:n), las = 1)
     abline(h = (1:(n + 1)) - 0.5, lty = 2, col = "gray")
     abline(v = (1:(n + 1)) - 0.5, lty = 2, col = "gray")
 }
-# }}}
 
-# {{{ confusion.tree
 # New function v1.2-2 using library gplots
-confusion.tree <- function (confmat, maxval, margin=NULL, Rowv = TRUE, Colv = TRUE) {
+"confusion.tree" <- function (confmat, maxval, margin=NULL, Rowv = TRUE,
+Colv = TRUE)
+{
 	nX <- nrow(confmat)
 	nY <- ncol(confmat)
-	nZ <- nX*nY
-	confmat <- pmin( confmat, maxval )
+	nZ <- nX * nY
+	confmat <- pmin(confmat, maxval)
 
-	require(RColorBrewer)
+	# Note: done in NAMESPACE
+	# require(RColorBrewer)
+	# require(gplots)
 	mypalette <- brewer.pal(maxval-1, "Spectral")
-	library(gplots)
-	heatmap.2(confmat, col= c(0,mypalette), symm=TRUE, margin=margin,
-		trace="both", Rowv=Rowv, Colv=Colv, cexRow=0.2 + 1/log10(nX),
-		cexCol=0.2 + 1/log10(nY),tracecol="Black", linecol=FALSE)
+	heatmap.2(confmat, col= c(0,mypalette), symm = TRUE, margin = margin,
+		trace = "both", Rowv = Rowv, Colv = Colv, cexRow = 0.2 + 1 / log10(nX),
+		cexCol = 0.2 + 1 / log10(nY), tracecol = "Black", linecol = FALSE)
 }
-# }}}
 
-# {{{ confusion.bar
 # New function v 1.2-2 false positive and negative
-confusion.bar <- function(confmat, mar=NULL) {
-	mustbe(confmat, c("table", "matrix" ) )
+"confusion.bar" <- function (confmat, mar = NULL)
+{
+	mustbe(confmat, c("table", "matrix"))
 	Nn <- nrow(confmat)
 
 	## percent of correctly predicted objects in the test set
-	pred.tok <- diag(confmat) / colSums(confmat)*100
+	pred.tok <- diag(confmat) / colSums(confmat) * 100
 
-	# If there are no items good recognize 0/0 = NaN so replace NaN by 0 for calculation
-  	if (NaN %in% pred.tok){
+	# If no items are correctly recognized, 0/0 = NaN, so replace NaN by 0
+	# for the calculation
+  	if (NaN %in% pred.tok)
 		pred.tok[pred.tok == "NaN"] <- 0
-  	}
 
-	# percent of items in the test set predicted in its category
-	pred.tfrac <- diag(confmat) / rowSums(confmat)*100
-	pred.tfrac[ is.nan( pred.tfrac) ] <- 0
+	# Percent of items in the test set predicted in its category
+	pred.tfrac <- diag(confmat) / rowSums(confmat) * 100
+	pred.tfrac[is.nan(pred.tfrac)] <- 0
 	prediction <- cbind(pred.tok, pred.tfrac)
 	prediction.df <- data.frame(prediction)
-	CR <- prediction[1:Nn,2] #
-	FN <- 100 - CR # faux négatif = objects which exist in the test set but not in the training set;
+	CR <- prediction[1:Nn, 2]
+	FN <- 100 - CR # false negatives = objects which exist in the test set
+	# but not in the training set;
 
 	# they are wrongly predicted as not to belong to a particular group
 	prediction.df$FN <- FN
 
 	#put to scale
-	CR2              <- prediction[1:Nn,1]
-	FP               <- 100-CR2 # Faux positifs
+	CR2 <- prediction[1:Nn, 1]
+	FP <- 100-CR2 # False positives
 	prediction.df$FP <- FP
-	prediction.df    <- round(prediction.df,0) # arrondi les valeurs à des dombres entiers
-	Failure          <- prediction.df[c("FN", "FP")]
+	prediction.df <- round(prediction.df, 0)
+	Failure <- prediction.df[c("FN", "FP")]
 
-	# put all to scale
-	allN        <- CR+FN # all negative
-	allP        <- CR2+FP # all positive
-	cr          <- (CR/allN)*100 #% good identify by pc
-	cr2         <- (CR2/allP)*100 #% good identify by pc
-	fn          <- (FN/allN)*100 # percentage of FN
-	fp          <- (FP/allP)*100 # percentage of FP
-	all         <- matrix( c( fn, cr, cr2, fp), ncol = 4); colnames(all) <- c( "fn", "cr", "cr2", "fp")
-	Order       <- order( all[, 2] + all[, 3] , decreasing = TRUE) # trie du mieux reconnu au moin bon
-	all2        <- t(all[Order, ]) # transposer la matrice triée
-	Failure     <- Failure[Order,] # grp du moin au plus d'erreur
+	# Put all data to scale
+	allN <- CR + FN # all negative
+	allP <- CR2 + FP # all positive
+	cr <- (CR / allN) * 100 # % correctly identified, rescaled
+	cr2 <- (CR2 / allP) * 100 # % correctly identified, rescaled
+	fn <- (FN / allN) * 100 # percentage of FN
+	fp <- (FP / allP) * 100 # percentage of FP
+	all <- matrix(c(fn, cr, cr2, fp), ncol = 4)
+	colnames(all) <- c( "fn", "cr", "cr2", "fp")
+	Order <- order(all[, 2] + all[, 3] , decreasing = TRUE)
+	all2 <- t(all[Order, ]) # Transpose the sorted matrix
+	Failure <- Failure[Order, ] # Sort failures
 	Failure.mat <- as.matrix(Failure)
-	Nmat        <- ncol(all2)
+	Nmat <- ncol(all2)
 
-	#### Construction du graphe
-	valx  <- matrix( c(rep(2 , Nmat), rep(198, Nmat)),ncol=2) #matrix for location
-	valx2 <- matrix( c(rep(98, Nmat), rep(102, Nmat)),ncol=2) #matrix for location
-	omar  <- par("mar") ; on.exit( par(omar) ) # mar = margin size c(bottom, left, top, right)
-	par(mar=mar);
-	barplot(all2[,!is.na(all2[2,])], horiz=TRUE,
-		col=c("PeachPuff2", "green3", "green3", "lemonChiffon2"),
-		xaxt="n", las=1, space = 0)
-	text(valx  , row(valx) - 0.45 , Failure.mat , cex=0.7)
-	text(valx2 , row(valx2)- 0.45 , 100 - Failure.mat , cex=0.7)
+	# Plotting of the data
+	valx  <- matrix(c(rep(2 , Nmat), rep(198, Nmat)), ncol = 2)
+	valx2 <- matrix(c(rep(98, Nmat), rep(102, Nmat)), ncol = 2)
+	omar  <- par("mar")
+	on.exit(par(omar)) # mar = margin size c(bottom, left, top, right)
+	par(mar = mar)
+	barplot(all2[, !is.na(all2[2, ])], horiz = TRUE,
+		col = c("PeachPuff2", "green3", "green3", "lemonChiffon2"),
+		xaxt = "n", las = 1, space = 0)
+	text(valx, row(valx) - 0.45, Failure.mat , cex = 0.7)
+	text(valx2, row(valx2) - 0.45, 100 - Failure.mat , cex = 0.7)
 
-	#### Ajout des légendes
-  	legend(100, Nmat+(Nmat/15),
-		legend = c("false negative (FN)", "true positive (TP)", "false positive (FP)"),
+	# Add a legend
+  	legend(100, Nmat + (Nmat / 15), legend = c("false negative (FN)",
+		"true positive (TP)", "false positive (FP)"),
 		xjust = 0.5, fill = c("PeachPuff2", "green3", "lemonChiffon2"),
-		bty="n", horiz = TRUE)
-	legend(100, Nmat/55, "Percentage", xjust = 0.5, bty = "n")
-	segx0 <- rep(c(25, 50, 75, 125, 150, 175),2)
-	segy0 <- rep(c(0, Nmat),c(6,6))
-	segments(segx0[c(1:6)], segy0[c(1:6)], segx0[c(7:12)], segy0[c(7:12)], col="red", lty=2)
+		bty = "n", horiz = TRUE)
+	legend(100, Nmat / 55, "Percentage", xjust = 0.5, bty = "n")
+	segx0 <- rep(c(25, 50, 75, 125, 150, 175), 2)
+	segy0 <- rep(c(0, Nmat), c(6, 6))
+	segments(segx0[c(1:6)], segy0[c(1:6)], segx0[c(7:12)], segy0[c(7:12)],
+		col = "red", lty = 2)
 	valx3 <- c(25, 50, 75, 125, 150, 175)
-	text(valx3[1:6], -(Nmat/35), labels= segx0[c(1:3, 7:9)], cex=0.7)
+	text(valx3[1:6], -(Nmat / 35), labels = segx0[c(1:3, 7:9)], cex = 0.7)
 }
-# }}}
-# }}}
 
-# {{{ nnet2
+"nnet2" <- function (formula, data, size = 7, rang = 0.1, decay = 5e-4,
+maxit = 1000, ...)
+{
+ 	# Note: done in NAMESPACE
+	# require(nnet)
 
-# {{{ nnet2
-"nnet2" <- function(formula, data, size = 7, rang = 0.1, decay = 5e-4, maxit = 1000, ...) {
- 	require(nnet)
-
 	structure(
-		nnet(formula = formula, data = data, size = size, rang = rang, decay = decay, maxit = maxit, ...),
-		class = c("nnet2", "nnet.formula", "nnet") )
+		nnet(formula = formula, data = data, size = size, rang = rang,
+			decay = decay, maxit = maxit, ...),
+		class = c("nnet2", "nnet.formula", "nnet"))
 }
-# }}}
 
-# {{{ predict.nnet2
-"predict.nnet2" <- function (object, newdata, type = c("raw", "class"), ...) {
-
-	mustbe( object, "nnet2" )
-	require(nnet)
+"predict.nnet2" <- function (object, newdata, type = c("raw", "class"), ...)
+{
+	# Note: done in NAMESPACE
+	# require(nnet)
+	mustbe(object, "nnet2")
     class(object) <- class(object)[-1]
 	res <- predict(object, newdata = newdata, type = type, ...)
 	# If type is class, we got a character vector... but should get a factor
-	if (type == "class"){
+	if (type == "class")
     	res <- factor(res, levels = object$lev)
-	}
 	return(res)
 }
-# }}}
-# }}}
 
-# {{{ lvq
-# {{{ lvq
-#' Extract classes and training vars from data, according to formula lhs ~ rhs
-#' This is a very simplified way of doing it... It does not manage complex formula constructions!
-"lvq" <- function(formula, data, k = 5, size = NULL) {
-	require(class)
+# Extract classes and training vars from data, according to formula lhs ~ rhs
+# This is a very simplified way of doing it... It does not manage complex
+# formula constructions!
+"lvq" <- function (formula, data, k = 5, size = NULL)
+{
+	# Note: done in NAMESPACE
+	# require(class)
     vars <- all.vars(formula)
 	train <- data[, vars[-1]]
 	cl <- data[, vars[1]]
 	lev <- levels(cl)
 	codebk <- olvq1(train, cl, lvqinit(train, cl, k = k, size = size))
-	res <- list(codebook = codebk, data = data, vars = vars, classes = cl, lev = lev)
+	res <- list(codebook = codebk, data = data, vars = vars, classes = cl,
+		lev = lev)
 	class(res) <- "lvq"
 	return(res)
 }
-# }}}
 
-# {{{ predict.lvq
-"predict.lvq" <- function(object, newdata, type = "class", ...) {
-   	mustbe( object, "lvq" )
-	require(class)
-    if (missing(newdata)) {
-		newdata <- object$data
-	}
+"predict.lvq" <- function (object, newdata, type = "class", ...)
+{
+   	# Note: done in NAMESPACE
+	# require(class)
+	mustbe(object, "lvq")
+    if (missing(newdata)) newdata <- object$data
 	lvqtest(object$codebook, newdata[, object$vars[-1]])
 }
-# }}}
-# }}}
 
-# {{{ FormVarsSelect
-#' Formula calculation by variables selection for the classifier creation v1.2-2
-FormVarsSelect <- function(x){
-
+# Formula calculation by variables selection for the classifier creation v1.2-2
+FormVarsSelect <- function (x)
+{
 	# x must be a ZItrain object
-	mustbe( x, "ZI1Train" )
+	mustbe(x, "ZI1Train")
 
 	# Parameters measured on particles and new variables calculated
 	mes <- as.vector(colnames(calc.vars(x)))
 
 	# Selection of features for the creation of the classifier
-	keep <- select.list(list = mes,
-	  preselect = c("ECD", "FIT_Area_ABD", "FIT_Diameter_ABD", "FIT_Volume_ABD", "FIT_Diameter_ESD",
-    "FIT_Volume_ESD", "FIT_Length", "FIT_Width", "FIT_Aspect_Ratio", "FIT_Transparency",
-    "FIT_Intensity", "FIT_Sigma_Intensity", "FIT_Sum_Intensity", "FIT_Compactness",
-    "FIT_Elongation", "FIT_Perimeter", "FIT_Convex_Perimeter", "FIT_Roughness",
-    "FIT_Ch1_Peak", "FIT_Ch1_TOF", "FIT_Ch2_Peak", "FIT_Ch2_TOF",
-    "Area", "Mean", "StdDev", "Mode", "Min", "Max", "Perim.", "Width","Height",
-    "Major", "Minor", "Circ.", "Feret", "IntDen", "Median", "Skew", "Kurt", "Elongation",
-    "CentBoxD", "GrayCentBoxD", "CentroidsD", "Range", "MeanPos", "SDNorm", "CV", "logArea",
-    "logPerim.", "logMajor", "logMinor", "logFeret"),
-    multiple = TRUE, title = "Select variables to keep")
+	keep <- select.list(list = mes, preselect = c("ECD", "FIT_Area_ABD",
+		"FIT_Diameter_ABD", "FIT_Volume_ABD", "FIT_Diameter_ESD",
+		"FIT_Volume_ESD", "FIT_Length", "FIT_Width", "FIT_Aspect_Ratio",
+		"FIT_Transparency", "FIT_Intensity", "FIT_Sigma_Intensity",
+		"FIT_Sum_Intensity", "FIT_Compactness", "FIT_Elongation",
+		"FIT_Perimeter", "FIT_Convex_Perimeter", "FIT_Roughness",
+		"FIT_Ch1_Peak", "FIT_Ch1_TOF", "FIT_Ch2_Peak", "FIT_Ch2_TOF",
+		"Area", "Mean", "StdDev", "Mode", "Min", "Max", "Perim.", "Width",
+		"Height", "Major", "Minor", "Circ.", "Feret", "IntDen", "Median",
+		"Skew", "Kurt", "Elongation", "CentBoxD", "GrayCentBoxD", "CentroidsD",
+		"Range", "MeanPos", "SDNorm", "CV", "logArea", "logPerim.", "logMajor",
+		"logMinor", "logFeret"),
+		multiple = TRUE, title = "Select variables to keep")
 	# Creation of one formula for classifier calculation
 	res <- as.formula(paste("Class ~ ", paste(keep, collapse= "+")))
 	return(res)
 }
-# }}}
-
-# :tabSize=4:indentSize=4:noTabs=false:folding=explicit:collapseFolds=1:
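
The reworked confu() still returns the contingency table of the two classifications, with the overall accuracy and the number of objects per class attached as attributes. A small usage sketch with made-up factors, not data from the package:

    manual    <- factor(c("cope", "cope", "egg", "egg", "detritus", "cope"),
        levels = c("cope", "detritus", "egg"))
    predicted <- factor(c("cope", "egg",  "egg", "egg", "detritus", "cope"),
        levels = c("cope", "detritus", "egg"))
    cm <- confu(manual, predicted)
    attr(cm, "accuracy")       # percentage of matching classifications
    attr(cm, "nbr.per.class")  # number of objects per class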

Modified: pkg/zooimage/R/ZIRes.R
===================================================================
--- pkg/zooimage/R/ZIRes.R	2010-04-09 16:45:04 UTC (rev 184)
+++ pkg/zooimage/R/ZIRes.R	2010-04-12 07:35:20 UTC (rev 185)
@@ -1,6 +1,6 @@
-# {{{ Copyright (c) 2004, Ph. Grosjean <phgrosjean at sciviews.org>
+# Copyright (c) 2004-2010, Ph. Grosjean <phgrosjean at sciviews.org>
 #
-# This file is part of ZooImage .
+# This file is part of ZooImage
 # 
 # ZooImage is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -14,46 +14,45 @@
 # 
 # You should have received a copy of the GNU General Public License
 # along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
-# }}}
 
-# {{{ process.sample
-"process.sample" <- function(ZidFile, ZIClass, ZIDesc,
-		abd.taxa = NULL, abd.groups = NULL, abd.type = "absolute",
-		bio.taxa = NULL, bio.groups = NULL, bio.conv = c(1, 0, 1), headers = c("Abd", "Bio"),
-		spec.taxa = NULL, spec.groups = NULL, spec.breaks = seq(0.25, 2, by = 0.1), spec.use.Dil = TRUE,
-		exportdir = NULL, show.log = TRUE, SemiTab = NULL, Semi = FALSE) {
-    
+"process.sample" <- function (ZidFile, ZIClass, ZIDesc, abd.taxa = NULL,
+abd.groups = NULL, abd.type = "absolute", bio.taxa = NULL, bio.groups = NULL,
+bio.conv = c(1, 0, 1), headers = c("Abd", "Bio"), spec.taxa = NULL,
+spec.groups = NULL, spec.breaks = seq(0.25, 2, by = 0.1), spec.use.Dil = TRUE,
+exportdir = NULL, show.log = TRUE, SemiTab = NULL, Semi = FALSE)
+{    
 	# Check if the ZidFile exists
-	checkFileExists( ZidFile )
+	checkFileExists(ZidFile)
 	
 	# Check if ZIClass is of the right class
 	mustbe(ZIClass, "ZIClass")
 	
 	# Get ZIDat from the ZidFile
 	ZIDat <- read.zid(ZidFile)
-	Sample <- get.sampleinfo(ZidFile, type = "sample", ext = extensionPattern(".zid") )
+	Sample <- get.sampleinfo(ZidFile, type = "sample",
+		ext = extensionPattern(".zid"))
 	
 	# Check if one can get sample metadata from ZIDesc
 	RES <- ZIDesc[ZIDesc$Label == Sample, ] 
-	if (nrow(RES) != 1) {
-		stop( "ZIDesc has no data for that sample!" )
-	}
+	if (nrow(RES) != 1)
+		stop("ZIDesc has no data for that sample!")
 	
 	# Predict classes (add a new column Ident to the table)
 	ZIDat <- predict(ZIClass, ZIDat)
 	
   # Modif Kevin Denis for Semi Automatic classification
-	if(Semi){
-    if(is.null(SemiTab)){
-      stop("You must provide a table with semi automatic classification")
-    }
-    if(!inherits(SemiTab, "ZITrain")) stop("SemiTab must be a ZItrain object with manual classification")
-    # Extract ZidFile subtable from SemiTab (Semi automatic classification general table)
-    SemiClass <- SemiTab[sub("[+].*", "", as.character(SemiTab$Label)) %in% noext(ZidFile),]
-    # Repalce automatic recogntion by semi automatic one
-    for(j in 1: nrow(SemiClass)){
-      ZIDat[ZIDat$Item == j, ]$Ident <- SemiClass[SemiClass$Item == j,]$Class
-    }
+	if (isTRUE(Semi)) {
+		if(is.null(SemiTab))
+			stop("You must provide a table with semi automatic classification")
+		if (!inherits(SemiTab, "ZITrain"))
+			stop("SemiTab must be a ZItrain object with manual classification")
+		# Extract ZidFile subtable from SemiTab
+		# (Semi automatic classification general table)
+		SemiClass <- SemiTab[sub("[+].*", "", as.character(SemiTab$Label)) %in%
+			noext(ZidFile),]
+		# Replace the automatic recognition by the semi-automatic one
+		for (j in 1:nrow(SemiClass))
+			ZIDat[ZIDat$Item == j, ]$Ident <- SemiClass[SemiClass$Item == j, ]$Class
 	}
 
 	Grp <- levels(ZIDat$Ident)	
@@ -64,8 +63,8 @@
 	}
 	
 	# Process abundances
-	ABD <- Abd.sample(ZIDat, Sample, taxa = abd.taxa, groups = abd.groups, type = abd.type,
-		header = headers[1])
+	ABD <- Abd.sample(ZIDat, Sample, taxa = abd.taxa, groups = abd.groups,
+		type = abd.type, header = headers[1])
 	RES <- cbind(RES, t(ABD))
 	
 	# Process biomasses
@@ -87,8 +86,8 @@
 			spec.groups <- as.list(c("", Grp))
 			names(spec.groups) <- c("total", Grp)
 		}
-		SPC <- Spectrum.sample(ZIDat, Sample, taxa = spec.taxa, groups = spec.groups,
-			breaks = spec.breaks, use.Dil = spec.use.Dil)
+		SPC <- Spectrum.sample(ZIDat, Sample, taxa = spec.taxa,
+			groups = spec.groups, breaks = spec.breaks, use.Dil = spec.use.Dil)
 		SPClist <- list()
 		SPClist[[Sample]] <- SPC
 		attr(RES, "spectrum") <- SPClist
@@ -97,27 +96,24 @@
 	class(RES) <- c("ZI1Res", "ZIRes", "data.frame")
 	return(RES)
 }
-# }}}
 
-# {{{ process.samples
-"process.samples" <- function(path = ".", ZidFiles = NULL, ZIClass, 
-	ZIDesc = read.description("Description.zis"),
-	abd.taxa = NULL, abd.groups = NULL, abd.type = "absolute",
-	bio.taxa = NULL, bio.groups = NULL, bio.conv = c(1, 0, 1), headers = c("Abd", "Bio"),
-	spec.taxa = NULL, spec.groups = NULL, spec.breaks = seq(0.25, 2, by = 0.1), spec.use.Dil = TRUE,
-	exportdir = NULL, show.log = TRUE, bell = FALSE, SemiTab = NULL, Semi = FALSE) {
-
+"process.samples" <- function (path = ".", ZidFiles = NULL, ZIClass, 
+ZIDesc = read.description("Description.zis"), abd.taxa = NULL, abd.groups = NULL,
+abd.type = "absolute", bio.taxa = NULL, bio.groups = NULL, bio.conv = c(1, 0, 1),
+headers = c("Abd", "Bio"), spec.taxa = NULL, spec.groups = NULL,
+spec.breaks = seq(0.25, 2, by = 0.1), spec.use.Dil = TRUE, exportdir = NULL,
+show.log = TRUE, bell = FALSE, SemiTab = NULL, Semi = FALSE)
+{
 	# Determine which samples do we have to process...
 	if (is.null(ZidFiles)) {
     	# Get the list of files from ZIDesc
 		ZidFiles <- paste(ZIDesc$Label, ".zid", sep = "")
-		if (path == "."){
-			path <- getwd()
-		}
+		if (path == ".") path <- getwd()
 		ZidFiles <- file.path(path, ZidFiles)
 	} else { # Check that all zid files have entries in ZIDesc
-		Samples <- get.sampleinfo(ZidFiles, type = "sample", ext = extensionPattern(".zid") )
-		mustcontain( ZIDesc$Label, Samples, "One or more samples not in ZIDesc!" )
+		Samples <- get.sampleinfo(ZidFiles, type = "sample",
+			ext = extensionPattern(".zid"))
+		mustcontain(ZIDesc$Label, Samples, "One or more samples not in ZIDesc!")
 	}
 	
 	# Start the process
@@ -128,389 +124,392 @@
 	cat("Processing",  imax, "samples...\n")
 	logProcess(paste("Processing",  imax, "samples..."))
 	
-	results <- lapply( 1:imax, function(i){
+	results <- lapply(1:imax, function (i) {
 		Progress(i, imax)
 		
-    # Modif Kevin Denis for semi automatic recognition
-    if(Semi){
-      if(is.null(SemiTab)){
-        stop("You must provide a table with manual classification")
-      }
-      if(!inherits(SemiTab, "ZITrain")) stop("SemiTab must be a ZItrain object with manual classification")
+		# Modif Kevin Denis for semi automatic recognition
+		if (isTRUE(Semi)) {
+			if (is.null(SemiTab))
+				stop("You must provide a table with manual classification")
+			if (!inherits(SemiTab, "ZITrain"))
[TRUNCATED]

To get the complete diff run:
    svnlook diff /svnroot/zooimage -r 185

