[Diagnosismed-commits] r14 - in pkg/DiagnosisMed: . R man
noreply at r-forge.r-project.org
Thu May 21 07:39:45 CEST 2009
Author: pedrobrasil
Date: 2009-05-21 07:39:45 +0200 (Thu, 21 May 2009)
New Revision: 14
Modified:
pkg/DiagnosisMed/DESCRIPTION
pkg/DiagnosisMed/R/ROC.r
pkg/DiagnosisMed/R/TGROC.r
pkg/DiagnosisMed/R/print.TGROC.r
pkg/DiagnosisMed/R/print.diag.r
pkg/DiagnosisMed/man/ROC.Rd
pkg/DiagnosisMed/man/TGROC.Rd
Log:
Modified: pkg/DiagnosisMed/DESCRIPTION
===================================================================
--- pkg/DiagnosisMed/DESCRIPTION 2009-04-27 11:19:32 UTC (rev 13)
+++ pkg/DiagnosisMed/DESCRIPTION 2009-05-21 05:39:45 UTC (rev 14)
@@ -1,9 +1,9 @@
Package: DiagnosisMed
-Version: 0.2
-Date: 2009-04-12
+Version: 0.2.1
+Date: 2009-05-21
Author: Pedro Brasil <pedro.brasil at ipec.fiocruz.br>
Maintainer: Pedro Brasil <pedro.brasil at ipec.fiocruz.br>
-Depends: R (>= 2.7.2),epitools,epicalc, TeachingDemos, tcltk
+Depends: R (>= 2.7.2),epitools,epicalc, TeachingDemos, tcltk, AMORE
Title: Diagnostic test accuracy evaluation for medical professionals.
Description: DiagnosisMed is a package to analyze data from diagnostic test accuracy studies evaluating health conditions. It is being built to be used by health professionals. This package is - or soon will be - able to estimate sample size for common situations in diagnostic test accuracy, estimate sensitivity and specificity from categorical and continuous test results (including some evaluations of indeterminate results), and compare different analysis strategies using measures commonly used by health professionals.
-License: GPL version 2 or later
+License: GPL (>= 2)
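
The new AMORE entry in Depends supports the neural-network smoothing added to TGROC.r further below (newff/train/sim). A minimal sketch of that pipeline, assuming toy vectors x and se that are purely illustrative and not part of the package; the network settings simply restate the new TGROC() defaults:

    library(AMORE)
    # toy stand-ins for a test scale and its non-parametric sensitivity
    x  <- seq(0, 10, by = 0.5)
    se <- rev(seq(0.05, 0.95, length.out = length(x)))
    # one input, five hidden sigmoid neurons, one sigmoid output
    net <- newff(n.neurons = c(1, 5, 1), learning.rate.global = 1e-2,
                 momentum.global = 0.3, error.criterium = "LMS", Stao = NA,
                 hidden.layer = "sigmoid", output.layer = "sigmoid", method = "ADAPTgdwm")
    fit <- train(net, P = x, T = se, error.criterium = "LMS",
                 report = FALSE, show.step = 5000, n.shows = 1)
    smoothed <- sim(fit$net, seq(0, 10, by = 0.1))  # smoothed sensitivity on a fine grid
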
Modified: pkg/DiagnosisMed/R/ROC.r
===================================================================
--- pkg/DiagnosisMed/R/ROC.r 2009-04-27 11:19:32 UTC (rev 13)
+++ pkg/DiagnosisMed/R/ROC.r 2009-05-21 05:39:45 UTC (rev 14)
@@ -24,6 +24,7 @@
if (Prevalence>0){
(pop.prevalence<-Prevalence)
}
+
if (is.numeric(gold)==TRUE){
# Making test summaries: overall, diseased only, and non-diseased only
test.summary<-round(c(summary(test),sd(test)),digits=5)
@@ -39,6 +40,7 @@
X<-test[gold==0] # pag 209
Y<-test[gold==1] # pag 209
}
+
if (is.factor(gold)==TRUE){
# The same test summaries but with different reference standard codes
test.summary<-round(c(summary(test),sd(test)),digits=5)
@@ -51,6 +53,7 @@
X<-test[gold=="negative"] # pag 209
Y<-test[gold=="positive"] # pag 209
}
+
m<-length(X) # pag 209
n<-length(Y) # pag 209
D10X<-function(Xi){(1/n)*sum(Y>=Xi)} # pag 211
@@ -71,130 +74,82 @@
test.diag.table<-as.data.frame(test.values)
# Making a table with Se, Sp, PLR, NLR, PPV, NPV and their confidence limits for each cut-off
for (i in 1:nrow(test.diag.table)) {
- test.diag.table$Sensitivity[i] <- round(sum(test.table[(i:nrow(test.table)),2])/
- sum(test.table[,2]),digits=4)
- test.diag.table$Se.inf.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(
- test.table),2]),sum(test.table[,2]),conf.level=CL)[4]),digits=4)
- test.diag.table$Se.sup.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(
- test.table),2]),sum(test.table[,2]),conf.level=CL)[5]),digits=4)
- test.diag.table$Specificity[i] <- round((sum(test.table[(1:i-1),1]))/(sum(
- test.table[,1])),digits=4)
- test.diag.table$Sp.inf.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[(
- 1:i-1),1]),sum(test.table[,1]),conf.level=CL)[4]),digits=4)
- test.diag.table$Sp.sup.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[
- (1:i-1),1]),sum(test.table[,1]),conf.level=CL)[5]),digits=4)
- test.diag.table$PPV[i]<-round((sum(test.table[i:nrow(test.table),2]))/(
- (sum(test.table[i:nrow(test.table),2]))+
- (sum(test.table[i:nrow(test.table),1]))),digits=4)
- test.diag.table$PPV.inf.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[
- i:nrow(test.table),2]),((sum(test.table[i:nrow(test.table),2]))+(sum(
+ test.diag.table$Sensitivity[i] <- round(sum(test.table[(i:nrow(test.table)),2])/sum(test.table[,2]),digits=4)
+ test.diag.table$Se.inf.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(test.table),2]),sum(test.table[,2]),conf.level=CL)[4]),digits=4)
+ test.diag.table$Se.sup.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(test.table),2]),sum(test.table[,2]),conf.level=CL)[5]),digits=4)
+ test.diag.table$Specificity[i] <- round((sum(test.table[(1:i-1),1]))/(sum(test.table[,1])),digits=4)
+ test.diag.table$Sp.inf.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[(1:i-1),1]),sum(test.table[,1]),conf.level=CL)[4]),digits=4)
+ test.diag.table$Sp.sup.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[(1:i-1),1]),sum(test.table[,1]),conf.level=CL)[5]),digits=4)
+ test.diag.table$PPV[i]<-round((sum(test.table[i:nrow(test.table),2]))/((sum(test.table[i:nrow(test.table),2]))+(sum(test.table[i:nrow(test.table),1]))),digits=4)
+ test.diag.table$PPV.inf.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(test.table),2]),((sum(test.table[i:nrow(test.table),2]))+(sum(
test.table[i:nrow(test.table),1]))),conf.level=CL)[4]),digits=4)
- test.diag.table$PPV.sup.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[
- i:nrow(test.table),2]),((sum(test.table[i:nrow(test.table),2]))+(sum(
+ test.diag.table$PPV.sup.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(test.table),2]),((sum(test.table[i:nrow(test.table),2]))+(sum(
test.table[i:nrow(test.table),1]))),conf.level=CL)[5]),digits=4)
- test.diag.table$NPV[i]<-round((sum(test.table[1:i-1,1]))/((sum(test.table[
- 1:i-1,1]))+sum(test.table[1:i-1,2])),digits=4)
- test.diag.table$NPV.inf.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[
- 1:i-1,1]),((sum(test.table[1:i-1,1]))+(sum(test.table[1:i-1,])))
+ test.diag.table$NPV[i]<-round((sum(test.table[1:i-1,1]))/((sum(test.table[1:i-1,1]))+sum(test.table[1:i-1,2])),digits=4)
+ test.diag.table$NPV.inf.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[1:i-1,1]),((sum(test.table[1:i-1,1]))+(sum(test.table[1:i-1,2])))
,conf.level=CL)[4]),digits=4)
- test.diag.table$NPV.sup.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[
- 1:i-1,1]),((sum(test.table[1:i-1,1]))+(sum(test.table[1:i-1,])))
+ test.diag.table$NPV.sup.cl[i]<-round(as.numeric(binom.wilson(sum(test.table[1:i-1,1]),((sum(test.table[1:i-1,1]))+(sum(test.table[1:i-1,2])))
,conf.level=CL)[5]),digits=4)
}
test.diag.table$PLR<-round(test.diag.table$Sensitivity/(1-test.diag.table$Specificity),digits=2)
- test.diag.table$PLR.inf.cl<-round(exp(log(test.diag.table$PLR)-
- (qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((1-test.diag.table$Sensitivity)/
- ((sum(test.diag.table[,2]))*test.diag.table$Specificity)+
- (test.diag.table$Specificity)/((sum(test.diag.table[,1]))*
- (1-test.diag.table$Specificity)))),digits=2)
- test.diag.table$PLR.sup.cl<-round(exp(log(test.diag.table$PLR)+(qnorm(1-((1-CL)/
- 2),mean=0,sd=1))*sqrt((1-test.diag.table$Sensitivity)/((sum(
- test.diag.table[,2]))*test.diag.table$Specificity)+(
- test.diag.table$Specificity)/((sum(test.diag.table[,1]))*(1-
- test.diag.table$Specificity)))),digits=2)
+ test.diag.table$PLR.inf.cl<-round(exp(log(test.diag.table$PLR)-(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((1-test.diag.table$Sensitivity)/
+ ((sum(test.diag.table[,2]))*test.diag.table$Specificity)+(test.diag.table$Specificity)/((sum(test.diag.table[,1]))*(1-test.diag.table$Specificity)))),digits=2)
+ test.diag.table$PLR.sup.cl<-round(exp(log(test.diag.table$PLR)+(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((1-test.diag.table$Sensitivity)/((sum(
+ test.diag.table[,2]))*test.diag.table$Specificity)+(test.diag.table$Specificity)/((sum(test.diag.table[,1]))*(1-test.diag.table$Specificity)))),digits=2)
test.diag.table$NLR<-round((1-test.diag.table$Sensitivity)/test.diag.table$Specificity,digits=2)
- test.diag.table$NLR.inf.cl<-round(exp(log(test.diag.table$NLR)-(qnorm(1-((1-CL)/2)
- ,mean=0,sd=1))*sqrt((test.diag.table$Sensitivity)/((sum(test.diag.table[,
- 2]))*(1-test.diag.table$Sensitivity))+(1-test.diag.table$Specificity)/((
- sum(test.diag.table[,1]))*(test.diag.table$Specificity)))),digits=2)
- test.diag.table$NLR.sup.cl<-round(exp(log(test.diag.table$NLR)+(qnorm(1-((1-CL)/2)
- ,mean=0,sd=1))*sqrt((test.diag.table$Sensitivity)/((sum(test.diag.table[,2])
- )*(1-test.diag.table$Sensitivity))+(1-test.diag.table$Specificity)/((sum(
- test.diag.table[,1]))*(test.diag.table$Specificity)))),digits=2)
+ test.diag.table$NLR.inf.cl<-round(exp(log(test.diag.table$NLR)-(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((test.diag.table$Sensitivity)/((sum(test.diag.table[,2]))*(1-test.diag.table$Sensitivity))+(1-test.diag.table$Specificity)/((sum(test.diag.table[,1]))*(test.diag.table$Specificity)))),digits=2)
+ test.diag.table$NLR.sup.cl<-round(exp(log(test.diag.table$NLR)+(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((test.diag.table$Sensitivity)/((sum(test.diag.table[,2]))*(1-test.diag.table$Sensitivity))+(1-test.diag.table$Specificity)/((sum(test.diag.table[,1]))*(test.diag.table$Specificity)))),digits=2)
# The following is not a result but will help to find the best cut-offs
test.cutoff.table<-as.data.frame(test.values)
for(i in 1:nrow(test.cutoff.table)) {
#Accuracy is (TP+TN)/sample.size
- test.cutoff.table$Accuracy[i]<-round((sum(test.table[i:nrow(test.table),2])+
- sum(test.table[1:i-1,1]))/sample.size,digits=4)
- test.cutoff.table$DOR[i]<-round(((sum(test.table[i:nrow(test.table),2])*(sum(test.table[1:i-1,1])))/
- ((sum(test.table[i:nrow(test.table),1]))*(sum(test.table[1:i-1,2])))),digits=2)
- test.cutoff.table$Error.rate[i]<-round(((sum(test.table[1:i-1,2]))+(sum(
- test.table[i:nrow(test.table),1])))/sample.size,digits=4)
+ test.cutoff.table$Accuracy[i]<-(sum(test.table[i:nrow(test.table),2])+sum(test.table[1:i-1,1]))/sample.size
+ test.cutoff.table$DOR[i]<-(((sum(test.table[i:nrow(test.table),2])*(sum(test.table[1:i-1,1])))/
+ ((sum(test.table[i:nrow(test.table),1]))*(sum(test.table[1:i-1,2])))))
+ test.cutoff.table$Error.rate[i]<-((sum(test.table[1:i-1,2]))+(sum(test.table[i:nrow(test.table),1])))/sample.size
#Error.trade.off - subscript out of limits error; only zeros can be used with negative subscripts
#Error.trade.off[i]<-(sum(test.table[1:i-1,2])-sum(test.table[1:i-2,2]))
# (sum(test.table[i:nrow(test.table),1])-sum(test.table[i+
# 1:nrow(test.table),1]))
- test.cutoff.table$Accuracy.area[i]<-round((sum(test.table[i:nrow(test.table)
- ,2])*sum(test.table[1:i-1,1]))/((sample.size-pop.prevalence)*pop.prevalence),digits=4)
+ test.cutoff.table$Accuracy.area[i]<-round((sum(test.table[i:nrow(test.table),2])*sum(test.table[1:i-1,1]))/((sample.size-pop.prevalence)*pop.prevalence),digits=4)
}
test.cutoff.table$DOR<-ifelse(test.cutoff.table$DOR==Inf,NA,test.cutoff.table$DOR)
- test.cutoff.table$Max.Se.Sp<-round((test.diag.table$Sensitivity+
- test.diag.table$Specificity),digits=4)
- test.cutoff.table$Youden<-test.diag.table$Sensitivity+test.diag.table$Specificity-1
+ test.cutoff.table$Max.Se.Sp<-(test.diag.table$Sensitivity + test.diag.table$Specificity)
+ test.cutoff.table$Youden<-test.diag.table$Sensitivity+test.diag.table$Specificity - 1
test.cutoff.table$Se.equals.Sp<-abs(test.diag.table$Specificity-test.diag.table$Sensitivity)
test.cutoff.table$MinRocDist<-(test.diag.table$Specificity-1)^2+(1-test.diag.table$Sensitivity)^2
# Efficiency= Se*prevalence+(1-prevalence)*Sp
- test.cutoff.table$Efficiency<-(test.diag.table$Sensitivity*(pop.prevalence)
- )+((1-(pop.prevalence))*test.diag.table$Specificity)
+ test.cutoff.table$Efficiency<-(test.diag.table$Sensitivity*(pop.prevalence))+((1-(pop.prevalence))*test.diag.table$Specificity)
# MisclassificationCostTerm(MCT)=(1-prevalence)(1-Sp)+r*prevalence(1-Se) - r=cost(FN)/cost(FP)
- test.cutoff.table$MCT<-(1-(pop.prevalence))*(1-test.diag.table$Specificity)+(Cost*
- (pop.prevalence))*(1-test.diag.table$Sensitivity)
+ test.cutoff.table$MCT<-(1-(pop.prevalence))*(1-test.diag.table$Specificity)+(Cost*(pop.prevalence))*(1-test.diag.table$Sensitivity)
# Making a table with the test result for each best cut-off and attaching validity measures
+
best.cutoff<-test.cutoff.table$test.values[which.max(test.cutoff.table$Accuracy)]
- test.best.cutoff<-cbind(best.cutoff,test.diag.table[which.max(test.cutoff.table$Accuracy)
- ,2:7],test.diag.table[which.max(test.cutoff.table$Accuracy),14:16])
+ test.best.cutoff<-test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)]
best.cutoff<-test.cutoff.table$test.values[which.max(test.cutoff.table$DOR)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.max(test.cutoff.table$DOR),2:7],test.diag.table[which.max
- (test.cutoff.table$DOR),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
best.cutoff<-test.cutoff.table$test.values[which.min(test.cutoff.table$Error.rate)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.min(test.cutoff.table$Error.rate),2:7],test.diag.table[which.min
- (test.cutoff.table$Error.rate),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
best.cutoff<-test.cutoff.table$test.values[which.max(test.cutoff.table$Accuracy.area)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.max(test.cutoff.table$Accuracy.area),2:7],test.diag.table[which.max
- (test.cutoff.table$Accuracy.area),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
best.cutoff<-test.cutoff.table$test.values[which.max(test.cutoff.table$Max.Se.Sp)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.max(test.cutoff.table$Max.Se.Sp),2:7],test.diag.table[which.max
- (test.cutoff.table$Max.Se.Sp),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
best.cutoff<-test.cutoff.table$test.values[which.max(test.cutoff.table$Youden)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.max(test.cutoff.table$Youden),2:7],test.diag.table[which.max
- (test.cutoff.table$Youden),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
best.cutoff<-test.cutoff.table$test.values[which.min(test.cutoff.table$Se.equals.Sp)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.min(test.cutoff.table$Se.equals.Sp),2:7],test.diag.table[which.min
- (test.cutoff.table$Se.equals.Sp),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
best.cutoff<-test.cutoff.table$test.values[which.min(test.cutoff.table$MinRocDist)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.min(test.cutoff.table$MinRocDist),2:7],test.diag.table[which.min
- (test.cutoff.table$MinRocDist),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
best.cutoff<-test.cutoff.table$test.values[which.max(test.cutoff.table$Efficiency)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.max(test.cutoff.table$Efficiency),2:7],test.diag.table[which.max
- (test.cutoff.table$Efficiency),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
best.cutoff<-test.cutoff.table$test.values[which.min(test.cutoff.table$MCT)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,test.diag.table[
- which.min(test.cutoff.table$MCT),2:7],test.diag.table[which.min
- (test.cutoff.table$MCT),14:16]))
+ test.best.cutoff<-rbind(test.best.cutoff,test.diag.table[test.diag.table$test.values==best.cutoff,c(1:7,14:16)])
rownames(test.best.cutoff)<- c("Max. Accuracy", "Max. DOR","Min. Error rate",
"Max. Accuracy area","Max. Sens+Spec","Max. Youden","Se=Sp","Min. ROC distance",
"Max. Efficiency", "Min. MCT")
rm(best.cutoff)
+
# the plot commands
if(Plot==TRUE){
plot(1-test.diag.table$Specificity,test.diag.table$Sensitivity,type="l",
@@ -347,11 +302,21 @@
}
#names(pop.prevalence)<-c("Informed disease prevalence - same as sample prevalence if not informed")
#names(sample.prevalence)<-c("Observed prevalence by gold standard")
- reteval<-list(pop.prevalence=pop.prevalence,sample.size=sample.size,sample.prevalence=sample.prevalence,test.summary=test.summary,AUC.summary=AUC.summary,test.best.cutoff=test.best.cutoff,test.diag.table=test.diag.table,CL=CL,test.cutoff.table=test.cutoff.table)
- invisible(reteval)
+ reteval<-list(pop.prevalence=pop.prevalence,
+ sample.size=sample.size,
+ sample.prevalence=sample.prevalence,
+ test.summary=test.summary,
+ AUC.summary=AUC.summary,
+ test.table=test.table,
+ test.best.cutoff=test.best.cutoff,
+ test.diag.table=test.diag.table,
+ CL=CL,
+ test.cutoff.table=test.cutoff.table)
+
class(reteval)<-"ROC"
if(Print==TRUE){
if(Print.full==TRUE){ print(reteval,Full=TRUE) }
else{ print(reteval) }
}
+ invisible(reteval)
}
\ No newline at end of file
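
The refactor above drops the cbind(best.cutoff, ...) construction in favour of selecting rows of test.diag.table by their test value: each criterion (max accuracy, max DOR, min error rate, ...) picks a cut-off and the matching row is stacked into test.best.cutoff. A small sketch of that selection pattern under toy data (the data frames and values below are illustrative stand-ins, and the smaller toy frame skips the c(1:7,14:16) column subset used above):

    # illustrative stand-ins for test.diag.table and test.cutoff.table
    test.diag.table   <- data.frame(test.values = 1:5,
                                    Sensitivity = c(1.00, 0.90, 0.70, 0.40, 0.10),
                                    Specificity = c(0.00, 0.30, 0.60, 0.85, 0.95))
    test.cutoff.table <- data.frame(test.values = 1:5,
                                    Accuracy    = c(0.50, 0.62, 0.66, 0.64, 0.55))
    best.cutoff <- test.cutoff.table$test.values[which.max(test.cutoff.table$Accuracy)]
    # select the whole diagnostic row for that cut-off, as the new code does
    test.best.cutoff <- test.diag.table[test.diag.table$test.values == best.cutoff, ]
    test.best.cutoff
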
Modified: pkg/DiagnosisMed/R/TGROC.r
===================================================================
--- pkg/DiagnosisMed/R/TGROC.r 2009-04-27 11:19:32 UTC (rev 13)
+++ pkg/DiagnosisMed/R/TGROC.r 2009-05-21 05:39:45 UTC (rev 14)
@@ -4,10 +4,25 @@
CL=0.95,
Inconclusive=0.95,
Prevalence=0,
- Plot=TRUE,
+ t.max=NULL,
+ t.min=NULL,
+ precision=.0001,
+ n.neurons=c(1,5,1),
+ learning.rate.global=1e-2,
+ momentum.global=0.3,
+ error.criterium="LMS",
+ Stao=NA,
+ hidden.layer="sigmoid",
+ output.layer="sigmoid",
+ method="ADAPTgdwm",
+ report=FALSE,
+ show.step=5000,
+ n.shows=1,
+ Plot="Both",
Plot.inc.range=TRUE,
Plot.Cl=FALSE,
Plot.cutoff="None",
+ cex=0.5,
cex.sub=0.85,
Print=TRUE){
@@ -19,8 +34,17 @@
test.table<-table(test,gold)
if (dim(test.table)[2] != 2){
- stop("It seems that your gold standard has more than 2 categories")
+ stop("It seems that your gold standard has more than 2 categories!")
}
+ if(is.null(precision)||!is.numeric(precision)){
+ stop("Precision must be set to a numeric value!")
+ }
+ if(Plot!="Both" & Plot!="Non-parametric" & Plot!="Parametric" & Plot!="None"){
+ stop("Plot must be set either to 'None','Both','Non-parametric' or 'Parametric'!")
+ }
+ if(Plot.cutoff!="Min.MCT" & Plot.cutoff!="Se=Sp" & Plot.cutoff!="Max.Efficiency" & Plot.cutoff!="None"){
+ stop("Plot.cutoff must be set either to 'None','Max.Efficiency','Min.MCT' or 'Se=Sp'!")
+ }
sample.prevalence<-(sum(test.table[,2]))/(sum(test.table))
if (Prevalence==0){
pop.prevalence<-sample.prevalence
@@ -28,6 +52,7 @@
if (Prevalence>0){
(pop.prevalence<-Prevalence)
}
+
names(sample.prevalence)<-c("Disease prevalence in the sample")
names(pop.prevalence)<-c("Informed disease prevalence in the population")
sample.size<-sum(test.table)
@@ -41,118 +66,274 @@
names(inc)<-"Inconclusive tolerance level"
test.values<-(as.numeric(rownames(unclass(test.table))))
non.parametric<-as.data.frame(test.values)
+
for (i in 1:nrow(non.parametric)) {
- non.parametric$Sensitivity[i] <- round(sum(test.table[(i:nrow(test.table)),2])/
- sum(test.table[,2]),digits=4)
- non.parametric$Se.inf.Cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(
- test.table),2]),sum(test.table[,2]),conf.level=CL)[4]),digits=4)
- non.parametric$Se.sup.Cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(
- test.table),2]),sum(test.table[,2]),conf.level=CL)[5]),digits=4)
- non.parametric$Specificity[i] <- round((sum(test.table[(1:i-1),1]))/(sum(
- test.table[,1])),digits=4)
- non.parametric$Sp.inf.Cl[i]<-round(as.numeric(binom.wilson(sum(test.table[(
- 1:i-1),1]),sum(test.table[,1]),conf.level=CL)[4]),digits=4)
- non.parametric$Sp.sup.Cl[i]<-round(as.numeric(binom.wilson(sum(test.table[
- (1:i-1),1]),sum(test.table[,1]),conf.level=CL)[5]),digits=4)
- }
+ non.parametric$Sensitivity[i] <- round(sum(test.table[(i:nrow(test.table)),2])/sum(test.table[,2]),digits=4)
+ non.parametric$Se.inf.Cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(test.table),2]),sum(test.table[,2]),
+ conf.level=CL)[4]),digits=4)
+ non.parametric$Se.sup.Cl[i]<-round(as.numeric(binom.wilson(sum(test.table[i:nrow(test.table),2]),sum(test.table[,2]),
+ conf.level=CL)[5]),digits=4)
+ non.parametric$Specificity[i] <- round((sum(test.table[(1:i-1),1]))/(sum(test.table[,1])),digits=4)
+ non.parametric$Sp.inf.Cl[i]<-round(as.numeric(binom.wilson(sum(test.table[(1:i-1),1]),sum(test.table[,1]),
+ conf.level=CL)[4]),digits=4)
+ non.parametric$Sp.sup.Cl[i]<-round(as.numeric(binom.wilson(sum(test.table[(1:i-1),1]),sum(test.table[,1]),
+ conf.level=CL)[5]),digits=4)
+ }
non.parametric$PLR<-round(non.parametric$Sensitivity/(1-non.parametric$Specificity),digits=2)
- non.parametric$PLR.inf.cl<-round(exp(log(non.parametric$PLR)-
- (qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((1-non.parametric$Sensitivity)/
- ((sum(non.parametric[,2]))*non.parametric$Specificity)+
- (non.parametric$Specificity)/((sum(non.parametric[,1]))*
+ non.parametric$PLR.inf.cl<-round(exp(log(non.parametric$PLR)-(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((1-non.parametric$Sensitivity)/
+ ((sum(non.parametric[,2]))*non.parametric$Specificity)+(non.parametric$Specificity)/((sum(non.parametric[,1]))*
(1-non.parametric$Specificity)))),digits=2)
- non.parametric$PLR.sup.cl<-round(exp(log(non.parametric$PLR)+(qnorm(1-((1-CL)/
- 2),mean=0,sd=1))*sqrt((1-non.parametric$Sensitivity)/((sum(
- non.parametric[,2]))*non.parametric$Specificity)+(
- non.parametric$Specificity)/((sum(non.parametric[,1]))*(1-
- non.parametric$Specificity)))),digits=2)
-
- cutoff.table<-as.data.frame(test.values)
- cutoff.table$Se.equals.Sp<-abs(non.parametric$Specificity-non.parametric$Sensitivity)
+ non.parametric$PLR.sup.cl<-round(exp(log(non.parametric$PLR)+(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt(
+ (1-non.parametric$Sensitivity)/((sum(non.parametric[,2]))*non.parametric$Specificity)+(
+ non.parametric$Specificity)/((sum(non.parametric[,1]))*(1-non.parametric$Specificity)))),digits=2)
+ # Se=Sp cut-off
+ non.parametric$Se.equals.Sp<-abs(non.parametric$Specificity-non.parametric$Sensitivity)
# Efficiency= Se*prevalence+(1-prevalence)*Sp
- cutoff.table$Efficiency<-(non.parametric$Sensitivity*(pop.prevalence)
- )+((1-(pop.prevalence))*non.parametric$Specificity)
+ non.parametric$Efficiency<-(non.parametric$Sensitivity*(pop.prevalence))+((1-(pop.prevalence))*non.parametric$Specificity)
# MisclassificationCostTerm(MCT)=(1-prevalence)(1-Sp)+r*prevalence(1-Se) - r=cost(FN)/cost(FP)
- cutoff.table$MCT<-(1-(pop.prevalence))*(1-non.parametric$Specificity)+(Cost*
- (pop.prevalence))*(1-non.parametric$Sensitivity)
+ non.parametric$MCT<-(1-(pop.prevalence))*(1-non.parametric$Specificity)+(Cost*(pop.prevalence))*(1-non.parametric$Sensitivity)
+# np.test.best.cutoff<-subset(non.parametric,subset=c(
+# non.parametric[which.min(non.parametric$Se.equals.Sp)],
+# non.parametric[which.max(non.parametric$Efficiency)],
+# non.parametric[which.min(non.parametric$MCT)],
+# select=test.values:PLR.sup.cl
+# )) Does not work... something wrong with the row or column selection
- best.cutoff<-cutoff.table$test.values[which.min(cutoff.table$Se.equals.Sp)]
- test.best.cutoff<-cbind(best.cutoff,non.parametric[
- which.min(cutoff.table$Se.equals.Sp),2:10])
- best.cutoff<-cutoff.table$test.values[which.max(cutoff.table$Efficiency)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,non.parametric[
- which.max(cutoff.table$Efficiency),2:10]))
- best.cutoff<-cutoff.table$test.values[which.min(cutoff.table$MCT)]
- test.best.cutoff<-rbind(test.best.cutoff,cbind(best.cutoff,non.parametric[
- which.min(cutoff.table$MCT),2:10]))
- rownames(test.best.cutoff)<- c("Se=Sp","Max. Efficiency","Min. MCT")
- best.cutoff<-test.best.cutoff
- rm(test.best.cutoff)
- non.parametric.inconclusive<-non.parametric[which.min(abs(
- Inconclusive-non.parametric$Sensitivity)),1:10]
- non.parametric.inconclusive<-rbind(non.parametric.inconclusive,non.parametric[
- which.min(abs(Inconclusive-non.parametric$Specificity)),1:10])
+ np.test.best.cutoff<-as.data.frame(rbind(
+ non.parametric[which.min(non.parametric$Se.equals.Sp),1:10],
+ non.parametric[which.max(non.parametric$Efficiency),1:10],
+ non.parametric[which.min(non.parametric$MCT),1:10]))
+
+ #best.cutoff,non.parametric[which.min(non.parametric$Se.equals.Sp),1:10])
+ #best.cutoff<-non.parametric$test.values[which.max(non.parametric$Efficiency)]
+ #np.test.best.cutoff<-rbind(np.test.best.cutoff,cbind(best.cutoff,non.parametric[
+ # which.max(non.parametric$Efficiency),2:10]))
+ #best.cutoff<-non.parametric$test.values[which.min(non.parametric$MCT)]
+ #np.test.best.cutoff<-rbind(np.test.best.cutoff,cbind(best.cutoff,non.parametric[
+ # which.min(non.parametric$MCT),2:10]))
+ rownames(np.test.best.cutoff)<- c("Se=Sp","Max. Efficiency","Min. MCT")
+
+ non.parametric.inconclusive<-as.data.frame(rbind(
+ non.parametric[which.min(abs(Inconclusive-non.parametric$Sensitivity)),1:10],
+ non.parametric[which.min(abs(Inconclusive-non.parametric$Specificity)),1:10]))
rownames(non.parametric.inconclusive)<-c("Lower inconclusive","Upper inconclusive")
- if(Plot==TRUE){
- plot(non.parametric$test.values,non.parametric$Sensitivity,
- ylim=c(0,1),type="l",col=2, xlab="test scale",ylab="Sensitivity & Specificity",
- lty=1)
- lines(non.parametric$test.values,non.parametric$Specificity,col=4,lty=2)
- leg.txt<-c("Se", "Sp")
- fill.col<-c(2,4)
- line.type<-c(1,2)
+
+ if(is.null(t.max)){
+ t.max<-max(non.parametric$test.values)
+ }
+ if(is.null(t.min)){
+ t.min<-min(non.parametric$test.values)
+ }
+ net <- newff(n.neurons=n.neurons, learning.rate.global=learning.rate.global, momentum.global=momentum.global,
+ error.criterium=error.criterium, Stao=Stao, hidden.layer=hidden.layer,
+ output.layer=output.layer, method=method)
+ net.Se <- train(net,P=non.parametric$test.values,T=non.parametric$Sensitivity,error.criterium=error.criterium,
+ report=report,show.step=show.step,n.shows=n.shows)
+ net.Sp <- train(net,P=non.parametric$test.values,T=non.parametric$Specificity,error.criterium=error.criterium,
+ report=report,show.step=show.step,n.shows=n.shows)
+ test.values<-seq(t.min,t.max,precision)
+ parametric<-as.data.frame(test.values)
+ parametric$Sensitivity <- as.numeric(sim(net.Se$net, test.values))
+ parametric$Se.inf.Cl<-parametric$Sensitivity - qnorm(1 - (1-conf.limit)/2) * sqrt(((parametric$Sensitivity *
+ (1 - parametric$Sensitivity))/(sample.size * sample.prevalence)))
+ parametric$Se.sup.Cl<-parametric$Sensitivity + qnorm(1 - (1-conf.limit)/2) * sqrt(((parametric$Sensitivity *
+ (1 - parametric$Sensitivity))/(sample.size * sample.prevalence)))
+ parametric$Specificity <- as.numeric(sim(net.Sp$net, test.values))
+ parametric$Sp.inf.Cl <- parametric$Specificity - qnorm(1 - (1-conf.limit)/2) * sqrt(((parametric$Specificity *
+ (1 - parametric$Specificity))/(sample.size * (1-sample.prevalence))))
+ parametric$Sp.sup.Cl <- parametric$Specificity + qnorm(1 - (1-conf.limit)/2) * sqrt(((parametric$Specificity *
+ (1 - parametric$Specificity))/(sample.size * (1-sample.prevalence))))
+ parametric$PLR <- parametric$Sensitivity/(1-parametric$Specificity)
+ parametric$PLR.inf.Cl<-exp(log(parametric$PLR)-(qnorm(1-((1-conf.limit)/2),mean=0,sd=1))*sqrt((1-parametric$Sensitivity)/
+ ((sample.size * sample.prevalence)*parametric$Specificity)+(parametric$Specificity)/((sample.size *
+ (1-sample.prevalence))*(1-parametric$Specificity))))
+ parametric$PLR.sup.Cl<-exp(log(parametric$PLR)+(qnorm(1-((1-conf.limit)/2),mean=0,sd=1))*sqrt((1-parametric$Sensitivity)/
+ ((sample.size * sample.prevalence)*parametric$Specificity)+(parametric$Specificity)/((sample.size *
+ (1-sample.prevalence))*(1-parametric$Specificity))))
+ #parametric$NLR <- (1-parametric$Specificity)/parametric$Sensitivity
+ #parametric$NLR.inf.Cl <- exp(log(parametric$NLR)-(qnorm(1-((1-(1-conf.limit))/2),mean=0,sd=1))*
+ # sqrt((parametric$Sensitivity)/((sample.size * sample.prevalence)*(1-parametric$Sensitivity))+(1-parametric$Specificity)/
+ # ((sample.size * (1-sample.prevalence))*(parametric$Specificity))))
+ #parametric$NLR.sup.Cl <- exp(log(parametric$NLR)+(qnorm(1-((1-conf.limit)/2),mean=0,sd=1))*sqrt((parametric$Sensitivity)/
+ # ((sample.size * sample.prevalence)*(1-parametric$Sensitivity))+(1-parametric$Specificity)/((sample.size *
+ # (1-sample.prevalence))*(parametric$Specificity))))
+ parametric$Se.equals.Sp<-abs(parametric$Sensitivity-parametric$Specificity)
+ parametric$Efficiency<-parametric$Sensitivity*pop.prevalence+(1-pop.prevalence)*parametric$Specificity
+ parametric$MCT<-(1-(pop.prevalence))*(1-parametric$Specificity)+(cost*(pop.prevalence))*(1-parametric$Sensitivity)
+
+ parametric.inconclusive<-as.data.frame(rbind(
+ parametric[which.min(abs(inc-parametric$Sensitivity)),1:10],
+ parametric[which.min(abs(inc-parametric$Specificity)),1:10]
+ ))
+ rownames(parametric.inconclusive)<-c("Lower inconclusive","Upper inconclusive")
+
+ par.test.best.cutoff<-as.data.frame(rbind(
+ parametric[which.min(parametric$Se.equals.Sp),1:10],
+ parametric[which.max(parametric$Efficiency),1:10],
+ parametric[which.min(parametric$MCT),1:10]
+ ))
+ rownames(par.test.best.cutoff)<- c("Se=Sp","Max. Efficiency","Min. MCT")
+
+# Plot Commands
+ if(Plot!="None"){
+ if(Plot=="Parametric"|Plot=="Both"){
+ plot(parametric$test.values,parametric$Sensitivity,ylim=c(0,1),type="l",col=2, xlab="Test scale",
+ ylab="Sensitivity & Specificity",cex=cex,lty=1)
+ lines(parametric$test.values,parametric$Specificity,col=4,type="l",lty=2,cex=cex)
+ if(Plot=="Both"){
+ lines(non.parametric$test.values,non.parametric$Sensitivity,col=2,type="o",lty=1,cex=cex)
+ lines(non.parametric$test.values,non.parametric$Specificity,col=4,type="o",lty=2,cex=cex)
+ }
+ leg.txt<-c("Se", "Sp")
+ fill.col<-c(2,4)
+ line.type<-c(1,2)
+ subtitle<-""
+ }
+ if(Plot=="Non-parametric"){
+ plot(non.parametric$test.values,non.parametric$Sensitivity,
+ ylim=c(0,1),type="o",col=2, xlab="test scale",ylab="Sensitivity & Specificity",
+ lty=1,cex=cex)
+ lines(non.parametric$test.values,non.parametric$Specificity,col=4,type="o",lty=2,cex=cex)
+ leg.txt<-c("Se", "Sp")
+ fill.col<-c(2,4)
+ line.type<-c(1,2)
+ subtitle<-""
+ }
if(Plot.inc.range==TRUE){
abline(h=Inconclusive,col="lightgray",lty=4)
- abline(v=(non.parametric.inconclusive[1,1]),col="lightgray",lty=4)
- abline(v=(non.parametric.inconclusive[2,1]),col="lightgray",lty=4)
+ if(Plot=="Parametric"|Plot=="Both"){
+ abline(v=(parametric.inconclusive[1,1]),col="lightgray",lty=4)
+ abline(v=(parametric.inconclusive[2,1]),col="lightgray",lty=4)
+ subtitle<-paste("Parametric inconclusive limits at",formatC(inc),"level:",formatC(parametric.inconclusive[1,1]),
+ "-",formatC(parametric.inconclusive[2,1]),".")
+ }
+ if(Plot=="Non-parametric"){
+ abline(v=(non.parametric.inconclusive[1,1]),col="lightgray",lty=4)
+ abline(v=(non.parametric.inconclusive[2,1]),col="lightgray",lty=4)
+ subtitle<-paste("Non-parametric inconclusive limits at",formatC(inc),"level:",formatC(non.parametric.inconclusive[1,1]),
+ "-",formatC(non.parametric.inconclusive[2,1]),".")
+ }
leg.txt<-c(leg.txt,c("Inc limits"))
fill.col<-c(fill.col,c("lightgray"))
line.type<-c(line.type,4)
- subtitle<-paste("Non-parametric inconclusive limits at",formatC(inc),"level:",formatC(non.parametric.inconclusive[1,1]),"-",formatC(non.parametric.inconclusive[2,1]),".")
+ }
+ if(Plot.Cl==TRUE){
+ if(Plot=="Both"|Plot=="Parametric"){
[TRUNCATED]
To get the complete diff run:
svnlook diff /svnroot/diagnosismed -r 14
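
For orientation, the enlarged TGROC() signature in the diff above could be exercised roughly as follows. This is only a sketch: my.test and my.gold are hypothetical inputs (a continuous test result and its 0/1 or negative/positive reference standard), and every named argument simply restates a default shown in the new signature:

    # hypothetical data objects; not part of the package
    TGROC(my.test, my.gold,
          Plot = "Both",            # also accepts "None", "Non-parametric", "Parametric"
          Plot.inc.range = TRUE,
          Plot.cutoff = "None",     # or "Se=Sp", "Max.Efficiency", "Min.MCT"
          n.neurons = c(1, 5, 1),
          learning.rate.global = 1e-2,
          momentum.global = 0.3,
          error.criterium = "LMS",
          method = "ADAPTgdwm",
          precision = 1e-4,
          cex = 0.5)
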