[Diagnosismed-commits] r10 - in pkg/DiagnosisMed: . R man

noreply at r-forge.r-project.org
Mon Apr 13 22:29:56 CEST 2009


Author: pedrobrasil
Date: 2009-04-13 22:29:56 +0200 (Mon, 13 Apr 2009)
New Revision: 10

Added:
   pkg/DiagnosisMed/R/LRgraph.r
   pkg/DiagnosisMed/R/print.ROC.r
   pkg/DiagnosisMed/R/print.TGROC.r
   pkg/DiagnosisMed/man/LRgrgaph.Rd
   pkg/DiagnosisMed/man/TGROC.Rd
Modified:
   pkg/DiagnosisMed/DESCRIPTION
   pkg/DiagnosisMed/R/ROC.r
   pkg/DiagnosisMed/R/diagnosis.r
   pkg/DiagnosisMed/R/diagnosisI.r
   pkg/DiagnosisMed/man/ROC.Rd
   pkg/DiagnosisMed/man/diagnosis.Rd
   pkg/DiagnosisMed/man/interact.ROC.Rd
Log:


Modified: pkg/DiagnosisMed/DESCRIPTION
===================================================================
--- pkg/DiagnosisMed/DESCRIPTION	2009-03-15 03:02:55 UTC (rev 9)
+++ pkg/DiagnosisMed/DESCRIPTION	2009-04-13 20:29:56 UTC (rev 10)
@@ -1,6 +1,6 @@
 Package: DiagnosisMed
-Version: 0.1.2.3
-Date: 2009-03-13
+Version: 0.2
+Date: 2009-04-12
 Author: Pedro Brasil <pedro.brasil at ipec.fiocruz.br>
 Maintainer: Pedro Brasil <pedro.brasil at ipec.fiocruz.br>
 Depends: R (>= 2.7.2),epitools,epicalc, TeachingDemos, tcltk

Added: pkg/DiagnosisMed/R/LRgraph.r
===================================================================
--- pkg/DiagnosisMed/R/LRgraph.r	                        (rev 0)
+++ pkg/DiagnosisMed/R/LRgraph.r	2009-04-13 20:29:56 UTC (rev 10)
@@ -0,0 +1,16 @@
+LRgraph<-function(a=cbind(t1,t2),lwd=2,lty=1,cex=1,leg.cex=1.5,pt.cex=2){
+  plot(1-a[[6,1]],a[[4,1]],xlim=c(0,1),ylim=c(0,1),xlab="False positive rate",ylab="True positive rate",col=1,cex=cex,lwd=lwd,lty=lty)
+  abline(coef=c(0,((a[[4,1]])/(1-a[[6,1]]))),lwd=lwd) # positive LR line through (0,0), slope Se/(1-Sp)
+  abline(coef=c(1-1*((1-a[[4,1]])/(1-(1-a[[6,1]]))),(1-a[[4,1]])/(1-(1-a[[6,1]]))),lwd=lwd) # negative LR line through (1,1), slope (1-Se)/Sp
+  abline(v=1-a[[6,1]],lty=6,col="lightgray",lwd=lwd)
+  abline(h=a[[4,1]],lty=6,col="lightgray",lwd=lwd)
+  fill.col<-c(1)
+  symbol<-c(1)
+  for(i in 2:ncol(a))
+        {
+        points(1-a[[6,i]],a[[4,i]],col=i,pch=i,cex=cex,lwd=lwd,lty=lty) # x=1-Sp, y=Se, matching the first test's axes
+        fill.col<-c(fill.col,i)
+        symbol<-c(symbol,i)
+        }
+  legend("bottomright",legend=colnames(a),col=fill.col, pch=symbol, bty="n",cex=leg.cex,pt.cex=pt.cex,pt.lwd=lwd)
+}  

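For orientation, a minimal usage sketch of the new function (values mirror the LRgrgaph.Rd example below; it assumes, as the indexing above implies, that objects returned by diagnosisI() expose sensitivity as element 4 and specificity as element 6, so that cbind() of such objects yields the matrix a):

    library(DiagnosisMed)
    # Two tests scored against the same gold standard; argument values are
    # taken from the man page example further down in this commit.
    t1 <- diagnosisI(80, 20, 20, 80, print = FALSE)  # baseline test
    t2 <- diagnosisI(90, 10, 10, 90, print = FALSE)  # higher Se and Sp
    LRgraph(a = cbind(t1, t2))  # t1 is plotted first and anchors the LR lines
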
Modified: pkg/DiagnosisMed/R/ROC.r
===================================================================
--- pkg/DiagnosisMed/R/ROC.r	2009-03-15 03:02:55 UTC (rev 9)
+++ pkg/DiagnosisMed/R/ROC.r	2009-04-13 20:29:56 UTC (rev 10)
@@ -13,6 +13,7 @@
   if (dim(test.table)[2] != 2){
       stop("It seems that your gold standard has more than 2 categories")
   }
+  CL<-CL # force evaluation of the CL argument
   # Sample size
   sample.size<-sum(test.table)
   # Sample prevalence, replace by pop prevalence if adequate
@@ -58,7 +59,7 @@
   SD.AUC<-sqrt(VAR.AUC)
   alpha<-1-CL
   AUC.summary<-c(AUC- qnorm(1-alpha/2)*SD.AUC,AUC,AUC+ qnorm(1-alpha/2)*SD.AUC)
-  names(AUC.summary)<-c("AUC inf conf limit", "AUC","AUC sup conf limit")
+  #names(AUC.summary)<-c("AUC inf conf limit", "AUC","AUC sup conf limit")
   
   #TP sum(test.table[i:nrow(test.table),2])
   #FP sum(test.table[i:nrow(test.table),1])
@@ -344,13 +345,13 @@
          paste("AUC:",formatC(AUC,digits=4))
        )),bty="n")}
   }
-  names(pop.prevalence)<-c("Informed disease prevalence - same as sample prevalence if not informed")
-  names(sample.prevalence)<-c("Observed prevalence by gold standard")
-  if(Print.full==TRUE){
-    page(test.diag.table,method="print")}
-  reteval<-list(pop.prevalence=pop.prevalence,sample.size=sample.size,
-                sample.prevalence=sample.prevalence,test.summary=test.summary,
-                AUC.summary=AUC.summary,test.best.cutoff=test.best.cutoff)
-  invisible(reteval)
-  if(Print==TRUE)  {print(reteval)}
-}
+  #names(pop.prevalence)<-c("Informed disease prevalence - same as sample prevalence if not informed")
+  #names(sample.prevalence)<-c("Observed prevalence by gold standard")
+  reteval<-list(pop.prevalence=pop.prevalence,sample.size=sample.size,sample.prevalence=sample.prevalence,test.summary=test.summary,AUC.summary=AUC.summary,test.best.cutoff=test.best.cutoff,test.diag.table=test.diag.table,CL=CL,test.cutoff.table=test.cutoff.table)
+  class(reteval)<-"ROC" # tag the result so print() dispatches to print.ROC
+  if(Print==TRUE){
+     if(Print.full==TRUE){ print(reteval,Full=TRUE) }
+     else{ print(reteval) }
+  }
+  invisible(reteval) # returned last, so ROC(...,Print=FALSE) still yields the object
+}
\ No newline at end of file

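The rewritten tail of ROC() switches from inline printing to S3 dispatch. A self-contained sketch of the pattern (toy objects, not the package source):

    # The constructor tags its result with class "ROC"; print() then
    # dispatches to the print.ROC method added later in this revision.
    print.ROC <- function(x, ..., Full = FALSE) {
      if (Full) print(x$test.diag.table)  # stand-in for page(..., method = "print")
      cat("Area under ROC curve:", round(x$AUC.summary[2], 4), "\n")
      invisible(x)
    }
    reteval <- list(AUC.summary = c(0.81, 0.87, 0.93), test.diag.table = data.frame())
    class(reteval) <- "ROC"
    print(reteval)               # dispatches to print.ROC
    print(reteval, Full = TRUE)  # named extra arguments reach the method via ...
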
Modified: pkg/DiagnosisMed/R/diagnosis.r
===================================================================
--- pkg/DiagnosisMed/R/diagnosis.r	2009-03-15 03:02:55 UTC (rev 9)
+++ pkg/DiagnosisMed/R/diagnosis.r	2009-04-13 20:29:56 UTC (rev 10)
@@ -24,32 +24,32 @@
   p<-(TP+FN)/n
   # sensitivity and confidence limits
   Se<-TP/(TP+FN)
-  Se.cl<-as.numeric(binom.wilson(TP, TP+FN, conf.level = 0.95)[4:5])
+  Se.cl<-as.numeric(binom.wilson(TP, TP+FN, conf.level = CL)[4:5])
   # specificity and confidence limits
   Sp<-TN/(FP+TN)
-  Sp.cl<-as.numeric(binom.wilson(TN, FP+TN, conf.level = 0.95)[4:5])
+  Sp.cl<-as.numeric(binom.wilson(TN, FP+TN, conf.level = CL)[4:5])
   # positive and negative likelihood ratios and confidence limits
   PLR<-Se/(1-Sp)
   # LR confidence limits inspired by epi.tests{epiR}
-  PLR.inf.cl<-exp(log(PLR)-(qnorm(1-((1-0.95)/2),mean=0,sd=1))*sqrt((1-Se)/(
+  PLR.inf.cl<-exp(log(PLR)-(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((1-Se)/(
     (TP+FN)*Sp)+(Sp)/((FP+TN)*(1-Sp))))
-  PLR.sup.cl<-exp(log(PLR)+(qnorm(1-((1-0.95)/2),mean=0,sd=1))*sqrt((1-Se)/(
+  PLR.sup.cl<-exp(log(PLR)+(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((1-Se)/(
     (TP+FN)*Sp)+(Sp)/((FP+TN)*(1-Sp))))
   NLR<-(1-Se)/Sp
-  NLR.inf.cl<-exp(log(NLR)-(qnorm(1-((1-0.95)/2),mean=0,sd=1))*sqrt((Se)/((TP+
+  NLR.inf.cl<-exp(log(NLR)-(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((Se)/((TP+
     FN)*(1-Se))+(1-Sp)/((FP+TN)*(Sp))))
-  NLR.sup.cl<-exp(log(NLR)+(qnorm(1-((1-0.95)/2),mean=0,sd=1))*sqrt((Se)/((TP+
+  NLR.sup.cl<-exp(log(NLR)+(qnorm(1-((1-CL)/2),mean=0,sd=1))*sqrt((Se)/((TP+
     FN)*(1-Se))+(1-Sp)/((FP+TN)*(Sp))))
   #accuracy and confidence limits
   accu<-(TP+TN)/n
-  accu.cl<-as.numeric(binom.wilson(TP+TN, n, conf.level = 0.95)[4:5])
+  accu.cl<-as.numeric(binom.wilson(TP+TN, n, conf.level = CL)[4:5])
   # positive and negative predictive values and confidence limits
   PPV<-TP/(TP+FP)
-  PPV.cl<-as.numeric(binom.wilson(TP, TP+FP, conf.level = 0.95)[4:5])
+  PPV.cl<-as.numeric(binom.wilson(TP, TP+FP, conf.level = CL)[4:5])
   NPV<-TN/(TN+FN)
-  NPV.cl<-as.numeric(binom.wilson(TN, TN+FN, conf.level = 0.95)[4:5])
+  NPV.cl<-as.numeric(binom.wilson(TN, TN+FN, conf.level = CL)[4:5])
   # diagnostic odds ratio and confidence limits
-  OR<-oddsratio(tab)
+  OR<-oddsratio(tab,conf.level = CL)
   DOR<-OR$measure[2,1]
   #DOR<-(TP*TN)/(FP*FN)
   DOR.inf.cl<-OR$measure[2,2]
@@ -58,7 +58,7 @@
   # error rate and error trade
   #ER<-((FN/(FN+TN))*p)+(((FP/(FP+TP))*(TN+FP))
   ER<-(FN+FP)/n
-  ER.cl<-as.numeric(binom.wilson(FN+FP, n, conf.level = 0.95)[4:5])
+  ER.cl<-as.numeric(binom.wilson(FN+FP, n, conf.level = CL)[4:5])
   ET<-(FN/FP)
   # pre-test and post-test odds (to do)
   # area under ROC curve

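The edits above thread the CL argument through every interval that was hard-coded at 0.95, so the limits now follow the confidence level requested by the caller. For instance, for sensitivity (a sketch using epitools, whose binom.wilson() returns the lower and upper bounds in columns 4 and 5):

    library(epitools)
    CL <- 0.90                  # any confidence level, no longer fixed at 0.95
    TP <- 80; FN <- 20          # hypothetical counts
    Se <- TP / (TP + FN)
    Se.cl <- as.numeric(binom.wilson(TP, TP + FN, conf.level = CL)[4:5])
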
Modified: pkg/DiagnosisMed/R/diagnosisI.r
===================================================================
--- pkg/DiagnosisMed/R/diagnosisI.r	2009-03-15 03:02:55 UTC (rev 9)
+++ pkg/DiagnosisMed/R/diagnosisI.r	2009-04-13 20:29:56 UTC (rev 10)
@@ -40,7 +40,7 @@
   NPV<-TN/(TN+FN)
   NPV.cl<-as.numeric(binom.wilson(TN, TN+FN, conf.level = CL)[4:5])
   # diagnostic odds ratio and confidence limits
-  OR<-oddsratio(tab)
+  OR<-oddsratio(tab,conf.level = CL)
   DOR<-OR$measure[2,1]
   #DOR<-(TP*TN)/(FP*FN)
   DOR.inf.cl<-OR$measure[2,2]
@@ -58,6 +58,7 @@
   if(plot==FALSE)
     {ROC<-roc.from.table(tab, graph = FALSE)}
   AUC<-ROC$auc
+  # gives same results as AUC<-(Se+Sp)/2
   Youden<-Se+Sp-1
   Youden.inf.cl<-Youden-qnorm(CL/2)*sqrt(((Se * (1 - Se))/(TP+FN) +
            ((Sp * (1 - Sp))/(TN+FP))))
@@ -75,4 +76,4 @@
   class(reteval) <- "diag"
   if(print==TRUE)  {print(reteval)}
   invisible(reteval)
-}
+}
\ No newline at end of file

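The new comment on AUC refers to an identity for a dichotomous test: with a single operating point, the trapezoidal area under the two-segment ROC curve reduces to (Se+Sp)/2. A quick check:

    # ROC polygon with vertices (0,0), (1-Sp,Se), (1,1); its trapezoidal
    # area is (1-Sp)*Se/2 + Sp*(Se+1)/2, which simplifies to (Se+Sp)/2.
    Se <- 0.8; Sp <- 0.8
    (Se + Sp) / 2   # 0.8, the same value roc.from.table() reports for this table
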
Added: pkg/DiagnosisMed/R/print.ROC.r
===================================================================
--- pkg/DiagnosisMed/R/print.ROC.r	                        (rev 0)
+++ pkg/DiagnosisMed/R/print.ROC.r	2009-04-13 20:29:56 UTC (rev 10)
@@ -0,0 +1,16 @@
+print.ROC<-function(x,...,Full=FALSE){
+  if (Full==TRUE){ page(x$test.diag.table,method="print")}
+  cat("          Sample size:",paste(x$sample.size),"\n")
+  cat("    Sample prevalence:",paste(round(x$sample.prevalence,digits = 4)),"\n")
+  cat("Population prevalence:",paste(round(x$pop.prevalence,digits = 4))," - same as sample prevalence if not informed\n")
+  cat("\n\n")
+  cat("Non-parametric AUC (trapezoidal method) and its confidence",x$CL," limits (DeLong method)\n")  
+  cat(" Area under ROC curve:",paste(round(x$AUC.summary[2],digits = 4)),"[",paste(round(x$AUC.summary[1],digits = 4)),"-",
+        paste(round(x$AUC.summary[3],digits = 4)),"]\n")
+  cat("\n\n")
+  cat("Test summary-----------------------------------------------------\n")  
+  print(x$test.summary)
+  cat("\n\n")
+  cat("Best cut-off estimations with",x$CL,"confidence limits -----------\n")  
+  print(x$test.best.cutoff)
+}  

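With this method in place, a stored ROC result can be printed on demand (a sketch; the gold and test argument names and the Gold/Test_B columns are assumed from the .Rd examples in this commit):

    library(DiagnosisMed)
    data(tutorial)
    x <- ROC(gold = tutorial$Gold, test = tutorial$Test_B, Print = FALSE)
    print(x)               # the summary formatted by print.ROC
    print(x, Full = TRUE)  # additionally pages the full test.diag.table
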
Added: pkg/DiagnosisMed/R/print.TGROC.r
===================================================================
--- pkg/DiagnosisMed/R/print.TGROC.r	                        (rev 0)
+++ pkg/DiagnosisMed/R/print.TGROC.r	2009-04-13 20:29:56 UTC (rev 10)
@@ -0,0 +1,16 @@
+print.TGROC<-function(x,...){
+  cat("                Sample size:",paste(x$sample.size),"\n")
+  cat("          Sample prevalence:",paste(round(x$sample.prevalence,digits = 4)),"\n")
+  cat("      Population prevalence:",paste(round(x$pop.prevalence,digits = 4))," - same as sample prevalence if not informed\n")
+  cat("      Informed cost - FP/FN:",paste(round(x$cost,digits = 4)),"\n")
+  cat("Informed inconclusive level:",paste(round(x$inc,digits = 4)),"\n")
+  cat("\n\n")
+  cat("Test summary.---------------------------------------------------------------\n")  
+  print(x$test.summary)
+  cat("\n\n")
+  cat("Non-paramentric inconclusive cut-off limits with",x$inc,"inconclusive tolerance. \n")  
+  print(x$non.parametric.inconclusive)
+  cat("\n\n")
+  cat("Best cut-off estimations with",x$conf.limit,"confidence limits. ----------------------\n")  
+  print(x$best.cutoff)
+}  

Added: pkg/DiagnosisMed/man/LRgrgaph.Rd
===================================================================
--- pkg/DiagnosisMed/man/LRgrgaph.Rd	                        (rev 0)
+++ pkg/DiagnosisMed/man/LRgrgaph.Rd	2009-04-13 20:29:56 UTC (rev 10)
@@ -0,0 +1,57 @@
+\name{LRgraph}
+\alias{LRgraph}
+\title{Comparing diagnostic tests: a simple graphic using likelihood ratios.}
+\description{
+LRgraph graphically compares two or more diagnostic tests with binary results (each of them against the first test) through their likelihood ratios, based on the rationale that the predictive ability of a test is a more interesting characteristic than sensitivity and/or specificity alone. The graph makes it possible to see that tests with smaller sensitivity or specificity may still have superior predictive ability, that is, they increase the prediction ability with only a small sensitivity/specificity trade-off.
+}
+\usage{
+LRgraph(a = cbind(t1, t2), lwd = 2, lty = 1, cex = 1, leg.cex = 1.5, pt.cex = 2)
+}
+\arguments{
+  \item{a}{A matrix composed of two or more tests. The user should edit only what is inside the parentheses in a=cbind(), and may insert as many tests as desired. See t1, t2 below.}
+  \item{t1,t2}{ t1, t2, ..., tn are objects created by the \code{\link{diagnosis}} function (see example below). Therefore, one should first analyze the data for each single test and store the result in an object, then pass these objects to the LRgraph call. The object names will appear in the graph legend.}
+  \item{lwd}{Line width. See \link[graphics]{par},\link[graphics]{points},\link[graphics]{legend}}
+  \item{lty}{Line type. See \link[graphics]{par}}
+  \item{cex}{Symbols and text size. See \link[graphics]{par},\link[graphics]{points}}
+  \item{leg.cex}{Legend text size, this will replace the cex option in the legend. See \link[graphics]{legend}}
+  \item{pt.cex}{Size of the symbols in the legend. See \link[graphics]{legend}}
+}
+\details{When a diagnostic test has both sensitivity and specificity higher than a competing test, it is easy to see that the former is superior to the latter. However, sometimes a test may have superior sensitivity and inferior specificity (or the other way around). In this case, a good decision may be toward the test that has the better prediction ability. The graph visually helps the user to see and compare these abilities. The graph is very similar to the ROC graph: the vertical and horizontal axes have the same length, but the diagnostic tests are represented as dots instead of curves. The solid line passing through (0,0) is the positive likelihood ratio line and the solid line passing through (1,1) is the negative likelihood ratio line. The positive and negative likelihood ratios are numerically equivalent to the slopes of these solid lines. The solid lines split the graph into four areas (run the example). There are also dashed lines representing the sensitivity and specificity of the first test plotted. One may see that there are areas where a test has superior sensitivity (or specificity) and yet its dot lies below the likelihood solid line. That is because, in this situation, the sensitivity/specificity trade-off is not reasonable, leaving the test with less predictive ability. 
+
+}
+\value{
+Returns only a graph, which is divided into four areas by the black solid lines. The interpretation of the comparisons depends on which area each test falls in. See and run the example for an idea of how the interpretation should be done.
+}
+\references{Biggerstaff, B.J. Comparing diagnostic tests: a simple graphic using likelihood ratios. Statistics in Medicine. 2000; 19(5):649-663}
+\author{Pedro Brasil - \email{diagnosismed-list at lists.r-forge.r-project.org}}
+\note{Bug reports, malfunctioning, or suggestions for further improvements or contributions can be sent, preferentially, through the DiagnosisMed email list, or R-Forge website \url{https://r-forge.r-project.org/projects/diagnosismed/}.
+}
+\seealso{\code{\link{diagnosis}}}
+\examples{
+# Making tests with diagnosis function with different performances for comparison.
+# mytest5 is the one with which all others will be compared.
+mytest5<-diagnosisI(80,20,20,80,print=FALSE)
+# mytest1 has higher sensitivity and specificity.
+# mytest1 is overall superior compared to mytest5.
+mytest1<-diagnosisI(90,10,10,90,print=FALSE)
+# mytest2 has lower sensitivity but higher specificity.
+# mytest2 is better to identify the absence of the target condition compared to mytest5.
+mytest2<-diagnosisI(72,28,3,97,print=FALSE)
+# mytest3 has higher sensitivity but lower specificity.
+# mytest3 is better to identify the presence of the target condition compared to mytest5.
+mytest3<-diagnosisI(92,8,37,63,print=FALSE)
+# mytest41 has lower sensitivity and specificity.
+# mytest41 is overall inferior compared to mytest5.
+mytest41<-diagnosisI(72,28,35,65,print=FALSE)
+# mytest42 has lower sensitivity but higher specificity.
+# Yet, mytest42 is overall inferior compared to mytest5.
+mytest42<-diagnosisI(82,18,42,58,print=FALSE)
+# But that becomes clear only after plotting the tests.
+LRgraph(a=cbind(mytest5,mytest1,mytest2,mytest3,mytest41,mytest42),cex=2.5)
+# The texts below are not part of the function but help to understand the areas.
+text(x=.5, y =.5, labels ="Area 4: Overall inferior", col="lightgray",cex=.8)
+text(x=.5, y =1, labels ="Area 2: Absence", col="lightgray",cex=.8)
+text(x=.07, y =.73, labels ="Area 3: Presence", col="lightgray",cex=.8)
+text(x=.1, y =1, labels ="Area 1: Overall superior", col="lightgray",cex=.8)
+}
+\keyword{iplot}

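The slopes described in the \details section are the likelihood ratios themselves; a quick numeric check using the mytest5 values, with the PLR and NLR formulas as in diagnosis.r above:

    Se <- 0.8; Sp <- 0.8    # mytest5: diagnosisI(80, 20, 20, 80)
    PLR <- Se / (1 - Sp)    # 4    - slope of the solid line through (0,0)
    NLR <- (1 - Se) / Sp    # 0.25 - slope of the solid line through (1,1)
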
Modified: pkg/DiagnosisMed/man/ROC.Rd
===================================================================
--- pkg/DiagnosisMed/man/ROC.Rd	2009-03-15 03:02:55 UTC (rev 9)
+++ pkg/DiagnosisMed/man/ROC.Rd	2009-04-13 20:29:56 UTC (rev 10)
@@ -1,5 +1,6 @@
 \name{ROC}
 \alias{ROC}
+\alias{print.ROC}
 \title{Draw a ROC curve, estimate good cut-offs and compute validity measures
        for each cut-off}
 \description{
@@ -42,9 +43,9 @@
         Also, it can be inserted as any ratio such as 1/2.5 or 1/4.
         }
   \item{Prevalence}{Prevalence of the disease in the population in which the test will
-        be performed. If left 0 (the default value), this will be replaced by the
+        be performed. It must be a value from 0 to 1. If left 0 (the default value), this will be replaced by the
         disease prevalence in the sample. These values will be used in the MCT and
-        Efficiency formulas to estime good cut-offs.}
+        Efficiency formulas to estimate good cut-offs.}
   \item{Plot}{If FALSE, the ROC curve plot will not be displayed. Default is TRUE.}
   \item{Plot.point}{The method of best cut-off estimation which will be displayed
         at ROC curve as a dot. Default is "Min.ROC.Dist". Possible options are:
@@ -135,10 +136,10 @@
 
 }
 
-\author{Pedro Brasil - \email{diagnosismed-list at lists.r-forge.r-project.org}}
+\author{Pedro Brasil; Bernardo Rangel Tura - \email{diagnosismed-list at lists.r-forge.r-project.org}}
 \note{Bug reports, malfunctioning, or suggestions for further improvements or contributions can be sent, preferentially, through the DiagnosisMed email list, or R-Forge website \url{https://r-forge.r-project.org/projects/diagnosismed/}.
 }
-\seealso{\link[epitools]{binom.conf.int},\code{\link{diagnosis}},\code{\link{interact.ROC}},\link[ROCR]{performance}}
+\seealso{\link[epitools]{binom.conf.int},\code{\link{diagnosis}},\code{\link{interact.ROC}},\code{\link{TGROC}},\link[ROCR]{performance}}
 \examples{
 # loading a dataset
 data(tutorial)

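The Prevalence value feeds the MCT and Efficiency formulas mentioned above. For MCT, the formula spelled out in the Cost item of TGROC.Rd below can be written as a small helper (hypothetical, for illustration only):

    # MCT (misclassification cost term) as written in the Cost documentation;
    # lower values indicate a better cut-off.
    MCT <- function(Se, Sp, prevalence, Cost = 1) {
      (1 - prevalence) * (1 - Sp) + Cost * prevalence * (1 - Se)
    }
    MCT(Se = 0.9, Sp = 0.8, prevalence = 0.1, Cost = 1/2.5)
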
Added: pkg/DiagnosisMed/man/TGROC.Rd
===================================================================
--- pkg/DiagnosisMed/man/TGROC.Rd	                        (rev 0)
+++ pkg/DiagnosisMed/man/TGROC.Rd	2009-04-13 20:29:56 UTC (rev 10)
@@ -0,0 +1,71 @@
+\name{TGROC}
+\alias{TGROC}
+\alias{print.TGROC}
+\title{TG-ROC - Two Graphic Receiver Operating Characteristic}
+\description{
+TGROC draws a graph of sensitivity and specificity across the variations of a diagnostic test scale. It also demonstrates which cut-offs (or decision thresholds) may trichotomize the test results into a range where the test is good at identifying those with the target condition, an inconclusive range, and a range where the test is good at identifying those without the target condition, according to the researcher's tolerance. In addition, it estimates and graphically demonstrates good cut-offs by different methods. 
+}
+\usage{
+TGROC(gold,
+      test,
+      Cost = 1,
+      CL = 0.95,
+      Inconclusive = 0.95,
+      Prevalence = 0,
+      Plot = TRUE,
+      Plot.inc.range = TRUE,
+      Plot.Cl = FALSE,
+      Plot.cutoff = "None",
+      cex.sub = 0.85,
+      Print = TRUE)
+}
+\arguments{
+  \item{gold}{The reference standard. A column in a data frame or a vector indicating the classification by the reference test. The reference standard must have two levels: it must be coded either as 0 (without the target disease) or 1 (with the target disease), or it may be coded \link[base]{as.factor} with the words "negative" (without the target disease) and "positive" (with the target disease).}
+  \item{test}{The index test or test under evaluation. A column in a data frame or a vector indicating the test results on a continuous scale. It may also work with a discrete ordinal scale.}
+  \item{Cost}{Cost = cost(FN)/cost(FP). MCT (misclassification cost term) will be used to estimate a good cut-off. It is a value in a range from 0 to infinity. It could be a financial cost or a health outcome with the perception that FN are more undesirable than FP (or the other way around). This value enters the MCT formula: MCT = (1-prevalence)*(1-Sp)+Cost*prevalence*(1-Se). Cost = 1 means FN and FP have even cost. Cost = 0.9 means FP are 10 percent more costly. Cost = 0.769 means that FP are 30 percent more costly. Cost = 0.555 means that FP are 80 percent more costly. Cost = 0.3 means that FP are 3 times more costly. Cost = 0.2 means that FP are 5 times more costly. Also, it can be inserted as any ratio such as 1/2.5 or 1/4.}
+  \item{CL}{ Confidence limit. The limits of the confidence interval. Must be coded as number in range from 0 to 1. Default value is 0.95}
+  \item{Inconclusive}{A value that ranges from 0 to 1 and identifies the test range where the performance of the test is not acceptable and is thus considered inconclusive. It represents the researcher's tolerance for how good the test should be. If it is set to 0.95 (the default value), test results with less than 0.95 sensitivity and specificity will be in the inconclusive range.}
+  \item{Prevalence}{Prevalence of the disease in the population in which the test will be performed. It must be a value from 0 to 1. If left 0 (the default value), this will be replaced by the disease prevalence in the sample. These values will be used in the MCT and Efficiency formulas to estimate good cut-offs.}
+  \item{Plot}{If FALSE, the TG-ROC curve plot will not be displayed. Default is TRUE.}
+  \item{Plot.inc.range}{Plot inconclusive range. If FALSE, the lines representing the limits of the inconclusive range will not be displayed. Default is TRUE. If Plot is FALSE then Plot.inc.range is not considered.}
+  \item{Plot.Cl}{Plot confidence limits. If TRUE, confidence bands for the sensitivity and specificity curves will be displayed. Default is FALSE. If Plot is FALSE then Plot.Cl is not considered.}
+  \item{Plot.cutoff}{If not "None", a line representing the estimated best cut-off (threshold) will be displayed. If Plot is FALSE then Plot.cutoff is not considered. Default is "None". Possible values are:
+        "Se=Sp" - the cut-off which Sensitivity is equal to Specificity;
+
+        "Max.Efficiency" - the cut-off which maximize the efficiency;
+
+        "Min.MCT" - the cut-off which minimize the misclassification cost term.}
+  \item{cex.sub}{See \link[graphics]{par}. Controls the font size in the subtitle. If Plot is FALSE then cex.sub is not considered.}
+  \item{Print}{If FALSE, statistics estimated by TG-ROC will not be displayed in the output window. Default is TRUE.}
+}
+\details{
+There are two main advantages of TG-ROC over ROC analysis: (1) for the uninitiated, it is much easier to understand how sensitivity and specificity change with different cut-offs; (2) because of the graphical display, it is much easier to understand and estimate reasonable inconclusive test ranges. Occasionally the MCT or Efficiency cut-offs may fall outside the inconclusive range. This may happen with extreme values of Cost and population prevalence. If this is the case, perhaps the inconclusive range is not of interest or not applicable. Test results matching the cut-off values will be considered a positive test. TG-ROC assumes that subjects with higher values of the test are with the target condition and those with lower values are without the target condition. Tests that behave like glucose (middle values are supposed to be normal and extreme values are supposed to be abnormal) and immunofluorescence (lower values - higher dilutions - are supposed to be abnormal) will not be correctly analyzed. In the latter case, multiplying the test results by -1 or applying some other transformation before the analysis could make it work. The validity measures such as sensitivity, specificity and likelihood ratios and their confidence limits are estimated as in the \code{\link{diagnosis}} function. MCT and Efficiency are estimated as in the \code{\link{ROC}} function. So far, TGROC estimates only non-parametric statistics.}
+\value{
+  \item{Sample size}{Amount of subjects analyzed.}
+  \item{Sample prevalence}{Prevalence of target condition in the sample.}
+  \item{Population prevalence}{Informed prevalence in the population.}
+  \item{Test summary}{A summary of central and dispersion tendencies of test results.}
+  \item{Non-parametric inconclusive limits}{Estimates of the inconclusive limits of the test and their corresponding validity measures.}
+  \item{Best cut-off}{The cut-offs estimated by different methods and their corresponding validity measures.}
+}
+\references{Greiner, M. (1996) Two-graph receiver operating characteristic (TG-ROC): update version supports optimisation of cut-off values that minimize overall misclassification costs. J.Immunol.Methods 191:93-94.
+
+M. Greiner (1995) Two-graph receiver operating characteristic (TG-ROC): a Microsoft-EXCEL template for the selection of cut-off values in diagnostic tests. Journal of Immunological Methods. 185(1):145-146.
+
+M. Greiner, D. Sohr, P. Gobel (1995) A modified ROC analysis for the selection of cut-off values and the definition of intermediate results of serodiagnostic tests. Journal of immunological methods. 185(1):123-132.
+}
+\author{Pedro Brasil - \email{diagnosismed-list at lists.r-forge.r-project.org}}
+\note{Bug reports, malfunctioning, or suggestions for further improvements or contributions can be sent, preferentially, through the DiagnosisMed email list, or R-Forge website \url{https://r-forge.r-project.org/projects/diagnosismed/}.
+}
+\seealso{\code{\link{interact.ROC}},\code{\link{ROC}},\code{\link{diagnosis}},\link[ROCR]{performance},\link[epitools]{binom.conf.int}}
+\examples{
+# Loading a dataset.
+data(tutorial)
+# Attaching dataset
+attach(tutorial)
+# Running the analysis
+TGROC(gold=Gold,test=Test_B)
+}
+\keyword{iplot}
+\keyword{univar}
+\keyword{htest}

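The \details caveat about tests in which lower values indicate disease can be handled as suggested there, by reversing the scale before analysis (a sketch with simulated, purely hypothetical data):

    library(DiagnosisMed)
    set.seed(1)
    gold_vec <- rbinom(200, 1, 0.3)                # hypothetical reference standard
    titre <- rnorm(200, mean = 10 - 3 * gold_vec)  # LOWER values in the diseased
    TGROC(gold = gold_vec, test = -titre)          # sign flip restores the assumed direction
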
Modified: pkg/DiagnosisMed/man/diagnosis.Rd
===================================================================
--- pkg/DiagnosisMed/man/diagnosis.Rd	2009-03-15 03:02:55 UTC (rev 9)
+++ pkg/DiagnosisMed/man/diagnosis.Rd	2009-04-13 20:29:56 UTC (rev 10)
@@ -84,7 +84,7 @@
 \author{Pedro Brasil - \email{diagnosismed-list at lists.r-forge.r-project.org}}
 \note{Bug reports, malfunctioning, or suggestions for further improvements or contributions can be sent, preferentially, through the DiagnosisMed email list, or R-Forge website \url{https://r-forge.r-project.org/projects/diagnosismed/}.
 }
-\seealso{\code{\link{plot.diag}}, \code{\link{ROC}}, \link[epitools]{binom.conf.int},
+\seealso{\code{\link{plot.diag}}, \code{\link{ROC}}, \code{\link{LRgraph}}, \link[epitools]{binom.conf.int},
  \link[epibasix]{sensSpec},\link[epiR]{epi.tests}}
 \examples{
 # Simulating a dataset

Modified: pkg/DiagnosisMed/man/interact.ROC.Rd
===================================================================
--- pkg/DiagnosisMed/man/interact.ROC.Rd	2009-03-15 03:02:55 UTC (rev 9)
+++ pkg/DiagnosisMed/man/interact.ROC.Rd	2009-04-13 20:29:56 UTC (rev 10)
@@ -54,7 +54,7 @@
 \author{Pedro Brasil - \email{diagnosismed-list at lists.r-forge.r-project.org}}
 \note{Bug reports, malfunctioning, or suggestions for further improvements or contributions can be sent, preferentially, through the DiagnosisMed email list, or R-Forge website \url{https://r-forge.r-project.org/projects/diagnosismed/}.
 }
-\seealso{\code{\link{diagnosis}},\code{\link{ROC}},\link[ROCR]{performance},\link[TeachingDemos]{roc.demo}}
+\seealso{\code{\link{diagnosis}},\code{\link{ROC}},\code{\link{TGROC}},\link[ROCR]{performance},\link[TeachingDemos]{roc.demo}}
 \examples{
 data(rocdata)
 attach(rocdata)


