From noreply at r-forge.r-project.org Fri May 17 22:42:08 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 17 May 2013 22:42:08 +0200 (CEST) Subject: [Gsdesign-commits] r348 - pkg/gsDesign/R Message-ID: <20130517204208.E393B1851E2@r-forge.r-project.org> Author: keaven Date: 2013-05-17 22:42:08 +0200 (Fri, 17 May 2013) New Revision: 348 Modified: pkg/gsDesign/R/gsqplot.R Log: Updated gsHR to take null hypothesis hazard ratio into account. This should fix HR's in plots for non-inferiority designs. Modified: pkg/gsDesign/R/gsqplot.R =================================================================== --- pkg/gsDesign/R/gsqplot.R 2013-04-22 01:15:54 UTC (rev 347) +++ pkg/gsDesign/R/gsqplot.R 2013-05-17 20:42:08 UTC (rev 348) @@ -115,7 +115,7 @@ gsHR <- function(z, i, x, ratio=1, ylab="Estimated hazard ratio", ...) { c <- 1 / (1 + ratio) psi <- c * (1 - c) - hrHat <- exp(-z / sqrt(x$n.I[i] * psi)) + hrHat <- exp(-z / sqrt(x$n.I[i] * psi)) * x$hr0 hrHat } gsCPz <- function(z, i, x, theta=NULL, ylab=NULL, ...) @@ -414,7 +414,7 @@ { stop("Spending function plot not available for boundary families") } - if (x$upper$parname == "Points"){x$sfupar <- sfLinear} +# if (x$upper$parname == "Points"){x$sfupar <- sfLinear} t <- 0:40 / 40 * xmax From noreply at r-forge.r-project.org Sat May 18 12:05:57 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 18 May 2013 12:05:57 +0200 (CEST) Subject: [Gsdesign-commits] r349 - pkg/gsDesign/R Message-ID: <20130518100558.09AD91808DF@r-forge.r-project.org> Author: keaven Date: 2013-05-18 12:05:57 +0200 (Sat, 18 May 2013) New Revision: 349 Modified: pkg/gsDesign/R/gsqplot.R Log: Updated gsHR to take null hypothesis hazard ratio into account. This should fix HR's in plots for non-inferiority designs. Modified: pkg/gsDesign/R/gsqplot.R =================================================================== --- pkg/gsDesign/R/gsqplot.R 2013-05-17 20:42:08 UTC (rev 348) +++ pkg/gsDesign/R/gsqplot.R 2013-05-18 10:05:57 UTC (rev 349) @@ -115,6 +115,7 @@ gsHR <- function(z, i, x, ratio=1, ylab="Estimated hazard ratio", ...) { c <- 1 / (1 + ratio) psi <- c * (1 - c) + if (is.null(x$hr0)) x$hr0 <- 1 hrHat <- exp(-z / sqrt(x$n.I[i] * psi)) * x$hr0 hrHat } From noreply at r-forge.r-project.org Mon May 27 13:27:23 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 27 May 2013 13:27:23 +0200 (CEST) Subject: [Gsdesign-commits] r350 - in pkg/gsDesign: . R man Message-ID: <20130527112723.3B1A218540B@r-forge.r-project.org> Author: keaven Date: 2013-05-27 13:27:22 +0200 (Mon, 27 May 2013) New Revision: 350 Modified: pkg/gsDesign/DESCRIPTION pkg/gsDesign/NAMESPACE pkg/gsDesign/R/gsBinomial.R pkg/gsDesign/man/binomial.Rd pkg/gsDesign/man/gsDesign.Rd Log: ssrCP: new function for sample size re-estimation based on conditional power. nBinomial: Fixed output when outtype=3, n=NULL, and either scale="RR" or scale="LNOR". varBinomial: new function for blinded variance estimation for trials comparing two arms with binary outcome; will be demonstrated with information based sample size re-estimation in future. vignettes: Two vignettes added demonstrating time-to-event designs using knitr and gsSurv. 
xtable.gsDesign: updated to comply with conventions for extensions of generic methods Modified: pkg/gsDesign/DESCRIPTION =================================================================== --- pkg/gsDesign/DESCRIPTION 2013-05-18 10:05:57 UTC (rev 349) +++ pkg/gsDesign/DESCRIPTION 2013-05-27 11:27:22 UTC (rev 350) @@ -1,10 +1,13 @@ Package: gsDesign -Version: 2.8-00 +Version: 2.8-01 Title: Group Sequential Design Author: Keaven Anderson Maintainer: Keaven Anderson Depends: ggplot2, xtable +Suggests: knitr +VignetteBuilder: knitr Description: gsDesign is a package that derives group sequential designs and describes their properties. License: GPL (>= 2) Copyright: Copyright 2010, Merck Research Laboratories Packaged: 2011-12-30 13:26:23 UTC; Anderkea +Date: 2012-11-12 Modified: pkg/gsDesign/NAMESPACE =================================================================== --- pkg/gsDesign/NAMESPACE 2013-05-18 10:05:57 UTC (rev 349) +++ pkg/gsDesign/NAMESPACE 2013-05-27 11:27:22 UTC (rev 350) @@ -2,9 +2,9 @@ export(gsBoundCP, gsCP, gsPP, gsPI, gsPOS, gsCPOS, gsDensity, gsPosterior) export(gsCPz, gsHR, gsDelta, gsBValue, gsRR, hrn2z, hrz2n, zn2hr) export(gsBound, gsBound1, gsDesign, gsProbability) -export(ciBinomial, nBinomial, simBinomial, testBinomial, gsBinomialExact) +export(ciBinomial, nBinomial, simBinomial, testBinomial, gsBinomialExact, varBinomial) export(nSurvival, nEvents, nNormal) -export(normalGrid) +export(normalGrid,ssrCP) export(plot.gsDesign, plot.gsProbability, print.gsProbability, print.gsDesign) export(print.nSurvival, xtable.gsDesign, gsBoundSummary, xtable.gsSurv) export(sfPower, sfHSD, sfExponential, sfBetaDist, sfLDOF, sfLDPocock, sfPoints, sfLogistic, sfExtremeValue, sfExtremeValue2, sfLinear, sfTruncated) Modified: pkg/gsDesign/R/gsBinomial.R =================================================================== --- pkg/gsDesign/R/gsBinomial.R 2013-05-18 10:05:57 UTC (rev 349) +++ pkg/gsDesign/R/gsBinomial.R 2013-05-27 11:27:22 UTC (rev 350) @@ -111,209 +111,198 @@ cbind(lower=lower,upper=upper) } -"nBinomial"<-function(p1, p2, alpha=0.025, beta=0.1, delta0=0, ratio=1, - sided=1, outtype=1, scale="Difference", n=NULL) -{ - # check input arguments - checkVector(p1, "numeric", c(0, 1), c(FALSE, FALSE)) - checkVector(p2, "numeric", c(0, 1), c(FALSE, FALSE)) - checkScalar(sided, "integer", c(1, 2)) - checkScalar(alpha, "numeric", c(0, 1 / sided), c(FALSE, FALSE)) - checkVector(beta, "numeric", c(0, 1 - alpha / sided), c(FALSE, FALSE)) - checkVector(delta0, "numeric") - checkVector(ratio, "numeric", c(0, Inf), c(FALSE, FALSE)) - checkScalar(outtype, "integer", c(1, 3)) - checkScalar(scale, "character") - if (!is.null(n)) checkVector(n, "numeric") - scale <- match.arg(tolower(scale), c("difference", "rr", "or", "lnor")) - if (is.null(n)) checkLengths(p1, p2, beta, delta0, ratio, allowSingle=TRUE) - else checkLengths(n, p1, p2, delta0, ratio, allowSingle=TRUE) - # make all vector arguments the same length - len<-max(sapply(list(p1, p2, beta, delta0, ratio),length)) - if (len > 1) - { if (length(p1) == 1) p1<-array(p1,len) - if (length(p2) == 1) p2<-array(p2,len) - if (length(alpha) == 1) alpha<-array(alpha,len) - if (length(beta) == 1) beta<-array(beta,len) - if (length(delta0) == 1) delta0<-array(delta0,len) - if (length(ratio) == 1) ratio<-array(ratio,len) +"nBinomial" <- function(p1, p2, alpha = 0.025, beta = 0.1, delta0 = 0, ratio = 1, + sided = 1, outtype = 1, scale = "Difference", n = NULL) +{ + checkVector(p1, "numeric", c(0, 1), c(FALSE, FALSE)) + checkVector(p2, "numeric", 
c(0, 1), c(FALSE, FALSE)) + checkScalar(sided, "integer", c(1, 2)) + checkScalar(alpha, "numeric", c(0, 1/sided), c(FALSE, FALSE)) + checkVector(beta, "numeric", c(0, 1 - alpha/sided), c(FALSE, + FALSE)) + checkVector(delta0, "numeric") + checkVector(ratio, "numeric", c(0, Inf), c(FALSE, FALSE)) + checkScalar(outtype, "integer", c(1, 3)) + checkScalar(scale, "character") + if (!is.null(n)) + checkVector(n, "numeric") + scale <- match.arg(tolower(scale), c("difference", "rr", + "or", "lnor")) + if (is.null(n)) + checkLengths(p1, p2, beta, delta0, ratio, allowSingle = TRUE) + else checkLengths(n, p1, p2, delta0, ratio, allowSingle = TRUE) + len <- max(sapply(list(p1, p2, beta, delta0, ratio), length)) + if (len > 1) { + if (length(p1) == 1) + p1 <- array(p1, len) + if (length(p2) == 1) + p2 <- array(p2, len) + if (length(alpha) == 1) + alpha <- array(alpha, len) + if (length(beta) == 1) + beta <- array(beta, len) + if (length(delta0) == 1) + delta0 <- array(delta0, len) + if (length(ratio) == 1) + ratio <- array(ratio, len) + } + if (max(delta0 == 0) > 0 && max(p1[delta0 == 0] == p2[delta0 == + 0]) > 0) { + stop("p1 may not equal p2 when delta0 is zero") + } + z.beta <- qnorm(1 - beta) + sided[sided != 2] <- 1 + z.alpha <- qnorm(1 - alpha/sided) + d0 <- (delta0 == 0) + if (scale == "difference") { + if (min(abs(p1 - p2 - delta0)) < 1e-11) { + stop("p1 - p2 may not equal delta0 when scale is \"Difference\"") } - if (max(delta0 == 0) > 0 && max(p1[delta0 == 0] == p2[delta0 == 0]) > 0) - { - stop("p1 may not equal p2 when delta0 is zero") + a <- 1 + ratio + b <- -(a + p1 + ratio * p2 + delta0 * (ratio + 2)) + c <- delta0^2 + delta0 * (2 * p1 + a) + p1 + ratio * + p2 + d <- -p1 * delta0 * (1 + delta0) + v <- (b/(3 * a))^3 - b * c/6/a^2 + d/2/a + u <- (sign(v) + (v == 0)) * sqrt((b/3/a)^2 - c/3/a) + w <- (pi + acos(v/u^3))/3 + p10 <- 2 * u * cos(w) - b/3/a + p20 <- p10 - delta0 + p10[d0] <- (p1[d0] + ratio[d0] * p2[d0])/(1 + ratio[d0]) + p20[d0] <- p10[d0] + sigma0 <- sqrt((p10 * (1 - p10) + p20 * (1 - p20)/ratio) * + (ratio + 1)) + sigma1 <- sqrt((p1 * (1 - p1) + p2 * (1 - p2)/ratio) * + (ratio + 1)) + if (is.null(n)) { + n <- ((z.alpha * sigma0 + z.beta * sigma1)/(p1 - p2 - delta0))^2 + if (outtype == 2) { + return(data.frame(cbind(n1 = n/(ratio + 1), n2 = ratio * + n/(ratio + 1)))) + } + else if (outtype == 3) { + return(data.frame(cbind(n = n, n1 = n/(ratio + 1), + n2 = ratio * n/(ratio + 1), alpha = alpha, + sided = sided, beta = beta, Power = 1 - beta, + sigma0 = sigma0, sigma1 = sigma1, p1 = p1, + p2 = p2, delta0 = delta0, p10 = p10, p20 = p20))) + } + else return(n = n) } - - # get z-values needed - z.beta <- qnorm(1 - beta) - - # coerce all sided values not equal to 2 to 1 - sided[sided != 2] <- 1 - - z.alpha <- qnorm(1 - alpha / sided) - d0 <- (delta0 == 0) - - # sample size for risk difference - Farrington and Manning - if (scale == "difference") - { - if (min(abs(p1 - p2 - delta0)) < .1e-10) - { - stop("p1 - p2 may not equal delta0 when scale is \"Difference\"") - } - a <- 1 + ratio - b <- -(a + p1 + ratio * p2 + delta0 * (ratio + 2)) - c <- delta0 ^ 2 + delta0 * (2 * p1 + a) + p1 + ratio * p2 - d <- -p1 * delta0 * (1 + delta0) - v <- (b / (3 * a)) ^ 3 - b * c / 6 / a ^ 2 + d / 2 / a - u <- (sign(v) + (v==0)) * sqrt((b / 3 / a) ^ 2 - c / 3 / a) - w <- (pi + acos(v /u ^ 3)) / 3 - p10 <- 2 * u * cos(w) - b / 3 / a - p20 <- p10 - delta0 - p10[d0] <- (p1[d0] + ratio[d0] * p2[d0]) / (1 + ratio[d0]) - p20[d0] <- p10[d0] - sigma0 <- sqrt((p10 * (1 - p10) + p20 * (1 - p20) / ratio) - * (ratio 
+ 1)) - sigma1 <- sqrt((p1 * (1 - p1) + p2 * (1 - p2) / ratio) * (ratio + 1)) - if (is.null(n)){ - n <- ((z.alpha * sigma0 + z.beta * sigma1) / (p1 - p2 - delta0)) ^ 2 - if (outtype == 2) - { - return(data.frame(cbind(n1=n / (ratio + 1), n2=ratio * n / (ratio + 1)))) - } - else if (outtype == 3) - { - return(data.frame(cbind(n=n, n1=n / (ratio + 1), n2=ratio * n / (ratio + 1), - alpha = alpha, sided=sided, beta = beta, Power = 1-beta, - sigma0=sigma0, sigma1=sigma1, p1=p1 ,p2=p2, - delta0=delta0, p10=p10, p20=p20))) - } - else return(n=n) - } - else - { pwr <- pnorm(-(qnorm(1-alpha/sided)-sqrt(n) * ((p1 - p2 - delta0)/sigma0))*sigma0/sigma1) - if (outtype == 2) - { - return(data.frame(cbind(n1=n / (ratio + 1), n2=ratio * n / (ratio + 1), Power=pwr))) - } - else if (outtype == 3) - { - return(data.frame(cbind(n=n, n1=n / (ratio + 1), n2=ratio * n / (ratio + 1), - alpha = alpha, sided=sided, beta = 1-pwr, Power = pwr, - sigma0=sigma0, sigma1=sigma1, p1=p1 ,p2=p2, - delta0=delta0, p10=p10, p20=p20))) - } - else return(Power=pwr) - } + else { + pwr <- pnorm(-(qnorm(1 - alpha/sided) - sqrt(n) * + ((p1 - p2 - delta0)/sigma0)) * sigma0/sigma1) + if (outtype == 2) { + return(data.frame(cbind(n1 = n/(ratio + 1), n2 = ratio * + n/(ratio + 1), Power = pwr))) + } + else if (outtype == 3) { + return(data.frame(cbind(n = n, n1 = n/(ratio + 1), + n2 = ratio * n/(ratio + 1), alpha = alpha, + sided = sided, beta = 1 - pwr, Power = pwr, + sigma0 = sigma0, sigma1 = sigma1, p1 = p1, + p2 = p2, delta0 = delta0, p10 = p10, p20 = p20))) + } + else return(Power = pwr) } - # sample size for risk ratio - Farrington and Manning - else if (scale == "rr") - { - RR <- exp(delta0) - if (min(abs(p1 / p2 - RR)) < .1e-6) - { - stop("p1/p2 may not equal exp(delta0) when scale=\"RR\"") - } - a <- (1 + ratio) - b <- -(RR * (1 + ratio * p2) + ratio + p1) - c <- RR * (p1 + ratio * p2) - p10 <- (-b - sqrt(b ^ 2 - 4 * a * c)) / 2 / a - p20 <- p10 / RR - p10[d0] <- (p1[d0] + ratio[d0] * p2[d0]) / (1 + ratio[d0]) - p20[d0] <- p10[d0] - sigma0 <- sqrt((ratio + 1) * - (p10 * (1 - p10) + RR ^ 2 * p20 * (1 - p20) / ratio)) - sigma1 <- sqrt((ratio + 1) * - (p1 * (1 - p1) + RR ^ 2 * p2 * (1 - p2) / ratio)) - if (is.null(n)){ - n <- ((z.alpha * sigma0 + z.beta * sigma1) / (p1 - p2 * RR)) ^ 2 - if (outtype == 2) - { - return(data.frame(data.frame(cbind(n1=n / (ratio + 1), - n2=ratio * n / (ratio + 1))))) - } - else if (outtype == 3) - { - return(data.frame(cbind(n=n, n1=n / (ratio + 1), n2=ratio * n / (ratio + 1), - alpha = alpha, sided=sided, beta = 1-pwr, Power = pwr, - sigma0=sigma0, sigma1=sigma1, p1=p1, p2=p2, - delta0=delta0, p10=p10, p20=p20))) - } - else return(n=n) - } - else - { pwr <- pnorm(-(qnorm(1-alpha/sided)-sqrt(n) * ((p1 - p2 * RR)/sigma0))*sigma0/sigma1) - if (outtype == 2) - { - return(data.frame(cbind(n1=n / (ratio + 1), n2=ratio * n / (ratio + 1), Power=pwr))) - } - else if (outtype == 3) - { - return(data.frame(cbind(n=n, n1=n / (ratio + 1), n2=ratio * n / (ratio + 1), - alpha = alpha, sided=sided, beta = 1-pwr, Power = pwr, - sigma0=sigma0, sigma1=sigma1, p1=p1 ,p2=p2, - delta0=delta0, p10=p10, p20=p20))) - } - else return(Power=pwr) - } + } + else if (scale == "rr") { + RR <- exp(delta0) + if (min(abs(p1/p2 - RR)) < 1e-07) { + stop("p1/p2 may not equal exp(delta0) when scale=\"RR\"") + } + a <- (1 + ratio) + b <- -(RR * (1 + ratio * p2) + ratio + p1) + c <- RR * (p1 + ratio * p2) + p10 <- (-b - sqrt(b^2 - 4 * a * c))/2/a + p20 <- p10/RR + p10[d0] <- (p1[d0] + ratio[d0] * p2[d0])/(1 + ratio[d0]) + p20[d0] <- p10[d0] 
+ sigma0 <- sqrt((ratio + 1) * (p10 * (1 - p10) + RR^2 * + p20 * (1 - p20)/ratio)) + sigma1 <- sqrt((ratio + 1) * (p1 * (1 - p1) + RR^2 * + p2 * (1 - p2)/ratio)) + if (is.null(n)) { + n <- ((z.alpha * sigma0 + z.beta * sigma1)/(p1 - p2 * RR))^2 + if (outtype == 2) { + return(data.frame(cbind(n1 = n/(ratio + 1), + n2 = ratio * n/(ratio + 1)))) } - - # sample size for log-odds-ratio - based on Miettinen and Nurminen max - # likelihood estimate and asymptotic variance from, e.g., Lachin (2000) - else - { - OR <- exp(-delta0) - if (min(abs(p1 / (1 - p1) / p2 * (1 - p2) * OR) - 1) < .1e-6) - { - stop("p1/(1-p1)/p2*(1-p2) may not equal exp(delta0) when scale=\"OR\"") - } - a <- OR - 1 - b <- 1 + ratio * OR + (1 - OR) * (ratio * p2 + p1) - c <- -(ratio * p2 + p1) - p10 <- (-b + sqrt(b ^ 2 - 4 * a * c)) / 2 / a - p20 <- OR * p10 / (1 + p10 * (OR - 1)) - p10[d0] <- (p1[d0] + ratio[d0] * p2[d0]) / (1 + ratio[d0]) - p20[d0] <- p10[d0] - sigma0 <- sqrt((ratio + 1) * - (1 / p10 / (1 - p10) + 1 / p20 / (1 - p20) / ratio)) - sigma1 <- sqrt((ratio + 1) * - (1 / p1 / (1 - p1) + 1 / p2 / (1 - p2) / ratio)) - - if (is.null(n)){ - n <- ((z.alpha * sigma0 + z.beta * sigma1) / - log(OR / p2 * (1 - p2) * p1 / (1 - p1))) ^ 2 - - if (outtype == 2) - { - return(data.frame(cbind(n1=n / (ratio + 1), n2=ratio * n / (ratio + 1)))) - } - else if (outtype == 3) - { - return(data.frame(cbind(n=n, n1=n / (ratio+1), n2=ratio * n / (ratio + 1), - alpha = alpha, sided=sided, beta = 1-pwr, Power = pwr, - sigma0=sigma0, sigma1=sigma1, p1=p1, p2=p2, - delta0=delta0, p10=p10, p20=p20))) - } - else - { - return(n=n) - } - } - else - { pwr <- pnorm(-(qnorm(1-alpha/sided)-sqrt(n) * - (log(OR / p2 * (1 - p2) * p1 / (1 - p1))/sigma0))*sigma0/sigma1) - if (outtype == 2) - { - return(data.frame(cbind(n1=n / (ratio + 1), n2=ratio * n / (ratio + 1), Power=pwr))) - } - else if (outtype == 3) - { - return(data.frame(cbind(n=n, n1=n / (ratio + 1), n2=ratio * n / (ratio + 1), - alpha = alpha, sided=sided, beta = 1-pwr, Power = pwr, - sigma0=sigma0, sigma1=sigma1, p1=p1 ,p2=p2, - delta0=delta0, p10=p10, p20=p20))) - } - else return(Power=pwr) - } + else if (outtype == 3) { + return(data.frame(cbind(n = n, n1 = n/(ratio + 1), + n2 = ratio * n/(ratio + 1), alpha = alpha, + sided = sided, beta = beta, Power = 1-beta, + sigma0 = sigma0, sigma1 = sigma1, p1 = p1, + p2 = p2, delta0 = delta0, p10 = p10, p20 = p20))) + } + else return(n = n) } + else { + pwr <- pnorm(-(qnorm(1 - alpha/sided) - sqrt(n) * + ((p1 - p2 * RR)/sigma0)) * sigma0/sigma1) + if (outtype == 2) { + return(data.frame(cbind(n1 = n/(ratio + 1), n2 = ratio * + n/(ratio + 1), Power = pwr))) + } + else if (outtype == 3) { + return(data.frame(cbind(n = n, n1 = n/(ratio + 1), + n2 = ratio * n/(ratio + 1), alpha = alpha, + sided = sided, beta = 1 - pwr, Power = pwr, + sigma0 = sigma0, sigma1 = sigma1, p1 = p1, + p2 = p2, delta0 = delta0, p10 = p10, p20 = p20))) + } + else return(Power = pwr) + } + } + else { + OR <- exp(-delta0) + if (min(abs(p1/(1 - p1)/p2 * (1 - p2) * OR) - 1) < 1e-07) { + stop("p1/(1-p1)/p2*(1-p2) may not equal exp(delta0) when scale=\"OR\"") + } + a <- OR - 1 + b <- 1 + ratio * OR + (1 - OR) * (ratio * p2 + p1) + c <- -(ratio * p2 + p1) + p10 <- (-b + sqrt(b^2 - 4 * a * c))/2/a + p20 <- OR * p10/(1 + p10 * (OR - 1)) + p10[d0] <- (p1[d0] + ratio[d0] * p2[d0])/(1 + ratio[d0]) + p20[d0] <- p10[d0] + sigma0 <- sqrt((ratio + 1) * (1/p10/(1 - p10) + 1/p20/(1 - p20)/ratio)) + sigma1 <- sqrt((ratio + 1) * (1/p1/(1 - p1) + 1/p2/(1 - p2)/ratio)) + if (is.null(n)) { + n <- 
((z.alpha * sigma0 + z.beta * sigma1)/log(OR/p2 * (1 - p2) * p1/(1 - p1)))^2 + if (outtype == 2) { + return(data.frame(cbind(n1 = n/(ratio + 1), n2 = ratio * + n/(ratio + 1)))) + } + else if (outtype == 3) { + return(data.frame(cbind(n = n, n1 = n/(ratio + 1), + n2 = ratio * n/(ratio + 1), alpha = alpha, + sided = sided, beta = beta, Power = 1-beta, + sigma0 = sigma0, sigma1 = sigma1, p1 = p1, + p2 = p2, delta0 = delta0, p10 = p10, p20 = p20))) + } + else { + return(n = n) + } + } + else { + pwr <- pnorm(-(qnorm(1 - alpha/sided) - sqrt(n) * + (log(OR/p2 * (1 - p2) * p1/(1 - p1))/sigma0)) * + sigma0/sigma1) + if (outtype == 2) { + return(data.frame(cbind(n1 = n/(ratio + 1), n2 = ratio * + n/(ratio + 1), Power = pwr))) + } + else if (outtype == 3) { + return(data.frame(cbind(n = n, n1 = n/(ratio + 1), + n2 = ratio * n/(ratio + 1), alpha = alpha, + sided = sided, beta = 1 - pwr, Power = pwr, + sigma0 = sigma0, sigma1 = sigma1, p1 = p1, + p2 = p2, delta0 = delta0, p10 = p10, p20 = p20))) + } + else return(Power = pwr) + } + } } "simBinomial" <- function(p1, p2, n1, n2, delta0=0, nsim=10000, chisq=0, adj=0, scale="Difference") Modified: pkg/gsDesign/man/binomial.Rd =================================================================== --- pkg/gsDesign/man/binomial.Rd 2013-05-18 10:05:57 UTC (rev 349) +++ pkg/gsDesign/man/binomial.Rd 2013-05-27 11:27:22 UTC (rev 350) @@ -3,6 +3,7 @@ \alias{ciBinomial} \alias{nBinomial} \alias{simBinomial} +\alias{varBinomial} \title{3.2: Testing, Confidence Intervals, Sample Size and Power for Comparing Two Binomial Rates} \description{Support is provided for sample size estimation, power, testing, confidence intervals and simulation for fixed sample size trials (that is, not group sequential or adaptive) with two arms and binary outcomes. @@ -31,6 +32,9 @@ As noted in documentation for \code{bpower.sim()} in the Hmisc package, by using \code{testBinomial()} you can see that the formulas without any continuity correction are quite accurate. In fact, Type I error for a continuity-corrected test is significantly lower (Gordon and Watson, 1996) than the nominal rate. Thus, as a default no continuity corrections are performed. + +\code{varBinomial} computes blinded estimates of the variance of the estimate of 1) event rate differences, 2) logarithm of the risk ratio, or 3) logarithm of the odds ratio. This is intended for blinded sample size re-estimation for comparative trials with a binary outcome. + } \usage{ @@ -41,6 +45,7 @@ ciBinomial(x1, x2, n1, n2, alpha=.05, adj=0, scale="Difference") simBinomial(p1, p2, n1, n2, delta0=0, nsim=10000, chisq=0, adj=0, scale="Difference") +varBinomial(x,n,delta0=0,ratio=1,scale="Difference") } \arguments{ For \code{simBinomial()} and \code{ciBinomial()} all arguments must have length 1. @@ -49,7 +54,9 @@ For \code{nBinomial()}, \code{p1, p2, beta, delta0} and \code{ratio} may be vectors. -For \code{nBinomial()} or \code{testBinomial()}, when one or more arguments is a vector, the routines return a vector of sample sizes and powers, respectively. +For \code{varBinomial()}, \code{x} may be a vector. + +For \code{nBinomial()}, \code{testBinomial()} and \code{varBinomial()}, when one or more arguments is a vector, the routines return a vector of sample sizes or powers (\code{nBinomial}), test statistics (\code{testBinomial}), or variance estimates (\code{varBinomial}). Where vector arguments are allowed, there may be a mix of scalar and vector arguments. All arguments specified using vectors must have the same length.
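For orientation, a minimal sketch of the nBinomial() code path this revision repairs (outtype=3 with n=NULL and scale="RR"); the event rates and error rates below are illustrative values, not taken from the commit:

library(gsDesign)
# sample size for a risk-ratio test, returning the full data frame;
# before r350 this branch referenced an undefined 'pwr', and it now
# returns beta as input together with Power = 1 - beta
nBinomial(p1 = .15, p2 = .1, alpha = .025, beta = .2, scale = "RR", outtype = 3)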
@@ -70,7 +77,8 @@ 3 returns a data frame with total sample size (\code{n}), sample size in each group (\code{n1, n2}), Type I error (\code{alpha}), 1 or 2 (\code{sided}, as input), Type II error (\code{beta}), power (\code{Power}), null and alternate hypothesis standard deviations (\code{sigma0, sigma1}), input event rates (\code{p1, p2}), null hypothesis difference in treatment group means (\code{delta0}) and null hypothesis event rates (\code{p10, p20}). } -\item{n}{If power is to be computed in \code{nBinomial()}, input total trial sample size in \code{n}.} +\item{n}{If power is to be computed in \code{nBinomial()}, input total trial sample size in \code{n}; this may be a vector. This is also the sample size in \code{varBinomial}, in which case the argument must be a scalar.} +\item{x}{Number of \dQuote{successes} in the combined control and experimental groups.} \item{x1}{Number of \dQuote{successes} in the control group} \item{x2}{Number of \dQuote{successes} in the experimental group} \item{n1}{Number of observations in the control group} @@ -111,7 +119,7 @@ and a difference between these rates under the null hypothesis, \code{delta0}. From these values, actual rates under the null hypothesis are computed, which are labeled \code{p10} and \code{p20} when \code{outtype=3}. The rates \code{p1} and \code{p2} are used to compute a variance for a Z-test comparing rates under the alternative hypothesis, -while \code{p10} and \code{p20} are used under the null hypothesis. +while \code{p10} and \code{p20} are used under the null hypothesis. This computational method is also used to estimate variances in \code{varBinomial()} based on the overall event rate observed and the input treatment difference specified in \code{delta0}. Sample size with \code{scale="Difference"} produces an error if \code{p1-p2=delta0}. Normally, the alternative hypothesis under consideration would be \code{p1-p2-delta0}$>0$. @@ -124,6 +132,8 @@ \code{ciBinomial()} returns a vector with a confidence interval. + \code{varBinomial()} returns a vector of (blinded) variance estimates of the difference of event rates (\code{scale="Difference"}), logarithm of the odds-ratio (\code{scale="OR"}) or logarithm of the risk-ratio (\code{scale="RR"}). + With the default \code{outtype=1}, \code{nBinomial()} returns a vector of total sample sizes. With \code{outtype=2}, \code{nBinomial()} returns a data frame containing two vectors \code{n1} and \code{n2} containing sample sizes for groups 1 and 2, respectively; if \code{n} is input, this option also returns the power in a third vector, \code{Power}.
@@ -135,8 +145,8 @@ \item{sided}{As input} \item{beta}{As input; if \code{n} is input, this is computed} \item{Power}{If \code{n=NULL} on input, this is \code{1-beta}; otherwise, the power is computed for each sample size input} -\item{sigma0}{A vector containing the standard deviation of the treatment effect difference under the null hypothesis} - \item{sigma1}{A vector containing the standard deviation of the treatment effect difference under the alternative hypothesis} +\item{sigma0}{A vector containing the standard deviation of the treatment effect difference under the null hypothesis times \code{sqrt(n)} when \code{scale="Difference"} or \code{scale="OR"}; when \code{scale="RR"}, this is the standard deviation times \code{sqrt(n)} for the numerator of the Farrington-Manning test statistic \code{x1-exp(delta0)*x2}.} + \item{sigma1}{A vector containing the same values as \code{sigma0}, in this case estimated under the alternative hypothesis.} \item{p1}{As input} \item{p2}{As input} \item{p10}{group 1 event rate used for null hypothesis} @@ -224,6 +234,17 @@ legend(x=c(.15, .2),y=c(4500, 6000),lty=c(2, 1, 3, 4), lwd=2, legend=c("25 pct reduction", "33 pct reduction", "40 pct reduction", "50 pct reduction")) + +# compute blinded estimate of treatment effect difference +x1 <- rbinom(n=1,size=100,p=.2) +x2 <- rbinom(n=1,size=200,p=.1) +# blinded estimate of risk difference variance +varBinomial(x=x1+x2,n=300,ratio=2,delta0=0) +# blinded estimate of log-risk-ratio +varBinomial(x=x1+x2,n=300,ratio=2,delta0=0,scale="RR") +# blinded estimate of log-odds-ratio +varBinomial(x=x1+x2,n=300,ratio=2,delta0=0,scale="OR") + } \keyword{design} Modified: pkg/gsDesign/man/gsDesign.Rd =================================================================== --- pkg/gsDesign/man/gsDesign.Rd 2013-05-18 10:05:57 UTC (rev 349) +++ pkg/gsDesign/man/gsDesign.Rd 2013-05-27 11:27:22 UTC (rev 350) @@ -1,16 +1,18 @@ \name{gsDesign} \alias{gsDesign} \alias{print.gsDesign} +\alias{xtable.gsDesign} \title{2.1: Design Derivation} \description{\code{gsDesign()} is used to find boundaries and trial size required for a group sequential design.} \usage{ gsDesign(k=3, test.type=4, alpha=0.025, beta=0.1, astar=0, delta=0, n.fix=1, timing=1, sfu=sfHSD, sfupar=-4, sfl=sfHSD, sflpar=-2, tol=0.000001, r=18, n.I = 0, - maxn.IPlan = 0, nFixSurv=0, endpoint=NULL, delta1=1, delta0=0) - -\method{print}{gsDesign}(x,...)} - + maxn.IPlan = 0, nFixSurv=0, endpoint=NULL, delta1=1, delta0=0) +\method{print}{gsDesign}(x,\dots) +\method{xtable}{gsDesign}(x, caption = NULL, label=NULL, align=NULL, digits=NULL, display=NULL, + footnote = NULL, fnwid = "9cm", deltaname="delta", + Nname="N",logdelta=FALSE, \dots)} \arguments{ \item{k}{Number of analyses planned, including interim and final.} \item{test.type}{\code{1=}one-sided \cr @@ -25,7 +27,7 @@ probability of crossing a lower bound at all analyses combined. This will be changed to \eqn{1 - }\code{alpha} when default value of 0 is used. Since this is the expected usage, normally \code{astar} is not specified by the user.} - \item{delta}{Standardized effect size for theta under alternative hypothesis. See details and examples.} + \item{delta}{Effect size for theta under alternative hypothesis. For \code{xtable.gsDesign} this would normally be set to the natural parameter value. For \code{gsDesign} this can be set to the standardized effect size to generate a sample size if \code{n.fix=NULL}.
See details and examples.} \item{n.fix}{Sample size for fixed design with no interim; used to find maximum group sequential sample size. For a time-to-event outcome, input number of events required for a fixed design rather than sample size and enter fixed design sample size (optional) in \code{nFixSurv}. See details and examples.} @@ -57,14 +59,24 @@ \item{n.I}{Used for re-setting bounds when timing of analyses changes from initial design; see examples.} \item{maxn.IPlan}{Used for re-setting bounds when timing of analyses changes from initial design; see examples.} \item{nFixSurv}{If a time-to-event variable is used, \code{nFixSurv} computed as the sample size from \code{nSurvival} may be entered to have \code{gsDesign} - compute the total sample size required as well as the number of events at each analysis that will be returned in \code{n.fix}; this is rounded up to an even number.}| - \item{x}{In \code{print.gsDesign} this is an object of class gsDesign.} - \item{...}{This should allow optional arguments that are standard when calling \code{print}.} + compute the total sample size required as well as the number of events at each analysis that will be returned in \code{n.fix}; this is rounded up to an even number.} - \item{endpoint}{An optional character string that should represent the type of endpoint used for the study. This may be used by output functions. Types most likely to be recognized initially are "TTE" for time-to-event outcomes with fixed design sample size generated by \code{nSurvival()} and "Binomial" for 2-sample binomial outcomes with fixed design sample size generated by \code{nBinomial()}.} + \item{endpoint}{An optional character string that should represent the type of endpoint used for the study. This may be used by output functions. Types most likely to be recognized initially are "TTE" for time-to-event outcomes with fixed design sample size generated by \code{nSurvival()} and "Binomial" for 2-sample binomial outcomes with fixed design sample size generated by \code{nBinomial()}.} \item{delta1}{\code{delta1} and \code{delta0} may be used to store information about the natural parameter scale compared to \code{delta} that is a standardized effect size. \code{delta1} is the alternative hypothesis parameter value on the natural scale (e.g., the difference in two binomial rates).} \item{delta0}{The parameter value under the null hypothesis on the natural parameter scale. Default is 0, which might normally be interpreted as no difference between two treatment groups. If non-zero, this would normally represent a non-inferiority margin.} + \item{x}{In \code{print.gsDesign} this is an object of class gsDesign.} + \item{caption}{Table caption.} + \item{label}{Character vector of length 1 containing the LaTeX label or HTML anchor. Set to NULL to suppress the label. Default value is NULL.} + \item{align}{Character vector of length equal to the number of columns of the resulting table indicating the alignment of the corresponding columns. Also, "|" may be used to produce vertical lines between columns in LaTeX tables, but these are effectively ignored when considering the required length of the supplied vector. If a character vector of length one is supplied, it is split as strsplit(align, "")[[1]] before processing. Since the row names are printed in the first column, the length of align is one greater than ncol(x) if x is a data.frame. Use "l", "r", and "c" to denote left, right, and center alignment, respectively. 
Use "p\{3cm\}" etc for a LaTeX column of the specified width. For HTML output the "p" alignment is interpreted as "l", ignoring the width request. Default depends on the class of x.} + \item{digits}{Numeric vector of length equal to one (in which case it will be replicated as necessary) or to the number of columns of the resulting table or matrix of the same size as the resulting table indicating the number of digits to display in the corresponding columns. Since the row names are printed in the first column, the length of the vector digits or the number of columns of the matrix digits is one greater than ncol(x) if x is a data.frame. Default depends of class of x. If values of digits are negative, the corresponding values of x are displayed in scientific format with abs(digits) digits.} + \item{display}{Character vector of length equal to the number of columns of the resulting table indicating the format for the corresponding columns. Since the row names are printed in the first column, the length of display is one greater than ncol(x) if x is a data.frame. These values are passed to the formatC function. Use "d" (for integers), "f", "e", "E", "g", "G", "fg" (for reals), or "s" (for strings). "f" gives numbers in the usual xxx.xxx format; "e" and "E" give n.ddde+nn or n.dddE+nn (scientific format); "g" and "G" put x[i] into scientific format only if it saves space to do so. "fg" uses fixed format as "f", but digits as number of significant digits. Note that this can lead to quite long result strings. Default depends on the class of x.} + \item{footnote}{Table footnote.} + \item{fnwid}{Width for table footnote.} + \item{deltaname}{Parameter name for output.} + \item{Nname}{Sample size name for output (e.g., N, Events, Deaths, Information).} + \item{logdelta}{TRUE if natural parameter is on log-scale, but you want output on linear scale.} + \item{\dots}{This should allow optional arguments that are standard when calling \code{print} or \code{xtable}.} } \value{ [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/gsdesign -r 350 From noreply at r-forge.r-project.org Mon May 27 20:30:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 27 May 2013 20:30:09 +0200 (CEST) Subject: [Gsdesign-commits] r351 - in pkg/gsDesign: . R man Message-ID: <20130527183009.83B7B1856F0@r-forge.r-project.org> Author: keaven Date: 2013-05-27 20:30:09 +0200 (Mon, 27 May 2013) New Revision: 351 Modified: pkg/gsDesign/NAMESPACE pkg/gsDesign/R/gsBinomialExact.R pkg/gsDesign/man/gsBinomialExact.Rd pkg/gsDesign/man/gsDesign.Rd Log: binomialSPRT: new function to produce truncated sequential probability ratio test (SPRT) for a single sample binomial trial. Also included plotting for this design. gsBinomialExact: updated plotting capabilities. 
Modified: pkg/gsDesign/NAMESPACE =================================================================== --- pkg/gsDesign/NAMESPACE 2013-05-27 11:27:22 UTC (rev 350) +++ pkg/gsDesign/NAMESPACE 2013-05-27 18:30:09 UTC (rev 351) @@ -3,7 +3,7 @@ export(gsCPz, gsHR, gsDelta, gsBValue, gsRR, hrn2z, hrz2n, zn2hr) export(gsBound, gsBound1, gsDesign, gsProbability) export(ciBinomial, nBinomial, simBinomial, testBinomial, gsBinomialExact, varBinomial) -export(nSurvival, nEvents, nNormal) +export(binomialSPRT, plot.binomialSPRT, plot.gsBinomialExact, nSurvival, nEvents, nNormal) export(normalGrid,ssrCP) export(plot.gsDesign, plot.gsProbability, print.gsProbability, print.gsDesign) export(print.nSurvival, xtable.gsDesign, gsBoundSummary, xtable.gsSurv) Modified: pkg/gsDesign/R/gsBinomialExact.R =================================================================== --- pkg/gsDesign/R/gsBinomialExact.R 2013-05-27 11:27:22 UTC (rev 350) +++ pkg/gsDesign/R/gsBinomialExact.R 2013-05-27 18:30:09 UTC (rev 351) @@ -60,3 +60,82 @@ class(x) <- c("gsBinomialExact", "gsProbability") return(x) } +# see http://theriac.org/DeskReference/viewDocument.php?id=65&SectionsList=3 +binomialSPRT<-function(p0=.05,p1=.25,alpha=.1,beta=.15,minn=10,maxn=35){ + lnA <- log((1-beta)/alpha) + lnB <- log(beta/(1-alpha)) + a <- log((1-p1)/(1-p0)) + b <- log(p1/p0)-a + slope <- -a / b + intercept <- c(lnA,lnB)/b + upper <- ceiling(slope*(minn:maxn)+intercept[1]) + lower <- floor(slope*(minn:maxn)+intercept[2]) + lower[lower< -1] <- -1 + indx <- (minn:maxn >= upper)|(lower>=0) + # compute exact boundary crossing probabilities + y <- gsBinomialExact(k=sum(indx),n.I=(minn:maxn)[indx], + theta=c(p0,p1),a=lower[indx],b=upper[indx]) + y$intercept <- intercept + y$slope <- slope + y$alpha <- alpha + y$beta <- beta + y$p0 <- p0 + y$p1 <- p1 + class(y) <- c("binomialSPRT","gsBinomialExact","gsProbability") + return(y) +} +plot.gsBinomialExact <- function(x,plottype=1,...){ + if (plottype==6){ + theta<-(max(x$theta)-min(x$theta))*(0:50)/50+min(x$theta) + y <- gsBinomialExact(k=x$k,theta=theta,n.I=x$n.I,a=x$lower$bound,b=x$upper$bound) + xx <- data.frame(p=theta,EN=y$en) + p<-ggplot(data=xx,aes(x=p,y=EN)) + geom_line() + ylab("Expected sample size") + }else if(plottype==3){ + xx <- data.frame(N=x$n.I,p=x$upper$bound/x$n.I,Bound="Upper") + xx <- rbind(xx, data.frame(N=x$n.I,p=x$lower$bound/x$n.I,Bound="Lower")) + p<-ggplot(data=xx,aes(x=N,y=p,group=Bound))+ + geom_point() + + ylab("Rate at bound") + }else if (plottype==2){ + theta<-(max(x$theta)-min(x$theta))*(0:50)/50+min(x$theta) + # compute exact boundary crossing probabilities + y <- gsBinomialExact(k=x$k,n.I=x$n.I, + theta=theta,a=x$lower$bound,b=x$upper$bound) + # compute probability of crossing upper bound for each theta + Power <- data.frame(rr=theta, + Percent=100*as.vector(matrix(1,ncol=length(y$n.I),nrow=1)%*% + y$upper$prob), + Outcome="Reject H0") + # compute probability of crossing lower bound + futility <- data.frame(rr=theta, + Percent=100*as.vector(matrix(1,ncol=length(y$n.I),nrow=1)%*% + y$lower$prob), + Outcome="Reject H1") + # probability of no boundary crossing + indeterminate <- data.frame(rr=theta,Percent=100-Power$Percent-futility$Percent, + Outcome="Indeterminate") + #combine and plot + outcome <- rbind(Power,futility,indeterminate) + p <- ggplot(data=outcome,aes(x=rr,y=Percent,lty=Outcome))+ + geom_line()+ + xlab("Underlying response rate") + }else{ + xx <- data.frame(N=x$n.I,x=x$upper$bound,Bound="Upper") + xx <- rbind(xx, data.frame(N=x$n.I,x=x$lower$bound,Bound="Lower")) + 
p<-ggplot(data=xx,aes(x=N,y=x,group=Bound))+ + geom_point() + + ylab("Number of responses") + } + return(p) +} +plot.binomialSPRT <- function(x,plottype=1,...){ + p <- plot.gsBinomialExact(x,plottype=plottype,...) + if (plottype==1){ + p <- p + geom_abline(intercept=x$intercept[1], + slope=x$slope) + + geom_abline(intercept=x$intercept[2], + slope=x$slope) + } + return(p) +} + Modified: pkg/gsDesign/man/gsBinomialExact.Rd =================================================================== --- pkg/gsDesign/man/gsBinomialExact.Rd 2013-05-27 11:27:22 UTC (rev 350) +++ pkg/gsDesign/man/gsBinomialExact.Rd 2013-05-27 18:30:09 UTC (rev 351) @@ -1,14 +1,27 @@ \name{gsBinomialExact} \alias{gsBinomialExact} \alias{print.gsBinomialExact} +\alias{plot.gsBinomialExact} +\alias{binomialSPRT} +\alias{plot.binomialSPRT} \title{3.4: One-Sample Exact Binomial Boundary Crossing Probabilities} -\description{Computes power/Type I error and expected sample size for a group sequential design +\description{ + +\code{gsBinomialExact} computes power/Type I error and expected sample size for a group sequential design in a single-arm trial with a binary outcome. +This can also be used to compare event rates in two-arm studies. The print function has been extended using \code{print.gsBinomialExact} to print \code{gsBinomialExact} objects; see examples. +Similarly, a plot function has been extended using \code{plot.gsBinomialExact} to plot \code{gsBinomialExact} objects; see examples. + +\code{binomialSPRT} computes a truncated binomial sequential probability ratio test (SPRT) which is a specific instance of an exact binomial group sequential design for a single arm trial with a binary outcome. See, for example, \url{http://theriac.org/DeskReference/viewDocument.php?id=65&SectionsList=3}. } \usage{ gsBinomialExact(k=2, theta=c(.1, .2), n.I=c(50, 100), a=c(3, 7), b=c(20,30)) +binomialSPRT(p0,p1,alpha,beta,minn,maxn) +\method{plot}{gsBinomialExact}(x,plottype=1,\dots) +\method{plot}{binomialSPRT}(x,plottype=1,\dots) + } \arguments{ \item{k}{Number of analyses planned, including interim and final.} @@ -16,10 +29,18 @@ \item{n.I}{Sample size at analyses (increasing positive integers); vector of length k.} \item{a}{Number of "successes" required to cross lower bound cutoffs for futility or harm at each analysis; vector of length k; -1 means no lower bound.} \item{b}{Number of "successes" required to cross upper bound cutoffs for efficacy at each analysis; vector of length k.} - + \item{p0}{Lower of the two response (event) rates hypothesized.} + \item{p1}{Higher of the two response (event) rates hypothesized.} + \item{alpha}{Nominal probability of rejecting response (event) rate \code{p0} when it is true (Type I error rate).} + \item{beta}{Nominal probability of rejecting response (event) rate \code{p1} when it is true (Type II error rate).} + \item{minn}{Minimum sample size at which sequential testing begins.} + \item{maxn}{Maximum sample size.} + \item{x}{Item of class \code{gsBinomialExact} or \code{binomialSPRT} for \code{print.gsBinomialExact}. Item of class \code{gsBinomialExact} for \code{plot.gsBinomialExact}.
Item of class \code{binomialSPRT} for \code{plot.binomialSPRT}.} + \item{plottype}{1 produces a plot with counts of response at bounds (for \code{binomialSPRT}, also produces linear SPRT bounds); 2 produces a plot with power to reject null and alternate response rates as well as the probability of not crossing a bound by the maximum sample size; 3 produces a plot with the response rate at the boundary as a function of sample size when the boundary is crossed; 6 produces a plot of the expected sample size by the underlying event rate (this assumes there is no enrollment beyond the sample size where the boundary is crossed).} +\item{\dots}{arguments passed through to \code{ggplot}} } \details{ -Based on the book "Group Sequential Methods with Applications to Clinical Trials," +\code{gsBinomialExact} is based on the book "Group Sequential Methods with Applications to Clinical Trials," Christopher Jennison and Bruce W. Turnbull, Chapter 12, Section 12.1.2 Exact Calculations for Binary Data. This computation is often used as an approximation for the distribution of the number of events in one treatment group out of all events when the probability of an event is small and sample size is large. @@ -27,7 +48,13 @@ On output, the values of \code{theta} input to \code{gsBinomialExact} will be the parameter values for which the boundary crossing probabilities and expected sample sizes are computed. Note that a[1] equal to -1 lower bound at n.I[1] means 0 successes continues at interim 1; a[2]==0 at interim 2 means 0 successes stops trial for futility at 2nd analysis. -For final analysis, set a[k] equal to b[k]-1 to incorporate all possibilities into non-positive trial; see example. +For final analysis, set a[k] equal to b[k]-1 to incorporate all possibilities into non-positive trial; see example. + +The sequential probability ratio test (SPRT) is a sequential testing scheme allowing testing after each observation. The likelihood ratio comparing the two hypothesized response rates is used to determine upper and lower cutoffs which are linear and parallel in the number of responses as a function of sample size. +\code{binomialSPRT} produces a variation of the SPRT that tests only within a range of sample sizes. +While the linear SPRT bounds are continuous, actual bounds are the integer number of responses at or beyond each linear bound for each sample size where testing is performed. Because of the truncation and discretization of the bounds, the power and Type I error achieved will be lower than the nominal levels specified by \code{alpha} and \code{beta}, which can be altered to produce desired values that are achieved by the planned sample size. See also the example that shows computation of Type I error when the futility bound is considered non-binding. + +Plots produced include boundary plots, expected sample size, response rate at the boundary and power. } \value{ @@ -41,11 +68,19 @@ \item{upper}{A list of the same form as \code{lower} containing the upper bound and upper boundary crossing probabilities.} \item{en}{A vector of the same length as \code{theta} containing expected sample sizes for the trial design corresponding to each value in the vector \code{theta}.} + +\code{binomialSPRT} produces an object of class \code{binomialSPRT} that is an extension of the \code{gsBinomialExact} class.
The values returned in addition to those returned by \code{gsBinomialExact} are: + \item{intercept}{A vector of length 2 with the intercepts for the two SPRT bounds.} + \item{slope}{A scalar with the common slope of the SPRT bounds.} + \item{alpha}{As input. Note that this will exceed the actual Type I error achieved by the design returned.} + \item{beta}{As input. Note that this will exceed the actual Type II error achieved by the design returned.} + \item{p0}{As input.} + \item{p1}{As input.} } \seealso{\code{\link{gsProbability}}} \note{The manual is not linked to this help file, but is available in library/gsdesign/doc/gsDesignManual.pdf in the directory where R is installed.} -\author{Jon Hartzel with modifications for gsDesign package by Yevgen Tymofyeyev and Keaven Anderson \email{keaven\_anderson at merck.}} +\author{Jon Hartzel, Yevgen Tymofyeyev and Keaven Anderson \email{keaven\_anderson at merck.}} \references{ Jennison C and Turnbull BW (2000), \emph{Group Sequential Methods with Applications to Clinical Trials}. Boca Raton: Chapman and Hall. @@ -62,5 +97,19 @@ # because of "gsProbability" class above, following is equivalent to # print.gsProbability(zz) zz +# also plot (see also plots below for \code{binomialSPRT}) +# add lines using geom_line() +plot(zz) + geom_line() + +# now for SPRT examples +x <- binomialSPRT(p0=.05,p1=.25,alpha=.1,beta=.2) +# boundary plot +plot(x) +# power plot +plot(x,plottype=2) +# Response (event) rate at boundary +plot(x,plottype=3) +# Expected sample size at boundary crossing or end of trial +plot(x,plottype=6) } \keyword{design} Modified: pkg/gsDesign/man/gsDesign.Rd =================================================================== --- pkg/gsDesign/man/gsDesign.Rd 2013-05-27 11:27:22 UTC (rev 350) +++ pkg/gsDesign/man/gsDesign.Rd 2013-05-27 18:30:09 UTC (rev 351) @@ -12,7 +12,8 @@ \method{print}{gsDesign}(x,\dots) \method{xtable}{gsDesign}(x, caption = NULL, label=NULL, align=NULL, digits=NULL, display=NULL, footnote = NULL, fnwid = "9cm", deltaname="delta", - Nname="N",logdelta=FALSE, \dots)} + Nname="N",logdelta=FALSE, \dots) +} \arguments{ \item{k}{Number of analyses planned, including interim and final.} \item{test.type}{\code{1=}one-sided \cr From noreply at r-forge.r-project.org Wed May 29 15:34:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 29 May 2013 15:34:09 +0200 (CEST) Subject: [Gsdesign-commits] r352 - in pkg/gsDesign: R man Message-ID: <20130529133409.6EB0D184BCE@r-forge.r-project.org> Author: keaven Date: 2013-05-29 15:34:09 +0200 (Wed, 29 May 2013) New Revision: 352 Added: pkg/gsDesign/R/ssrCP.R pkg/gsDesign/R/varBinomial.R pkg/gsDesign/R/xtable.R pkg/gsDesign/man/ssrCP.Rd Log: Added varBinomial.R and ssrCP source code required with recent updates.
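For orientation, a minimal usage sketch of the new ssrCP() ahead of its source below; the design parameters (n.fix = 100, cpadj = c(.5, .9)) are illustrative choices, not from the commit:

library(gsDesign)
# 2-stage group sequential design with the interim at half the information
x <- gsDesign(k = 2, n.fix = 100, timing = .5, beta = .1)
# re-estimated total sample size over a grid of interim z-values;
# the size changes only where interim conditional power falls in cpadj
ssrCP(z = seq(0, 3, .25), x = x, beta = .1, cpadj = c(.5, .9), maxinc = 2, overrun = 0)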
Added: pkg/gsDesign/R/ssrCP.R =================================================================== --- pkg/gsDesign/R/ssrCP.R (rev 0) +++ pkg/gsDesign/R/ssrCP.R 2013-05-29 13:34:09 UTC (rev 352) @@ -0,0 +1,29 @@ +ssrCP <- function(z, theta=NULL, maxinc=2, overrun=0, beta = 0.1, cpadj=c(.5,1-beta), x=gsDesign(k=2, timing=.5, beta=beta)){ + if (class(x)!="gsDesign") stop("x must be passed as an object of class gsDesign") + if (2 != x$k) stop("input group sequential design must have 2 stages (k=2)") + w <- x$timing[1] + if (is.null(theta)) theta <- z / sqrt(x$n.I[1]) + CP <- pnorm(theta*sqrt(x$n.I[2]*(1-w))-(x$upper$bound[2]-z*sqrt(w))/sqrt(1-w)) + n2 <- array(x$n.I[1]+overrun,length(z)) + indx <- ((z>x$lower$bound[1])&(z<x$upper$bound[1])) + indx2 <- indx&((CP<cpadj[1])|(CP>cpadj[2])) + n2[indx2] <- x$n.I[2] + indx <- indx & !indx2 + n2[indx] <- (((x$upper$bound[2] - z[indx] * sqrt(w)) / sqrt(1 - w) - qnorm(beta))/theta[indx])^2 + x$n.I[1] + n2[n2>maxinc*x$n.I[2]]<-maxinc*x$n.I[2] + return(n2) +} +# Type I error if sufficient statistic used instead of +# combination test + +#unadjTypeIErr <- function(maxinc=2, beta = 0.1, cpadj=c(.5,1-beta), +# x=gsDesign(k=2, timing=.5, beta=beta)){ +# z <- normalGrid() # grid for interim z +# B <- sqrt(x$timing[1])*z # interim B-value +# thetahat <- z/sqrt(n.I[1]) # interim standardized effect size +# cp <- + # stage 2 sample size +# n2 <- ssrCP(z=z,maxinc=maxinc,beta=beta,cpadj=cpadj,x=x) + # Type I error for non-promising zone + # assuming non-binding futility + \ No newline at end of file Added: pkg/gsDesign/R/varBinomial.R =================================================================== --- pkg/gsDesign/R/varBinomial.R (rev 0) +++ pkg/gsDesign/R/varBinomial.R 2013-05-29 13:34:09 UTC (rev 352) @@ -0,0 +1,55 @@ +# blinded estimate of variance for 2-sample binomial +varBinomial<-function(x,n,delta0=0,ratio=1,scale="Difference") +{ # check input arguments + checkVector(x, "integer", c(1, Inf)) + checkScalar(n, "integer", c(1, Inf)) + checkScalar(ratio, "numeric", c(0, Inf), c(FALSE,FALSE)) + scale <- match.arg(tolower(scale), c("difference", "rr", "or")) + # risk difference test - from Miettinen and Nurminen eqn (9) + p<-x/n + phi<-array(0,max(length(delta0),length(x),length(ratio),length(n))) + if (scale=="difference") + { checkScalar(delta0, "numeric", c(-1, 1),c(FALSE,FALSE)) + p1<-p+ratio*delta0/(ratio+1) + p2<-p1-delta0 + a<-1+ratio + b<- -(a+p1+ratio*p2-delta0*(ratio+2)) + c<- delta0^2-delta0*(2*p1+a)+p1+ratio*p2 + d<- p1*delta0*(1-delta0) + v<-(b/(3*a))^3-b*c/6/a^2+d/2/a + u<-sign(v)*sqrt((b/3/a)^2-c/3/a) + w<-(pi+acos(v/u^3))/3 + p10<-2*u*cos(w)-b/3/a + p20<-p10+delta0 + phi <- (p10*(1-p10)+p20*(1-p20)/ratio)*(ratio+1) + phi[delta0==0]<-p*(1-p)/ratio*(1+ratio)^2 + } + else if (scale=="rr") # log(p2/p1) + { checkScalar(delta0, "numeric", c(-Inf, Inf),c(FALSE,FALSE)) + RR<-exp(delta0) + if (delta0==0) phi<-(1-p)/p/ratio*(1+ratio)^2 + else + { p1<-p*(ratio+1)/(ratio*RR+1) + p2<-RR*p1 + a<-(1+ratio)*RR + b<- -(RR*ratio+p2*ratio+1+p1*RR) + c<- ratio*p2+p1 + p10<-(-b-sqrt(b^2-4*a*c))/2/a + p20<-RR*p10 + phi<-(ratio+1)*((1-p10)/p10+(1-p20)/ratio/p20) + } } + # log-odds ratio - based on asymptotic distribution of log-odds + # see vignette + else + { checkScalar(delta0, "numeric", c(-Inf, Inf),c(FALSE,FALSE)) + OR<-exp(delta0) + a<-OR-1 + c<- -p*(ratio+1) + b<- 1+ratio*OR+(OR-1)*c + p10<-(-b+sqrt(b^2-4*a*c))/2/a + p20<-OR*p10/(1+p10*(OR-1)) + phi<-(ratio+1)*(1/p10/(1-p10)+1/p20/(1-p20)/ratio) + phi[delta0==0]<-1/p/(1-p)*(1+1/ratio)*(1+ratio) + } + return(phi/n) +} Added: pkg/gsDesign/R/xtable.R
=================================================================== --- pkg/gsDesign/R/xtable.R (rev 0) +++ pkg/gsDesign/R/xtable.R 2013-05-29 13:34:09 UTC (rev 352) @@ -0,0 +1,63 @@ +xtable.gsDesign <- function (x, caption = NULL, label=NULL, align=NULL, digits=NULL, display=NULL, + footnote = NULL, fnwid = "9cm", deltaname="delta", + Nname="N",logdelta=FALSE, ...) +{ + k <- x$k + deltafutility <- gsDelta(x=x,i=1:x$k,z=x$lower$bound[1:x$k]) + deltaefficacy <- gsDelta(x=x,i=1:x$k,z=x$upper$bound[1:x$k]) + deltavals <- c(x$delta0,x$delta1) + if (logdelta){ + deltafutility <- exp(deltafutility) + deltaefficacy <- exp(deltaefficacy) + deltavals <- exp(deltavals) + } + stat <- c("Z-value", "p (1-sided)", + paste(deltaname,"at bound"), + paste("P\\{Cross\\} if ", deltaname,"=", + deltavals[1], sep = ""), + paste("P\\{Cross\\} if ", deltaname,"=", + deltavals[2],sep = "")) + st <- stat + for (i in 2:k) stat <- c(stat, st) + an <- array(" ", 5 * k) + tim <- an + enrol <- an + fut <- an + eff <- an + an[5 * (0:(k - 1)) + 1] <- c(paste("IA ", + as.character(1:(k - 1)), ": ", as.character(round(100 * x$timing[1:(k - 1)],1)), + "\\%", sep = ""), "Final analysis") + an[5 * (1:(k - 1)) + 1] <- paste("\\hline", an[5 * (1:(k -1)) + 1]) + an[5 * (0:(k - 1)) + 2] <- paste(Nname,":", ceiling(x$n.I[1:k])) + fut[5 * (0:(k - 1)) + 1] <- as.character(round(x$lower$bound,2)) + eff[5 * (0:(k - 1)) + 1] <- as.character(round(x$upper$bound,2)) + asp <- as.character(round(pnorm(-x$upper$bound), 4)) + asp[asp == "0"] <- "$< 0.0001$" + eff[5 * (0:(k - 1)) + 2] <- asp + bsp <- as.character(round(pnorm(-x$lower$bound), 4)) + bsp[bsp == "0"] <- " $< 0.0001$" + fut[5 * (0:(k - 1)) + 2] <- bsp + asp <- as.character(round(deltafutility[1:x$k], 4)) + fut[5 * (0:(k - 1)) + 3] <- asp + bsp <- as.character(round(deltaefficacy[1:x$k], 4)) + eff[5 * (0:(k - 1)) + 3] <- bsp + asp <- as.character(round(cumsum(x$upper$prob[, 1]), 4)) + asp[asp == "0"] <- "$< 0.0001$" + eff[5 * (0:(k - 1)) + 4] <- asp + bsp <- as.character(round(cumsum(x$lower$prob[, 1]), 5)) + bsp[bsp == "0"] <- "$< 0.0001$" + fut[5 * (0:(k - 1)) + 4] <- bsp + asp <- as.character(round(cumsum(x$upper$prob[, 2]), 4)) + asp[asp == "0"] <- "$< 0.0001$" + eff[5 * (0:(k - 1)) + 5] <- asp + bsp <- as.character(round(cumsum(x$lower$prob[, 2]), 4)) + bsp[bsp == "0"] <- "$< 0.0001$" + fut[5 * (0:(k - 1)) + 5] <- bsp + neff <- length(eff) + if (!is.null(footnote)) + eff[neff] <- paste(eff[neff], "\\\\ \\hline \\multicolumn{4}{p{", + fnwid, "}}{\\footnotesize", footnote, "}") + x <- data.frame(cbind(an, stat, fut, eff)) + colnames(x) <- c("Analysis", "Value", "Futility", "Efficacy") + xtable(x,caption = caption, label=label, align=align, digits=digits, display=display, ...) +} Added: pkg/gsDesign/man/ssrCP.Rd =================================================================== --- pkg/gsDesign/man/ssrCP.Rd (rev 0) +++ pkg/gsDesign/man/ssrCP.Rd 2013-05-29 13:34:09 UTC (rev 352) @@ -0,0 +1,91 @@ +\name{ssrCP} +\alias{ssrCP} +\title{Sample size re-estimation based on conditional power} +\description{ +\code{ssrCP()} is used with 2-stage designs to update sample size at an interim analysis based on conditional power. +Either the estimated treatment effect at the interim analysis or any chosen effect size can be used for sample size re-estimation. +If not done carefully, these designs can be very inefficient. It is probably a good idea to compare any design to a simpler group sequential design; see, for example, Jennison and Turnbull, Statistics in Medicine, 2003. 
+Method assumes a small Type I error is included with the interim analysis and that the design is an adaptation from a 2-stage group sequential design (Lehmacher and Wassmer, Biometrics, 1999).} +\usage{ +ssrCP(z, theta = NULL, maxinc = 2, overrun = 0, beta = 0.1, cpadj = c(0.5, 1 - beta), x = gsDesign(k = 2, timing = 0.5, beta = beta)) +} +\arguments{ + \item{z}{Scalar or vector with interim standardized Z-value(s). Input of multiple values makes it easy to plot the revised sample size as a function of the interim test statistic.} + \item{theta}{If \code{NULL} (default), conditional power calculation will be based on estimated interim treatment effect. Otherwise, \code{theta} is the standardized effect size used for conditional power calculation. Using the alternate hypothesis treatment effect can be more efficient than the estimated effect size; see Liu and Chi, Biometrics, 2001. +There is a large literature for designs with conditional power; two commonly discussed methods allowing use of sufficient statistics for testing rather than combination tests have been proposed by Chen, DeMets and Lan, 2004, and Mehta and Pocock, 200x +} + \item{maxinc}{Maximum-fold increase in sample size from planned sample size.} + \item{overrun}{Number of patients enrolled but not analyzed at interim analysis.} + \item{beta}{Targeted Type II error (1 - targeted conditional power)} + \item{cpadj}{Interval of conditional power values for which sample size is to be increased.} + \item{x}{2-stage group sequential design generated using \code{gsDesign()}.} +} +\details{See references and examples.} +\value{The initial version of \code{ssrCP()} returns a vector of sample sizes corresponding to the interim test statistics supplied in \code{z}.} +\references{ +Chen, YHJ, DeMets, DL and Lan, KKG. Increasing the sample size when the unblinded interim result is promising, Statistics in Medicine, +23:1023-1038, 2004. + +Jennison, C and Turnbull, BW. Mid-course sample size modification in clinical trials based on the observed treatment effect. Statistics in Medicine, 22:971-993, 2003. + +Lehmacher, W and Wassmer, G. Adaptive sample size calculations in group sequential trials, Biometrics, 55:1286-1290, 1999. + +Liu, Q and Chi, GY. On sample size and inference for two-stage adaptive designs, Biometrics, 57:172-177, 2001. + +Mehta, C and Pocock, S. Adaptive increase in sample size when interim results are promising: A practical guide with examples, Statistics in Medicine, 30:3267-3284, 2011.
+} +\seealso{\code{\link{gsDesign}}} + +\author{Keaven Anderson \email{keaven\_anderson at merck.}} + +\examples{ +# timing of interim analysis +timing <- .5 +# upper spending function +sfu <- sfHSD +# upper spending function parameter +sfupar <- -12 +# maximum sample size inflation +maxinflation <- 2 +# assumed enrollment overrun at IA +overrun <- 25 +# interim z-values for plotting +z <- seq(0,4,.025) +# Type I error (1-sided) +alpha <- .025 +# Type II error for design +beta <- .1 +# Fixed design sample size +n.fix <- 100 +# conditional power interval where sample +# size is to be adjusted +cpadj <- c(.3,.9) +# targeted Type II error when adapting sample size +betastar <- beta + +# generate a 2-stage group sequential design with +# desired planned sample size (first 2 lines set planned sample size to that from above; normally) +x<-gsDesign(k=2,n.fix=n.fix,timing=timing,sfu=sfu,sfupar=sfupar,alpha=alpha,beta=beta) +nalt <- maxinflation*(x$n.I[2]) +par(mar=c(7, 4, 4, 4)+.1) +plot(z,ssrCP(x=x,z=z,overrun=overrun,beta=betastar,cpadj=cpadj),type="l",xlim=c(0,3.5),axes=FALSE,xlab="",ylab="") +lines(z,80+240*dnorm(z,mean=0),col=2) +lines(z,80+240*dnorm(z,mean=sqrt(x$n.I[1])*x$theta[2]),col=3) +lines(z,80+240*dnorm(z,mean=sqrt(x$n.I[1])*x$theta[2]/2),col=4) +lines(z,80+240*dnorm(z,mean=sqrt(x$n.I[1])*x$theta[2]*.75),col=5) +axis(side=2, at=75+25*(0:5), labels=as.character(75+25*(0:5))) +mtext(side=2,"Sample size",line=2) +axis(side=4, at=80+240*seq(0,.4,.1), labels=as.character(seq(0,.4,.1))) +mtext(side=4,expression(paste("Density for ",z[1])),line=2) +lines(x=c(-3.5,3.5),y=c(nalt,nalt),lty=2) +w <- x$timing[1] +theta <- seq(.5,3.5,.5) / sqrt(x$n.I[1]) +CP <- pnorm(theta*sqrt(x$n.I[2]*(1-w))-(x$upper$bound[2]-seq(.5,3.5,.5)*sqrt(w))/sqrt(1-w)) +axis(side=1,line=3,at=seq(.5,3.5,.5),labels=as.character(round(CP,2))) +mtext(side=1,"CP",line=3.5,at=.25) +axis(side=1,line=1,at=seq(0,3.5,.5),labels=as.character(seq(0,3.5,.5))) +mtext(side=1,expression(z[1]),line=.75,at=-.15) +axis(side=1,line=5,at=seq(.5,3.5,.5),labels=as.character(round(theta/x$theta[2],2))) +mtext(side=1,expression(hat(theta)/theta[1]),line=5.5,at=.25) +} +\keyword{design}
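The conditional power driving ssrCP() and the CP axis in the example above can be recomputed directly from a design object; a minimal sketch, where the interim value z1 = 1.5 and n.fix = 100 are illustrative:

library(gsDesign)
x <- gsDesign(k = 2, n.fix = 100, timing = .5, beta = .1)
z1 <- 1.5                    # illustrative interim Z-value
w <- x$timing[1]             # interim information fraction
theta <- z1 / sqrt(x$n.I[1]) # interim estimate of standardized effect size
# probability of crossing the final efficacy bound at the planned
# sample size, matching the expression used inside ssrCP()
pnorm(theta * sqrt(x$n.I[2] * (1 - w)) - (x$upper$bound[2] - z1 * sqrt(w)) / sqrt(1 - w))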