[Splm-commits] r160 - pkg/R
noreply at r-forge.r-project.org
Tue Apr 2 23:02:16 CEST 2013
Author: the_sculler
Date: 2013-04-02 23:02:16 +0200 (Tue, 02 Apr 2013)
New Revision: 160
Added:
pkg/R/sarem2srREmod.R
pkg/R/sem2srREmod.R
pkg/R/sparseBmethods.R
Log:
Added missing functions sparseBmethods.R, sarem2srREmod.R, sem2srREmod.R
Added: pkg/R/sarem2srREmod.R
===================================================================
--- pkg/R/sarem2srREmod.R (rev 0)
+++ pkg/R/sarem2srREmod.R 2013-04-02 21:02:16 UTC (rev 160)
@@ -0,0 +1,239 @@
+sarem2srREmod <-
+function (X, y, ind, tind, n, k, t., nT, w, w2, coef0 = rep(0, 4),
+ hess = FALSE, trace = trace, x.tol = 1.5e-18, rel.tol = 1e-15,
+ method="BFGS",
+ ...)
+{
+
+ ## New KKP+SR estimator, Giovanni Millo 12/03/2013
+ ## structure:
+ ## a) specific part
+ ## - set names, bounds and initial values for parms
+ ## - define building blocks for likelihood and GLS as functions of parms
+ ## - define likelihood
+ ## b) generic part (independent of ll.c() and #parms)
+ ## - fetch covariance parms from max lik
+ ## - calc last GLS step
+ ## - fetch betas
+ ## - calc final covariances
+ ## - make list of results
+
+ ## needs ldetB(), xprodB()
+
+ ## set names for final parms vectors
+ nam.beta <- dimnames(X)[[2]]
+ nam.errcomp <- c("phi", "rho", "lambda", "psi")
+
+ ## initialize values for optimizer
+ myparms0 <- coef0
+
+ ## modules for likelihood
+ Vmat <- function(rho, t.) {
+ V1 <- matrix(ncol = t., nrow = t.)
+ for (i in 1:t.) V1[i, ] <- rho^abs(1:t. - i)
+ V <- (1/(1 - rho^2)) * V1
+ return(V)
+ }
+ Vmat.1 <- function(rho, t.) {
+ ## V^(-1) is 'similar' to its 3x3 counterpart,
+ ## irrespective of t.:
+ ## see Vmat.R in /sparsealgebra
+ if(t.==1) {Vmat.1 <- 1} else {
+ Vmat.1 <- matrix(0, ncol = t., nrow = t.)
+ ## non-extreme diag. elements
+ for (i in 2:(t.-1)) Vmat.1[i,i] <- (1-rho^4)/(1-rho^2)
+ ## extremes of diagonal
+ Vmat.1[1,1] <- Vmat.1[t.,t.] <- 1
+ ## bidiagonal elements
+ for (j in 1:(t.-1)) Vmat.1[j+1,j] <- -rho
+ for (k in 1:(t.-1)) Vmat.1[k,k+1] <- -rho
+ }
+ return(Vmat.1)
+ }
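+ ## illustrative check (not run, toy values): Vmat.1() returns the
+ ## inverse of the AR(1) scale matrix built by Vmat()
+ # rho0 <- 0.5; t0 <- 4
+ # max(abs(Vmat.1(rho0, t0) %*% Vmat(rho0, t0) - diag(t0))) # ~ 0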
+ alfa2 <- function(rho) (1 + rho)/(1 - rho)
+ d2 <- function(rho, t.) alfa2(rho) + t. - 1
+ Jt <- matrix(1, ncol = t., nrow = t.)
+ In <- diag(1, n)
+ det2 <- function(phi, rho, lambda, t., w) (d2(rho, t.) * (1 -
+ rho)^2 * phi + 1)
+ invSigma <- function(phirholambda, n, t., w) {
+ ## retrieve parms
+ phi <- phirholambda[1]
+ rho <- phirholambda[2]
+ lambda <- phirholambda[3]
+ ## psi is not used here: all 4 parms are passed, but that is
+ ## harmless because psi comes last
+ ## calc inverse
+ invVmat <- Vmat.1(rho, t.)
+ BB <- xprodB(lambda, w)
+ chi <- phi/(d2(rho, t.)*(1-rho)^2*phi+1)
+ invSigma <- kronecker((invVmat-chi*(invVmat %*% Jt %*% invVmat)),
+ BB)
+ invSigma
+ }
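+ ## note: invSigma() evaluates the closed form
+ ## Sigma^{-1} = [ V^{-1} - chi * V^{-1} J_t V^{-1} ] %x% B'B,
+ ## with chi = phi / (d2(rho, t.)*(1-rho)^2*phi + 1),
+ ## which for phi = 0 reduces to V^{-1} %x% B'B.
+ ## illustrative check (not run, 'spam' attached):
+ # s1 <- invSigma(c(0, 0.3, 0.4, 0), n, t., w2)
+ # max(abs(s1 - kronecker(Vmat.1(0.3, t.), xprodB(0.4, w2)))) # ~ 0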
+ ## likelihood function, both steps included
+ ll.c <- function(phirholambda, y, X, n, t., w, w2, wy) {
+ ## retrieve parms
+ phi <- phirholambda[1]
+ rho <- phirholambda[2]
+ lambda <- phirholambda[3]
+ psi <- phirholambda[4] # lag-specific line
+ ## calc inverse sigma
+ sigma.1 <- invSigma(phirholambda, n, t., w2)
+ ## lag y
+ Ay <- y - psi * wy # lag-specific line
+ ## do GLS step to get e, s2e
+ glsres <- GLSstep(X, Ay, sigma.1) # lag-specific line (Ay for y)
+ e <- glsres[["ehat"]]
+ s2e <- glsres[["sigma2"]]
+ ## calc ll
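+ ## (terms: zero = T*log|B(psi)| (lag Jacobian); uno, due = serial AR(1)
+ ## and RE parts of -1/2*log|Sigma|; tre = -nT/2*log(s2e);
+ ## quattro = T*log|B(lambda)| (error Jacobian); cinque = quadratic form;
+ ## const = Gaussian constant)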
+ zero <- t.*ldetB(psi, w) #log(detB(psi, w)) # lag-specific line (else zero <- 0)
+ uno <- n/2 * log(1 - rho^2)
+ due <- -n/2 * log(det2(phi, rho, lambda, t., w2))
+ tre <- -(n * t.)/2 * log(s2e)
+ quattro <- (t.) * ldetB(lambda, w2) #log(detB(lambda, w2))
+ cinque <- -1/(2 * s2e) * t(e) %*% sigma.1 %*% e
+ const <- -(n * t.)/2 * log(2 * pi)
+ ll <- const + zero + uno + due + tre + quattro + cinque
+ ## invert sign for minimization
+ return(-ll)
+ }
+
+ ## set bounds for optimizer
+ lower.bounds <- c(1e-08, -0.999, -0.999, -0.999) # lag-specific line (4th parm)
+ upper.bounds <- c(1e+09, 0.999, 0.999, 0.999) # lag-specific line (idem)
+
+ ## constraints as cA %*% theta + cB >= 0
+ ## equivalent to: phi>=0, -1<=(rho, lambda, psi)<=1
+ ## NB in maxLik() optimization cannot start at the boundary of the
+ ## parameter space !
+ cA <- cbind(c(1, rep(0,6)),
+ c(0,1,-1,rep(0,4)),
+ c(rep(0,3), 1, -1, rep(0,2)),
+ c(rep(0,5), 1, -1))
+ cB <- c(0, rep(1,6))
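+ ## illustrative check (not run): an interior start such as rep(0.01, 4)
+ ## (the default coef0 after maxout() further below) satisfies the
+ ## constraints strictly
+ # all(cA %*% rep(0.01, 4) + cB > 0) # TRUE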
+
+ ## generic from here
+
+ ## calc. Wy (spatial lag of y)
+ ## (flexible fun accepting either listws or matrices for w)
+ Wy <- function(y, w, tind) { # lag-specific line
+ wyt <- function(y, w) { # lag-specific line
+ if("listw" %in% class(w)) { # lag-specific line
+ wyt <- lag.listw(w, y) # lag-specific line
+ } else { # lag-specific line
+ wyt <- w %*% y # lag-specific line
+ } # lag-specific line
+ return(wyt) # lag-specific line
+ } # lag-specific line
+ wy<-list() # lag-specific line
+ for (j in 1:length(unique(tind))) { # lag-specific line
+ yT<-y[tind==unique(tind)[j]] # lag-specific line
+ wy[[j]] <- wyt(yT, w) # lag-specific line
+ } # lag-specific line
+ return(unlist(wy)) # lag-specific line
+ } # lag-specific line
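+ ## illustrative toy example (not run): 2 periods, 3 units, w given as a
+ ## plain row-standardised matrix; Wy() stacks the within-period lags
+ ## in the same order as y
+ # W0 <- matrix(c(0,1,1, 1,0,1, 1,1,0)/2, 3, 3)
+ # Wy(1:6, W0, rep(1:2, each = 3)) # == c(W0 %*% 1:3, W0 %*% 4:6)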
+
+ ## GLS step function
+ GLSstep <- function(X, y, sigma.1) {
+ b.hat <- solve(t(X) %*% sigma.1 %*% X,
+ t(X) %*% sigma.1 %*% y)
+ ehat <- y - X %*% b.hat
+ sigma2ehat <- (t(ehat) %*% sigma.1 %*% ehat)/(n * t.)
+ return(list(betahat=b.hat, ehat=ehat, sigma2=sigma2ehat))
+ }
+
+ ## lag y once for all
+ wy <- Wy(y, w2, tind) # lag-specific line
+
+
+ ## optimization
+
+ ## adaptive scaling
+ parscale <- 1/max(myparms0, 0.1)
+
+ if(method=="nlminb") {
+
+ optimum <- nlminb(start = myparms0, objective = ll.c,
+ gradient = NULL, hessian = NULL,
+ y = y, X = X, n = n, t. = t., w = w, w2 = w2, wy = wy,
+ scale = parscale,
+ control = list(x.tol = x.tol,
+ rel.tol = rel.tol, trace = trace),
+ lower = lower.bounds, upper = upper.bounds)
+
+ ## log likelihood at optimum (notice inverted sign)
+ myll <- -optimum$objective
+ ## retrieve optimal parms and H
+ myparms <- optimum$par
+ myHessian <- fdHess(myparms, function(x) -ll.c(x,
+ y, X, n, t., w, w2, wy))$Hessian # lag-specific line: wy
+
+ } else {
+
+ require(maxLik)
+
+ ## initial values are not allowed to be zero
+ maxout<-function(x,a) ifelse(x>a, x, a)
+ myparms0 <- maxout(myparms0, 0.01)
+
+ ## invert sign for MAXimization
+ ll.c2 <- function(phirholambda, y, X, n, t., w, w2, wy) {
+ -ll.c(phirholambda, y, X, n, t., w, w2, wy)
+ }
+
+ ## max likelihood
+ optimum <- maxLik(logLik = ll.c2,
+ grad = NULL, hess = NULL, start=myparms0,
+ method = method,
+ parscale = parscale,
+ constraints=list(ineqA=cA, ineqB=cB),
+ y = y, X = X, n = n, t. = t., w = w, w2 = w2, wy = wy)
+
+ ## log likelihood at optimum (notice inverted sign)
+ myll <- optimum$maximum # this one MAXimizes
+ ## retrieve optimal parms and H
+ myparms <- optimum$estimate
+ myHessian <- optimum$hessian
+ }
+
+ ## one last GLS step at optimal vcov parms
+ sigma.1 <- invSigma(myparms, n, t., w2) # same error weights as in ll.c
+ Ay <- y - myparms[length(myparms)] * wy # lag-specific line
+ beta <- GLSstep(X, Ay, sigma.1)
+
+ ## final vcov(beta)
+ covB <- as.numeric(beta[[3]]) *
+ solve(t(X) %*% sigma.1 %*% X)
+
+ ## final vcov(errcomp)
+ nvcovpms <- length(nam.errcomp) - 1
+ ## error handler here for singular Hessian cases
+ covTheta <- try(solve(-myHessian), silent=TRUE)
+ if(inherits(covTheta, "try-error")) {
+ covTheta <- matrix(NA, ncol=nvcovpms+1,
+ nrow=nvcovpms+1)
+ warning("Hessian matrix is not invertible")
+ }
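+ ## note (not run): a singular Hessian is what the try() above guards, e.g.
+ # inherits(try(solve(matrix(0, 2, 2)), silent = TRUE), "try-error") # TRUE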
+ covAR <- covTheta[nvcovpms+1, nvcovpms+1, drop=FALSE]
+ covPRL <- covTheta[1:nvcovpms, 1:nvcovpms, drop=FALSE]
+
+ ## final parms
+ betas <- as.vector(beta[[1]])
+ sigma2 <- as.numeric(beta[["sigma2"]])
+ arcoef <- myparms[which(nam.errcomp=="psi")] # lag-specific line
+ errcomp <- myparms[which(nam.errcomp!="psi")]
+ names(betas) <- nam.beta
+ names(arcoef) <- "psi" # lag-specific line
+ names(errcomp) <- nam.errcomp[which(nam.errcomp!="psi")]
+
+ dimnames(covB) <- list(nam.beta, nam.beta)
+ dimnames(covAR) <- list(names(arcoef), names(arcoef))
+ dimnames(covPRL) <- list(names(errcomp), names(errcomp))
+
+ ## result
+ RES <- list(betas = betas, arcoef=arcoef, errcomp = errcomp,
+ covB = covB, covAR=covAR, covPRL = covPRL, ll = myll,
+ sigma2 = sigma2)
+
+ return(RES)
+}
Added: pkg/R/sem2srREmod.R
===================================================================
--- pkg/R/sem2srREmod.R (rev 0)
+++ pkg/R/sem2srREmod.R 2013-04-02 21:02:16 UTC (rev 160)
@@ -0,0 +1,210 @@
+sem2srREmod <-
+function (X, y, ind, tind, n, k, t., nT, w, w2, coef0 = rep(0, 3),
+ hess = FALSE, trace = trace, x.tol = 1.5e-18, rel.tol = 1e-15,
+ method="BFGS",
+ ...)
+{
+
+ ## New KKP+SR estimator, Giovanni Millo 12/03/2013
+ ## structure:
+ ## a) specific part
+ ## - set names, bounds and initial values for parms
+ ## - define building blocks for likelihood and GLS as functions of parms
+ ## - define likelihood
+ ## b) generic part (independent of ll.c() and #parms)
+ ## - fetch covariance parms from max lik
+ ## - calc last GLS step
+ ## - fetch betas
+ ## - calc final covariances
+ ## - make list of results
+
+ ## needs ldetB(), xprodB()
+
+ ## set names for final parms vectors
+ nam.beta <- dimnames(X)[[2]]
+ nam.errcomp <- c("phi", "rho", "lambda")
+
+ ## initialize values for optimizer
+ myparms0 <- coef0
+
+ ## modules for likelihood
+ Vmat <- function(rho, t.) {
+ V1 <- matrix(ncol = t., nrow = t.)
+ for (i in 1:t.) V1[i, ] <- rho^abs(1:t. - i)
+ V <- (1/(1 - rho^2)) * V1
+ return(V)
+ }
+ Vmat.1 <- function(rho, t.) {
+ ## V^(-1) is 'similar' to its 3x3 counterpart,
+ ## irrespective of t.:
+ ## see Vmat.R in /sparsealgebra
+ if(t.==1) {Vmat.1 <- 1} else {
+ Vmat.1 <- matrix(0, ncol = t., nrow = t.)
+ ## non-extreme diag. elements
+ for (i in 2:(t.-1)) Vmat.1[i,i] <- (1-rho^4)/(1-rho^2)
+ ## extremes of diagonal
+ Vmat.1[1,1] <- Vmat.1[t.,t.] <- 1
+ ## bidiagonal elements
+ for (j in 1:(t.-1)) Vmat.1[j+1,j] <- -rho
+ for (k in 1:(t.-1)) Vmat.1[k,k+1] <- -rho
+ }
+ return(Vmat.1)
+ }
+ alfa2 <- function(rho) (1 + rho)/(1 - rho)
+ d2 <- function(rho, t.) alfa2(rho) + t. - 1
+ Jt <- matrix(1, ncol = t., nrow = t.)
+ In <- diag(1, n)
+ det2 <- function(phi, rho, lambda, t., w) (d2(rho, t.) * (1 -
+ rho)^2 * phi + 1)
+ invSigma <- function(phirholambda, n, t., w) {
+ ## retrieve parms
+ phi <- phirholambda[1]
+ rho <- phirholambda[2]
+ lambda <- phirholambda[3]
+ ## calc inverse
+ invVmat <- Vmat.1(rho, t.)
+ BB <- xprodB(lambda, w)
+ chi <- phi/(d2(rho, t.)*(1-rho)^2*phi+1)
+ invSigma <- kronecker((invVmat-chi*(invVmat %*% Jt %*% invVmat)),
+ BB)
+ invSigma
+ }
+ ## likelihood function, both steps included
+ ll.c <- function(phirholambda, y, X, n, t., w, w2) {
+ ## retrieve parms
+ phi <- phirholambda[1]
+ rho <- phirholambda[2]
+ lambda <- phirholambda[3]
+ ## calc inverse sigma
+ sigma.1 <- invSigma(phirholambda, n, t., w2)
+ ## do GLS step to get e, s2e
+ glsres <- GLSstep(X, y, sigma.1)
+ e <- glsres[["ehat"]]
+ s2e <- glsres[["sigma2"]]
+ ## calc ll
+ zero <- 0
+ uno <- n/2 * log(1 - rho^2)
+ due <- -n/2 * log(det2(phi, rho, lambda, t., w2))
+ tre <- -(n * t.)/2 * log(s2e)
+ quattro <- (t.) * ldetB(lambda, w2)
+ cinque <- -1/(2 * s2e) * t(e) %*% sigma.1 %*% e
+ const <- -(n * t.)/2 * log(2 * pi)
+ ll <- const + zero + uno + due + tre + quattro + cinque
+ ## invert sign for minimization
+ return(-ll)
+ }
+
+ ## set bounds for optimizer
+ lower.bounds <- c(1e-08, -0.999, -0.999)
+ upper.bounds <- c(1e+09, 0.999, 0.999)
+
+ ## constraints as cA %*% theta + cB >= 0
+ ## equivalent to: phi>=0, -1<=(rho, lambda)<=1
+ ## NB in maxLik() optimization cannot start at the boundary of the
+ ## parameter space !
+ cA <- cbind(c(1, rep(0,4)),
+ c(0,1,-1,rep(0,2)),
+ c(rep(0,3), 1, -1))
+ cB <- c(0, rep(1,4))
+
+ ## generic from here
+
+ ## GLS step function
+ GLSstep <- function(X, y, sigma.1) {
+ b.hat <- solve(t(X) %*% sigma.1 %*% X,
+ t(X) %*% sigma.1 %*% y)
+ ehat <- y - X %*% b.hat
+ sigma2ehat <- (t(ehat) %*% sigma.1 %*% ehat)/(n * t.)
+ return(list(betahat=b.hat, ehat=ehat, sigma2=sigma2ehat))
+ }
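+ ## illustrative check (not run): with an identity weighting matrix the
+ ## GLS step reduces to OLS, so betahat matches lm.fit(); the returned
+ ## sigma2 still divides by the panel's n*t., so ignore it here
+ # X0 <- cbind(1, 1:10); y0 <- rnorm(10)
+ # max(abs(GLSstep(X0, y0, diag(10))$betahat - lm.fit(X0, y0)$coefficients)) # ~ 0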
+
+
+ ## optimization
+
+ ## adaptive scaling
+ parscale <- 1/max(myparms0, 0.1)
+
+ if(method=="nlminb") {
+
+ optimum <- nlminb(start = myparms0, objective = ll.c,
+ gradient = NULL, hessian = NULL,
+ y = y, X = X, n = n, t. = t., w = w, w2 = w2,
+ scale = parscale,
+ control = list(x.tol = x.tol,
+ rel.tol = rel.tol, trace = trace),
+ lower = lower.bounds, upper = upper.bounds)
+
+ ## log likelihood at optimum (notice inverted sign)
+ myll <- -optimum$objective
+ ## retrieve optimal parms and H
+ myparms <- optimum$par
+ myHessian <- fdHess(myparms, function(x) -ll.c(x,
+ y, X, n, t., w, w2))$Hessian
+
+ } else {
+
+ require(maxLik)
+
+ ## initial values are not allowed to be zero
+ maxout<-function(x,a) ifelse(x>a, x, a)
+ myparms0 <- maxout(myparms0, 0.01)
+
+ ## invert sign for MAXimization
+ ll.c2 <- function(phirholambda, y, X, n, t., w, w2) {
+ -ll.c(phirholambda, y, X, n, t., w, w2)
+ }
+
+ ## max likelihood
+ optimum <- maxLik(logLik = ll.c2,
+ grad = NULL, hess = NULL, start=myparms0,
+ method = method,
+ parscale = parscale,
+ constraints=list(ineqA=cA, ineqB=cB),
+ y = y, X = X, n = n, t. = t., w = w, w2 = w2)
+
+ ## log likelihood at optimum (notice inverted sign)
+ myll <- optimum$maximum # this one MAXimizes
+ ## retrieve optimal parms and H
+ myparms <- optimum$estimate
+ myHessian <- optimum$hessian
+ }
+
+ ## one last GLS step at optimal vcov parms
+ sigma.1 <- invSigma(myparms, n, t., w2) # same error weights as in ll.c
+ beta <- GLSstep(X, y, sigma.1)
+
+ ## final vcov(beta)
+ covB <- as.numeric(beta[[3]]) *
+ solve(t(X) %*% sigma.1 %*% X)
+
+ ## final vcov(errcomp)
+ nvcovpms <- length(nam.errcomp) - 1
+ ## error handler here for singular Hessian cases
+ covTheta <- try(solve(-myHessian), silent=TRUE)
+ if(inherits(covTheta, "try-error")) {
+ covTheta <- matrix(NA, ncol=nvcovpms+1,
+ nrow=nvcovpms+1)
+ warning("Hessian matrix is not invertible")
+ }
+ covAR <- NULL
+ covPRL <- covTheta
+
+ ## final parms
+ betas <- as.vector(beta[[1]])
+ sigma2 <- as.numeric(beta[["sigma2"]])
+ arcoef <- NULL
+ errcomp <- myparms[which(nam.errcomp!="psi")]
+ names(betas) <- nam.beta
+ names(errcomp) <- nam.errcomp[which(nam.errcomp!="psi")]
+
+ dimnames(covB) <- list(nam.beta, nam.beta)
+ dimnames(covPRL) <- list(names(errcomp), names(errcomp))
+
+ ## result
+ RES <- list(betas = betas, arcoef=arcoef, errcomp = errcomp,
+ covB = covB, covAR=covAR, covPRL = covPRL, ll = myll,
+ sigma2 = sigma2)
+
+ return(RES)
+}
Added: pkg/R/sparseBmethods.R
===================================================================
--- pkg/R/sparseBmethods.R (rev 0)
+++ pkg/R/sparseBmethods.R 2013-04-02 21:02:16 UTC (rev 160)
@@ -0,0 +1,113 @@
+## sparse-matrix (spam) versions of the cross-product B'B and of log|B|,
+## with B = I - lambda*W; a 'listw' of style "W" or "S" is first converted
+## to a similar symmetric spam representation, a plain matrix is converted
+## directly (xprodB) or handled densely (ldetB)
+##
+## usage and computational gains on a large example:
+#> system.time(b1s<-solveB(lambda=0.2, listw=lwUScw))
+# user system elapsed
+# 0.04 0.00 0.16
+#> system.time(b1m<-solveB(lambda=0.2, listw=lwUScw, method="Matrix"))
+# user system elapsed
+# 0.14 0.00 0.55
+#> system.time(b1n<-solveB(lambda=0.2, listw=lwUScw, method="naive"))
+# user system elapsed
+# 20.75 0.14 20.91
+
+## needed here, but already in 'splm'??
+
+listw2U_spam <- function(lw) {
+ 0.5 * (lw + t(lw))
+}
+
+similar.listw_spam <- function(listw)
+{
+ nbsym <- attr(listw$neighbours, "sym")
+ if (is.null(nbsym))
+ nbsym <- is.symmetric.nb(listw$neighbours, FALSE)
+ if (!nbsym)
+ stop("Only symmetric nb can yield similar to symmetric weights")
+ if (attr(listw$weights, "mode") == "general")
+ if (!attr(listw$weights, "glistsym"))
+ stop("General weights must be symmetric")
+ n <- length(listw$neighbours)
+ if (n < 1)
+ stop("non-positive number of entities")
+ sww <- as.spam.listw(listw)
+ if (listw$style == "W") {
+ sd <- attr(listw$weights, "comp")$d
+ sd1 <- 1/(sqrt(sd))
+ sdd <- diag.spam(sd, n, n)
+ sdd1 <- diag.spam(sd1, n, n)
+ sww1 <- sdd %*% sww
+ res <- sdd1 %*% sww1 %*% sdd1
+ }
+ else if (listw$style == "S") {
+ q <- attr(listw$weights, "comp")$q
+ Q <- attr(listw$weights, "comp")$Q
+ eff.n <- attr(listw$weights, "comp")$eff.n
+ q1 <- 1/(sqrt(q))
+ qq <- diag.spam(q, n, n)
+ qq1 <- diag.spam(q1, n, n)
+ ww0 <- (Q/eff.n) * sww
+ ww1 <- qq %*% ww0
+ sim0 <- qq1 %*% ww1 %*% qq1
+ res <- (eff.n/Q) * sim0
+ }
+ else stop("Conversion not suitable for this weights style")
+ res
+}
+
+xprodB <- function(lambda, listw, can.sim=TRUE) {
+
+ ## check if listw or matrix;
+ if("listw" %in% class(listw)) {
+ ## case listw is 'listw'
+ n <- length(listw$weights)
+ if (listw$style %in% c("W", "S") & can.sim) {
+ csrw <- listw2U_spam(similar.listw_spam(listw))
+ similar <- TRUE
+ } else {
+ csrw <- as.spam.listw(listw)
+ }
+ } else {
+ ## case listw is 'matrix'
+ n <- ncol(listw)
+ csrw <- as.spam(listw)
+ }
+ I <- diag.spam(1, n, n)
+ B <- I - lambda * csrw
+ xprodB <- t(B) %*% B
+ return(xprodB)
+}
+
+ldetB <- function(lambda, listw, can.sim=TRUE) {
+
+ ## check if listw or matrix;
+ if("listw" %in% class(listw)) {
+ ## case listw is 'listw'
+ if (listw$style %in% c("W", "S") & can.sim) {
+ csrw <- listw2U_spam(similar.listw_spam(listw))
+ similar <- TRUE
+ }
+ else csrw <- as.spam.listw(listw)
+ n <- length(listw$weights)
+ I <- diag.spam(1, n, n)
+ B <- I - lambda * csrw
+ J1 <- try(determinant(B, logarithm = TRUE)$modulus, silent = TRUE)
+ if (inherits(J1, "try-error")) {
+ ## fall back on standard methods
+ ldetB <- log(det(as.matrix(B)))
+ warning("Bad result in calculating log(det(B))")
+ } else {
+ ldetB <- J1
+ }
+ } else {
+ ## case listw is 'matrix'
+ n <- ncol(listw)
+ ldetB <- log(det(diag(1, n) - lambda * listw))
+ #csrw <- as.spam(listw)
+ }
+
+ return(ldetB)
+}
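+## illustrative dense cross-check (not run, toy W, 'spam' attached):
+# W0 <- matrix(c(0,1,1, 1,0,1, 1,1,0)/2, 3, 3)
+# B0 <- diag(3) - 0.2 * W0
+# max(abs(as.matrix(xprodB(0.2, W0)) - crossprod(B0))) # ~ 0
+# ldetB(0.2, W0) - log(det(B0)) # 0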
+
+