From noreply at r-forge.r-project.org Thu May 1 07:09:21 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 1 May 2014 07:09:21 +0200 (CEST) Subject: [Eventstudies-commits] r301 - pkg/R Message-ID: <20140501050922.04490180384@r-forge.r-project.org> Author: chiraganand Date: 2014-05-01 07:09:21 +0200 (Thu, 01 May 2014) New Revision: 301 Modified: pkg/R/eventstudy.R Log: Moved single firm handling out of lmAMM. Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-04-30 18:56:29 UTC (rev 300) +++ pkg/R/eventstudy.R 2014-05-01 05:09:21 UTC (rev 301) @@ -19,15 +19,16 @@ if (is.levels == TRUE) { firm.returns <- diff(log(firm.returns)) * 100 } - + + ## handle single series + if (is.null(ncol(firm.returns))) { + stop("firm.returns should be a zoo series with at least one column. Use '[' with 'drop = FALSE'.") + } + firmNames <- colnames(firm.returns) + ### Run models ## AMM if (type == "lmAMM") { - if (is.null(ncol(firm.returns))) { - stop("firm.returns should be a zoo series with at least one column. Use '[' with 'drop = FALSE'.") - } - firmNames <- colnames(firm.returns) - ## AMM residual to time series timeseriesAMM <- function(firm.returns, X, verbose = FALSE, nlags = 1) { tmp <- resid(lmAMM(firm.returns = firm.returns, From noreply at r-forge.r-project.org Thu May 1 07:10:52 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 1 May 2014 07:10:52 +0200 (CEST) Subject: [Eventstudies-commits] r302 - pkg/inst/tests Message-ID: <20140501051052.B8523185164@r-forge.r-project.org> Author: chiraganand Date: 2014-05-01 07:10:52 +0200 (Thu, 01 May 2014) New Revision: 302 Added: pkg/inst/tests/test_interfaces.R Log: Added test cases for testing eventstudy interfaces to various models. 
Added: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R (rev 0) +++ pkg/inst/tests/test_interfaces.R 2014-05-01 05:10:52 UTC (rev 302) @@ -0,0 +1,51 @@ +context("eventstudy") + +test_that("test.interfaces", { + load("test_SplitDates.rda") + load("test_StockPriceReturns.rda") + load("test_NiftyIndex.rda") + + +### Basic event study with default args (market residuals) + cat("Checking market residuals interface: ") + expected_mean <- c(0, 0.0393985717416213, -0.7458035091065, + 0.457817077869512, 0.715714066835461, 2.33986420702835, + 2.37333344340029) + expected_outcomes <- c("success", "success") + + test_events <- data.frame(outcome.unit = "ONGC", + event.when = c("2011-08-01", "2010-05-14"), + stringsAsFactors = FALSE) + test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", + drop = FALSE] + test_es <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + market.returns = NiftyIndex) + + expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) + expect_that(expected_outcomes, equals(test_es$outcomes)) + expect_is(test_es, "es") + +### AMM interface + cat("Checking AMM interface: ") +XX expected_mean <- c(0, 0.0393985717416213, -0.7458035091065, + 0.457817077869512, 0.715714066835461, 2.33986420702835, + 2.37333344340029) + expected_outcomes <- c("success", "success") + + test_events <- data.frame(outcome.unit = "ONGC", + event.when = c("2011-08-01", "2010-05-14"), + stringsAsFactors = FALSE) + test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", + drop = FALSE] + test_es <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "lmAMM", + market.returns = NiftyIndex, + others = test_others) + + expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) + expect_that(expected_outcomes, equals(test_es$outcomes)) + expect_is(test_es, 
"es") From noreply at r-forge.r-project.org Thu May 1 21:29:11 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 1 May 2014 21:29:11 +0200 (CEST) Subject: [Eventstudies-commits] r303 - pkg/inst/tests Message-ID: <20140501192911.7858F1872B0@r-forge.r-project.org> Author: chiraganand Date: 2014-05-01 21:29:11 +0200 (Thu, 01 May 2014) New Revision: 303 Added: pkg/inst/tests/test_NiftyIndex.rda pkg/inst/tests/test_SplitDates.rda pkg/inst/tests/test_StockPriceReturns.rda pkg/inst/tests/test_USDINR.rda Modified: pkg/inst/tests/test_interfaces.R Log: Added interface testing for inference procedures and remapping, added more testing data sets. Added: pkg/inst/tests/test_NiftyIndex.rda =================================================================== (Binary files differ) Property changes on: pkg/inst/tests/test_NiftyIndex.rda ___________________________________________________________________ Added: svn:mime-type + application/x-gzip Added: pkg/inst/tests/test_SplitDates.rda =================================================================== (Binary files differ) Property changes on: pkg/inst/tests/test_SplitDates.rda ___________________________________________________________________ Added: svn:mime-type + application/x-gzip Added: pkg/inst/tests/test_StockPriceReturns.rda =================================================================== (Binary files differ) Property changes on: pkg/inst/tests/test_StockPriceReturns.rda ___________________________________________________________________ Added: svn:mime-type + application/x-xz Added: pkg/inst/tests/test_USDINR.rda =================================================================== (Binary files differ) Property changes on: pkg/inst/tests/test_USDINR.rda ___________________________________________________________________ Added: svn:mime-type + application/x-gzip Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- 
pkg/inst/tests/test_interfaces.R 2014-05-01 05:10:52 UTC (rev 302) +++ pkg/inst/tests/test_interfaces.R 2014-05-01 19:29:11 UTC (rev 303) @@ -4,8 +4,8 @@ load("test_SplitDates.rda") load("test_StockPriceReturns.rda") load("test_NiftyIndex.rda") + load("test_usdinr.rda") - ### Basic event study with default args (market residuals) cat("Checking market residuals interface: ") expected_mean <- c(0, 0.0393985717416213, -0.7458035091065, @@ -29,9 +29,9 @@ ### AMM interface cat("Checking AMM interface: ") -XX expected_mean <- c(0, 0.0393985717416213, -0.7458035091065, - 0.457817077869512, 0.715714066835461, 2.33986420702835, - 2.37333344340029) + expected_mean <- c(0, 0.135927645042554, -0.600457594252805, 0.631525565290171, + 0.871423869901684, 2.54741102266723, 2.5989730099384) + expected_outcomes <- c("success", "success") test_events <- data.frame(outcome.unit = "ONGC", @@ -39,13 +39,114 @@ stringsAsFactors = FALSE) test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", drop = FALSE] + test_others <- USDINR test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, width = 3, type = "lmAMM", - market.returns = NiftyIndex, + market.returns = NiftyIndex[index(USDINR)], others = test_others) expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) expect_that(expected_outcomes, equals(test_es$outcomes)) expect_is(test_es, "es") + +### Excess return + cat("Checking excess return interface: ") + expected_mean <- c(0, 0.138567158395153, -0.631185954448288, 0.701644918222266, + 1.15001275036422, 2.88646832315114, 3.32315429568726) + expected_outcomes <- c("success", "success") + + test_events <- data.frame(outcome.unit = "ONGC", + event.when = c("2011-08-01", "2010-05-14"), + stringsAsFactors = FALSE) + test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", + drop = FALSE] + + test_es <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "excessReturn", 
+ market.returns = NiftyIndex) + + expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) + expect_that(expected_outcomes, equals(test_es$outcomes)) + expect_is(test_es, "es") + +### Remapping + cat("Checking remapping: ") + test_events <- data.frame(outcome.unit = "ONGC", + event.when = c("2011-08-01", "2010-05-14"), + stringsAsFactors = FALSE) + test_returns <- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", + drop = FALSE] + + ## cumsum + test_es <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None", + to.remap = FALSE, + remap = "cumsum") + + test_es_remap <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None", + to.remap = TRUE, + remap = "cumsum") + + expect_error(expect_that(test_es, equals(test_es_remap))) + + ## cumprod + test_es <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None", + to.remap = FALSE, + remap = "cumprod") + + test_es_remap <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None", + to.remap = TRUE, + remap = "cumprod") + + expect_error(expect_that(test_es, equals(test_es_remap))) + +### Inference + cat("Checking inference interface: ") + ## bootstrap + test_es_inference <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None", + inference = TRUE, + inference.strategy = "bootstrap") + + test_es <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None", + inference = FALSE, + inference.strategy = "bootstrap") + + expect_error(expect_that(test_es_inference, equals(test_es))) + + ## wilcoxon + test_es_inference <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None", + inference = TRUE, + inference.strategy = "wilcoxon") + + test_es <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 
3, + type = "None", + inference = FALSE, + inference.strategy = "wilcoxon") + + expect_error(expect_that(test_es_inference, equals(test_es))) From noreply at r-forge.r-project.org Fri May 2 06:30:58 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 06:30:58 +0200 (CEST) Subject: [Eventstudies-commits] r304 - pkg/inst/tests Message-ID: <20140502043059.000691875E7@r-forge.r-project.org> Author: chiraganand Date: 2014-05-02 06:30:58 +0200 (Fri, 02 May 2014) New Revision: 304 Modified: pkg/inst/tests/test_interfaces.R Log: Fixed expected error error in test cases. Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-01 19:29:11 UTC (rev 303) +++ pkg/inst/tests/test_interfaces.R 2014-05-02 04:30:58 UTC (rev 304) @@ -4,7 +4,7 @@ load("test_SplitDates.rda") load("test_StockPriceReturns.rda") load("test_NiftyIndex.rda") - load("test_usdinr.rda") + load("test_USDINR.rda") ### Basic event study with default args (market residuals) cat("Checking market residuals interface: ") @@ -96,7 +96,7 @@ to.remap = TRUE, remap = "cumsum") - expect_error(expect_that(test_es, equals(test_es_remap))) + expect_false(isTRUE(all.equal(test_es, test_es_remap))) ## cumprod test_es <- eventstudy(firm.returns = test_returns, @@ -113,7 +113,7 @@ to.remap = TRUE, remap = "cumprod") - expect_error(expect_that(test_es, equals(test_es_remap))) + expect_false(isTRUE(all.equal(test_es, test_es_remap))) ### Inference cat("Checking inference interface: ") @@ -132,7 +132,7 @@ inference = FALSE, inference.strategy = "bootstrap") - expect_error(expect_that(test_es_inference, equals(test_es))) + expect_false(isTRUE(all.equal(test_es, test_es_inference))) ## wilcoxon test_es_inference <- eventstudy(firm.returns = test_returns, @@ -149,4 +149,6 @@ inference = FALSE, inference.strategy = "wilcoxon") - expect_error(expect_that(test_es_inference, 
equals(test_es))) + expect_false(isTRUE(all.equal(test_es, test_es_inference))) + +}) # end test_that() From noreply at r-forge.r-project.org Fri May 2 10:42:30 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 10:42:30 +0200 (CEST) Subject: [Eventstudies-commits] r305 - pkg/inst/tests Message-ID: <20140502084230.1DD2A186FE1@r-forge.r-project.org> Author: chiraganand Date: 2014-05-02 10:42:29 +0200 (Fri, 02 May 2014) New Revision: 305 Modified: pkg/inst/tests/test_interfaces.R Log: Added test for no model output and checking for no colname in firm returns. Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-02 04:30:58 UTC (rev 304) +++ pkg/inst/tests/test_interfaces.R 2014-05-02 08:42:29 UTC (rev 305) @@ -27,6 +27,27 @@ expect_that(expected_outcomes, equals(test_es$outcomes)) expect_is(test_es, "es") +### None + cat("Checking no model output: ") + expected_mean <- c(0, -0.197406699931557, -0.804299958306487, + 0.0135570496689663, -0.418062964428412, + 0.904144365357373, -0.806779427723603) + expected_outcomes <- c("success", "success") + + test_events <- data.frame(outcome.unit = "ONGC", + event.when = c("2011-08-01", "2010-05-14"), + stringsAsFactors = FALSE) + test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", + drop = FALSE] + test_es <- eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None") + + expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) + expect_that(expected_outcomes, equals(test_es$outcomes)) + expect_is(test_es, "es") + ### AMM interface cat("Checking AMM interface: ") expected_mean <- c(0, 0.135927645042554, -0.600457594252805, 0.631525565290171, @@ -152,3 +173,17 @@ expect_false(isTRUE(all.equal(test_es, test_es_inference))) }) # end test_that() + +test_that("test.arguments", { + 
load("test_StockPriceReturns.rda") + + cat("Checking single series handling: ") + test_events <- data.frame(outcome.unit = "ONGC", + event.when = c("2011-08-01", "2010-05-14"), + stringsAsFactors = FALSE) + test_returns<- StockPriceReturns$ONGC + expect_error(eventstudy(firm.returns = test_returns, + eventList = test_events, + width = 3, + type = "None")) +}) From noreply at r-forge.r-project.org Fri May 2 10:58:40 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 10:58:40 +0200 (CEST) Subject: [Eventstudies-commits] r306 - in pkg: . inst/tests man Message-ID: <20140502085840.EE54D187635@r-forge.r-project.org> Author: chiraganand Date: 2014-05-02 10:58:40 +0200 (Fri, 02 May 2014) New Revision: 306 Modified: pkg/DESCRIPTION pkg/inst/tests/test_eventstudy.R pkg/man/NiftyIndex.Rd pkg/man/eventstudies-package.Rd pkg/man/eventstudy.Rd pkg/man/inference.wilcox.Rd pkg/man/manyfirmssubperiod.lmAMM.Rd pkg/man/remap.event.reindex.Rd Log: Minor modifications. Modified: pkg/DESCRIPTION =================================================================== --- pkg/DESCRIPTION 2014-05-02 08:42:29 UTC (rev 305) +++ pkg/DESCRIPTION 2014-05-02 08:58:40 UTC (rev 306) @@ -2,7 +2,7 @@ Type: Package Title: Event study analysis Version: 1.2 -Date: 2014-04-03 +Date: 2014-05-02 Author: Chirag Anand, Vikram Bahure, Vimal Balasubramaniam, Ajay Shah Maintainer: Vikram Bahure Depends: R (>= 2.12.0), zoo, xts, boot, testthat, sandwich Modified: pkg/inst/tests/test_eventstudy.R =================================================================== --- pkg/inst/tests/test_eventstudy.R 2014-05-02 08:42:29 UTC (rev 305) +++ pkg/inst/tests/test_eventstudy.R 2014-05-02 08:58:40 UTC (rev 306) @@ -58,3 +58,4 @@ class = "factor")), .Names = c("z.e", "outcomes" )))) }) +## TODO: check for missing data on event time, it should pick up data on the last available timestamp. 
Modified: pkg/man/NiftyIndex.Rd =================================================================== --- pkg/man/NiftyIndex.Rd 2014-05-02 08:42:29 UTC (rev 305) +++ pkg/man/NiftyIndex.Rd 2014-05-02 08:58:40 UTC (rev 306) @@ -2,7 +2,7 @@ \alias{NiftyIndex} \docType{data} -\title{NSE Nifty index from 2004 to 2012.} +\title{NSE Nifty index from 2004 to 2012} \description{A sample time series of Nifty index return from 1990 to 2012.} Modified: pkg/man/eventstudies-package.Rd =================================================================== --- pkg/man/eventstudies-package.Rd 2014-05-02 08:42:29 UTC (rev 305) +++ pkg/man/eventstudies-package.Rd 2014-05-02 08:58:40 UTC (rev 306) @@ -1,9 +1,9 @@ \name{eventstudies-package} -\alias{eventstudies} +\alias{eventstudies-package} \docType{package} \title{ - An R package for conducting event studies. + eventstudies: R package for conducting event studies } \description{ Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-02 08:42:29 UTC (rev 305) +++ pkg/man/eventstudy.Rd 2014-05-02 08:58:40 UTC (rev 306) @@ -1,7 +1,7 @@ \name{eventstudy} \alias{eventstudy} -\title{Event study analysis} +\title{Perform event study analysis} \description{ \sQuote{eventstudy} provides an easy interface that integrates all Modified: pkg/man/inference.wilcox.Rd =================================================================== --- pkg/man/inference.wilcox.Rd 2014-05-02 08:42:29 UTC (rev 305) +++ pkg/man/inference.wilcox.Rd 2014-05-02 08:58:40 UTC (rev 306) @@ -2,7 +2,7 @@ \alias{inference.wilcox} \title{ - Wilcox inference for event study analysis + Wilcox inference for event study estimator } \description{ Modified: pkg/man/manyfirmssubperiod.lmAMM.Rd =================================================================== --- pkg/man/manyfirmssubperiod.lmAMM.Rd 2014-05-02 08:42:29 UTC (rev 305) +++ pkg/man/manyfirmssubperiod.lmAMM.Rd 2014-05-02 08:58:40 UTC (rev 306) @@ 
-1,7 +1,7 @@ \name{manyfirmssubperiod.lmAMM} \alias{manyfirmssubperiod.lmAMM} -\title{Estimate exposure for many regressands over multiple periods.} +\title{Estimate exposure for many regressands over multiple periods} \description{\code{\link{manyfirmssubperiod.lmAMM}} estimates exposure for many regressands over a set of regressors obtained by using Modified: pkg/man/remap.event.reindex.Rd =================================================================== --- pkg/man/remap.event.reindex.Rd 2014-05-02 08:42:29 UTC (rev 305) +++ pkg/man/remap.event.reindex.Rd 2014-05-02 08:58:40 UTC (rev 306) @@ -2,7 +2,7 @@ \alias{remap.event.reindex} \title{ - Reindex value within event window + Re-index value within event window } \description{ From noreply at r-forge.r-project.org Fri May 2 11:16:14 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 11:16:14 +0200 (CEST) Subject: [Eventstudies-commits] r307 - pkg/man Message-ID: <20140502091614.C38D7186199@r-forge.r-project.org> Author: chiraganand Date: 2014-05-02 11:16:14 +0200 (Fri, 02 May 2014) New Revision: 307 Modified: pkg/man/eventstudy.Rd Log: Minor changes. Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-02 08:58:40 UTC (rev 306) +++ pkg/man/eventstudy.Rd 2014-05-02 09:16:14 UTC (rev 307) @@ -7,7 +7,7 @@ \sQuote{eventstudy} provides an easy interface that integrates all functionalities of package \pkg{eventstudies} to undertake event study analysis. It allows the user to specify the type of data adjustment - to be done (using the augmented market model functionalities of the + to be done (using market model functionalities of the package) and then an inference strategy of choice. } @@ -123,8 +123,8 @@ \dQuote{type} argument. See section on \sQuote{Model arguments} for more details. - The argument \sQuote{width} in \code{\link{phys2eventtime}} has been - set to zero. 
+ \code{\link{phys2eventtime}} is called with \sQuote{width} set to + zero when called from this function. } \section{\bold{Model arguments}}{ From noreply at r-forge.r-project.org Fri May 2 13:27:10 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 13:27:10 +0200 (CEST) Subject: [Eventstudies-commits] r308 - pkg/man Message-ID: <20140502112710.36F1E1874BE@r-forge.r-project.org> Author: vikram Date: 2014-05-02 13:27:09 +0200 (Fri, 02 May 2014) New Revision: 308 Modified: pkg/man/eventstudy.Rd Log: Modified manual example to correct event date 'wrongspan' error Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-02 09:16:14 UTC (rev 307) +++ pkg/man/eventstudy.Rd 2014-05-02 11:27:09 UTC (rev 308) @@ -222,7 +222,7 @@ ## Event study using Augment Market Model data("AMMData") events <- data.frame(outcome.unit = "Infosys", - event.when = c("2012-03-01", "2012-04-01"), + event.when = c("2012-04-01", "2012-06-01"), stringsAsFactors = FALSE) es <- eventstudy(firm.returns = AMMData[, "Infosys", drop = FALSE], From noreply at r-forge.r-project.org Fri May 2 15:53:30 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 15:53:30 +0200 (CEST) Subject: [Eventstudies-commits] r309 - pkg/man Message-ID: <20140502135330.14AD5187389@r-forge.r-project.org> Author: chiraganand Date: 2014-05-02 15:53:29 +0200 (Fri, 02 May 2014) New Revision: 309 Modified: pkg/man/eventstudy.Rd Log: Added one more firm in the lmAMM eventstudy example. 
Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-02 11:27:09 UTC (rev 308) +++ pkg/man/eventstudy.Rd 2014-05-02 13:53:29 UTC (rev 309) @@ -221,11 +221,11 @@ ## Event study using Augment Market Model data("AMMData") -events <- data.frame(outcome.unit = "Infosys", +events <- data.frame(outcome.unit = c("Infosys", "TCS"), event.when = c("2012-04-01", "2012-06-01"), stringsAsFactors = FALSE) -es <- eventstudy(firm.returns = AMMData[, "Infosys", drop = FALSE], +es <- eventstudy(firm.returns = AMMData[, c("Infosys", "TCS")], eventList = events, width = 10, type = "lmAMM", From noreply at r-forge.r-project.org Fri May 2 17:23:23 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 17:23:23 +0200 (CEST) Subject: [Eventstudies-commits] r310 - pkg/man Message-ID: <20140502152323.878B9187493@r-forge.r-project.org> Author: chiraganand Date: 2014-05-02 17:23:23 +0200 (Fri, 02 May 2014) New Revision: 310 Modified: pkg/man/ees.Rd pkg/man/eesPlot.Rd Log: Improved citation information. Modified: pkg/man/ees.Rd =================================================================== --- pkg/man/ees.Rd 2014-05-02 13:53:29 UTC (rev 309) +++ pkg/man/ees.Rd 2014-05-02 15:23:23 UTC (rev 310) @@ -73,18 +73,18 @@ } \references{ - Ila Patnaik, Nirvikar Singh and Ajay Shah (2013). - Foreign Investors under stress: Evidence from - India. \emph{International Finance}, \bold{16(2)}, 213-244. - URL - http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract - and also available at http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html + \cite{Ila Patnaik, Nirvikar Singh and Ajay Shah (2013). + Foreign Investors under stress: Evidence from + India. + International Finance, 16(2), 213-244. 
+ \link{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} + \link{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} + } To convert number to words, code uses function \href{http://finzi.psych.upenn.edu/R/Rhelp02a/archive/46843.html}{\dQuote{numbers2words}} by \href{http://socserv.mcmaster.ca/jfox/}{John Fox} and \dQuote{deprintize} function by \href{http://mbq.me/}{Miron Kursa}. - } \author{Vikram Bahure, Vimal Balasubramaniam} @@ -92,9 +92,9 @@ \examples{ library(eventstudies) data(EESData) -## Input S&P 500 as the univariate series +## Input S&P 500 as the univariate series input <- EESData$sp500 ## Constructing summary statistics for 5% tail values (5% on both sides) -output <- ees(input, prob.value = 5) +output <- ees(input, prob.value = 5) str(output) } Modified: pkg/man/eesPlot.Rd =================================================================== --- pkg/man/eesPlot.Rd 2014-05-02 13:53:29 UTC (rev 309) +++ pkg/man/eesPlot.Rd 2014-05-02 15:23:23 UTC (rev 310) @@ -58,12 +58,13 @@ defined on the event series.} \references{ - Ila Patnaik, Nirvikar Singh and Ajay Shah (2013). - Foreign Investors under stress: Evidence from - India. \emph{International Finance}, \bold{16(2)}, 213-244. - URL - http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract - and also available at http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html + \cite{Ila Patnaik, Nirvikar Singh and Ajay Shah (2013). + Foreign Investors under stress: Evidence from + India. + International Finance, 16(2), 213-244. 
+ \link{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} + \link{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} + } } \author{Vikram Bahure, Vimal Balasubramaniam} From noreply at r-forge.r-project.org Fri May 2 20:03:39 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 20:03:39 +0200 (CEST) Subject: [Eventstudies-commits] r311 - pkg/inst/tests Message-ID: <20140502180339.C39E81877AE@r-forge.r-project.org> Author: chiraganand Date: 2014-05-02 20:03:39 +0200 (Fri, 02 May 2014) New Revision: 311 Modified: pkg/inst/tests/test_eventstudy.R Log: Added test case for checking missing event date in data. Modified: pkg/inst/tests/test_eventstudy.R =================================================================== --- pkg/inst/tests/test_eventstudy.R 2014-05-02 15:23:23 UTC (rev 310) +++ pkg/inst/tests/test_eventstudy.R 2014-05-02 18:03:39 UTC (rev 311) @@ -57,5 +57,14 @@ "wdatamissing", "wrongspan"), class = "factor")), .Names = c("z.e", "outcomes" )))) + +## Check the previous date +cat("\nTesting handling of missing data on event date: ") +eventdate <- "2004-01-10" +eventdate_output <- "2004-01-09" +eventslist <- data.frame(outcome.unit = "ITC", event.when = eventdate, + stringsAsFactors = FALSE) +a <- phys2eventtime(p, eventslist, width = 2) +expect_that(as.numeric(a$z.e["0",]), + equals(as.numeric(p$ITC[as.Date(eventdate_output), ]))) }) -## TODO: check for missing data on event time, it should pick up data on the last available timestamp. 
From noreply at r-forge.r-project.org Fri May 2 20:15:54 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 2 May 2014 20:15:54 +0200 (CEST) Subject: [Eventstudies-commits] r312 - pkg/man Message-ID: <20140502181554.304A2186CFB@r-forge.r-project.org> Author: chiraganand Date: 2014-05-02 20:15:53 +0200 (Fri, 02 May 2014) New Revision: 312 Modified: pkg/man/ees.Rd pkg/man/eesPlot.Rd Log: Fixed citation information. Modified: pkg/man/ees.Rd =================================================================== --- pkg/man/ees.Rd 2014-05-02 18:03:39 UTC (rev 311) +++ pkg/man/ees.Rd 2014-05-02 18:15:53 UTC (rev 312) @@ -77,8 +77,8 @@ Foreign Investors under stress: Evidence from India. International Finance, 16(2), 213-244. - \link{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} - \link{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} + \url{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} + \url{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} } To convert number to words, code uses function Modified: pkg/man/eesPlot.Rd =================================================================== --- pkg/man/eesPlot.Rd 2014-05-02 18:03:39 UTC (rev 311) +++ pkg/man/eesPlot.Rd 2014-05-02 18:15:53 UTC (rev 312) @@ -60,10 +60,9 @@ \references{ \cite{Ila Patnaik, Nirvikar Singh and Ajay Shah (2013). Foreign Investors under stress: Evidence from - India. - International Finance, 16(2), 213-244. - \link{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} - \link{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} + India. International Finance, 16(2), 213-244. 
+ \url{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} + \url{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} } } From noreply at r-forge.r-project.org Fri May 9 19:34:23 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 9 May 2014 19:34:23 +0200 (CEST) Subject: [Eventstudies-commits] r313 - pkg/man Message-ID: <20140509173423.3BC00187679@r-forge.r-project.org> Author: chiraganand Date: 2014-05-09 19:34:22 +0200 (Fri, 09 May 2014) New Revision: 313 Modified: pkg/man/AMMData.Rd pkg/man/EESData.Rd pkg/man/NiftyIndex.Rd pkg/man/SplitDates.Rd pkg/man/StockPriceReturns.Rd pkg/man/ees.Rd pkg/man/eesPlot.Rd pkg/man/eventstudy.Rd pkg/man/excessReturn.Rd pkg/man/inference.bootstrap.Rd pkg/man/inference.wilcox.Rd pkg/man/lmAMM.Rd pkg/man/makeX.Rd pkg/man/manyfirmssubperiod.lmAMM.Rd pkg/man/marketResidual.Rd pkg/man/phys2eventtime.Rd pkg/man/remap.cumprod.Rd pkg/man/remap.cumsum.Rd pkg/man/remap.event.reindex.Rd pkg/man/subperiod.lmAMM.Rd Log: Modified language, improved examples. Modified: pkg/man/AMMData.Rd =================================================================== --- pkg/man/AMMData.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/AMMData.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -1,20 +1,21 @@ \name{AMMData} \alias{AMMData} +\docType{data} +\title{Data set containing firm returns, market returns, currency + returns, and call money rate used for AMM estimation} -\title{Data containing firm returns, market returns, and currency - returns used for AMM estimation} +\description{This data set consists of daily time series for firm + returns (Infosys and TCS), market returns (Nifty returns), currency + returns (INR/USD), and call money rate. It is used to demonstrate + augmented market model estimation. -\description{The data series is a daily time-series zoo object. The sample range for the data is from 2012-02-01 to 2014-01-31. 
It consists daily time series for firm returns (Infosys and TCS), market returns (Nifty returns) and currency returns (INR/USD). This data is used to demonstrate the AMM estimation.} + The data series is a daily time-series zoo object. The sample range for + the data is from 2012-02-01 to 2014-01-31. All series are in per cent. +} \usage{data(AMMData)} \author{Vikram Bahure} -\examples{ -library(zoo) -data(AMMData) -str(AMMData) -} - \keyword{AMMData} Modified: pkg/man/EESData.Rd =================================================================== --- pkg/man/EESData.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/EESData.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -1,18 +1,15 @@ \name{EESData} - +\alias{EESData} \docType{data} -\alias{EESData} - \title{Returns data used for extreme events analysis} -\description{A daily time series object for S&P 500 and the NIFTY Index.} +\description{This data set is used to demonstrate extreme events study + functionality of the package. It contains daily returns data (in per + cent) of S&P 500 and the NIFTY Index.} \usage{data(EESData)} -\format{\code{zoo}} +\author{Chirag Anand} -\examples{ - data(EESData) -} \keyword{datasets} Modified: pkg/man/NiftyIndex.Rd =================================================================== --- pkg/man/NiftyIndex.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/NiftyIndex.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -4,14 +4,11 @@ \title{NSE Nifty index from 2004 to 2012} -\description{A sample time series of Nifty index return from 1990 to - 2012.} +\description{Time series of Nifty index return (in per cent) from 1990 + to 2012.} \usage{data(NiftyIndex)} -\format{\pkg{zoo}} +\author{Vikram Bahure} -\examples{ -data(NiftyIndex) -} -\keyword{datasets} +\keyword{NiftyIndex} Modified: pkg/man/SplitDates.Rd =================================================================== --- pkg/man/SplitDates.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/SplitDates.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -1,12 
+1,10 @@ \name{SplitDates} - +\alias{SplitDates} \docType{data} -\alias{SplitDates} +\title{Data set of events used to perform event study analysis} -\title{Sample data containing set of events to perform eventstudy analysis.} - -\description{ This data set contains stock split event dates for the index +\description{This data set contains stock split event dates for the index constituents of the Bombay Stock Exchange index (SENSEX). The data format follows the required format in the function \code{phys2eventtime}, with two columns 'outcome.unit' (firm name) and @@ -14,9 +12,6 @@ \usage{data(SplitDates)} -\format{\code{data.frame}} +\author{Vikram Bahure} -\examples{ - data(SplitDates) -} \keyword{datasets} Modified: pkg/man/StockPriceReturns.Rd =================================================================== --- pkg/man/StockPriceReturns.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/StockPriceReturns.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -1,20 +1,15 @@ \name{StockPriceReturns} - +\alias{StockPriceReturns} \docType{data} -\alias{StockPriceReturns} +\title{Stock price returns data} -\title{Sample data containing stock price returns} +\description{This data set contains stock price returns (in per cent) of + 30 major stocks on the National Stock Exchange (NSE) of India for a + period of 23 years.} -\description{This data set contains stock price returns of 30 major - stocks on the National Stock Exchange (NSE) of India for a period of 23 - years.} - \usage{data(StockPriceReturns)} -\format{\code{zoo}} +\author{Vikram Bahure} -\examples{ - data(StockPriceReturns) -} \keyword{datasets} Modified: pkg/man/ees.Rd =================================================================== --- pkg/man/ees.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/ees.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -80,21 +80,12 @@ \url{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} 
\url{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} } - - To convert number to words, code uses function - \href{http://finzi.psych.upenn.edu/R/Rhelp02a/archive/46843.html}{\dQuote{numbers2words}} - by \href{http://socserv.mcmaster.ca/jfox/}{John Fox} and - \dQuote{deprintize} function by \href{http://mbq.me/}{Miron Kursa}. } \author{Vikram Bahure, Vimal Balasubramaniam} \examples{ -library(eventstudies) data(EESData) -## Input S&P 500 as the univariate series -input <- EESData$sp500 -## Constructing summary statistics for 5% tail values (5% on both sides) -output <- ees(input, prob.value = 5) -str(output) +r <- ees(EESData$sp500, prob.value = 5) +str(r, max.level = 2) } Modified: pkg/man/eesPlot.Rd =================================================================== --- pkg/man/eesPlot.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/eesPlot.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -69,9 +69,7 @@ \author{Vikram Bahure, Vimal Balasubramaniam} \examples{ -library(eventstudies) data(EESData) -## Generating event study plots (using modified event study methodology) eesPlot(z = EESData, response.series.name = "nifty", event.series.name = "sp500", Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/eventstudy.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -202,8 +202,6 @@ } \examples{ -## Performing event study -library(eventstudies) data("StockPriceReturns") data("SplitDates") Modified: pkg/man/excessReturn.Rd =================================================================== --- pkg/man/excessReturn.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/excessReturn.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -27,7 +27,6 @@ data(StockPriceReturns) data(NiftyIndex) -## Excess return er.result <- excessReturn(firm.returns = StockPriceReturns, market.returns = NiftyIndex) Modified: pkg/man/inference.bootstrap.Rd 
=================================================================== --- pkg/man/inference.bootstrap.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/inference.bootstrap.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -30,7 +30,7 @@ } \item{to.plot}{a \sQuote{logical} indicating whether to generate an - eventstudy plot of the inference estimated. Defaults to + event study plot of the inference estimated. Defaults to \sQuote{TRUE}. } @@ -61,7 +61,7 @@ \examples{ data(StockPriceReturns) data(SplitDates) -## Converting physical dates to event time frame + es.results <- phys2eventtime(z = StockPriceReturns, events = SplitDates, width = 5) @@ -69,10 +69,7 @@ start = -5, end = +5) -## Cumulating event window eventtime <- remap.cumsum(es.w, is.pc = FALSE, base = 0) - -## Constructing confidence interval using bootstrap inference strategy inference.bootstrap(es.w = eventtime, to.plot = FALSE) } Modified: pkg/man/inference.wilcox.Rd =================================================================== --- pkg/man/inference.wilcox.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/inference.wilcox.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -27,7 +27,7 @@ } \item{to.plot}{a \sQuote{logical} indicating whether to generate an - eventstudy plot of the inference estimated. Defaults to + event study plot of the inference estimated. Defaults to \sQuote{TRUE}. 
} @@ -55,14 +55,12 @@ \examples{ data(StockPriceReturns) data(SplitDates) -## Converting physical dates to event time frame + es.results <- phys2eventtime(z = StockPriceReturns, events = SplitDates, width = 5) es.w <- window(es.results$z.e, start = -5, end = +5) - -## Cumulating event window eventtime <- remap.cumsum(es.w, is.pc = FALSE, base = 0) -## Constructing confidence interval using wilcoxon inference strategy + inference.wilcox(es.w = eventtime, to.plot = FALSE) } Modified: pkg/man/lmAMM.Rd =================================================================== --- pkg/man/lmAMM.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/lmAMM.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -85,14 +85,12 @@ market.returns <- AMMData[,"index.nifty"] currency.returns <- AMMData[,"currency.inrusd"] -## Creating regressors for AMM estimation using makeX function X <- makeX(market.returns, others = currency.returns, switch.to.innov = FALSE, market.returns.purge = FALSE, verbose = FALSE) -## Augmented market model residual amm.result <- lmAMM(firm.returns, X, nlags = 0, verbose = FALSE) plot(amm.result) Modified: pkg/man/makeX.Rd =================================================================== --- pkg/man/makeX.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/makeX.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -79,7 +79,6 @@ market.returns <- AMMData$index.nifty currency.returns <- AMMData$currency.inrusd -## Constructing regressors (independent variables) for AMM X <- makeX(market.returns, others = currency.returns, switch.to.innov = FALSE, Modified: pkg/man/manyfirmssubperiod.lmAMM.Rd =================================================================== --- pkg/man/manyfirmssubperiod.lmAMM.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/manyfirmssubperiod.lmAMM.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -59,16 +59,12 @@ } \examples{ - -## Running manyfirmssubperiod.lmAMM() involves as many steps as working -## with onefirmAMM. 
data("AMMData") firm.returns <- AMMData[, c("Infosys","TCS")] market.returns <- AMMData[, "index.nifty"] currency.returns <- AMMData[, "currency.inrusd"] -## Creating X for AMM estimation using makeX function X <- makeX(market.returns, others = currency.returns, nlags = 1, @@ -77,7 +73,6 @@ verbose = FALSE, dates = as.Date(c("2012-02-01", "2013-01-01", "2014-01-20"))) -## Estimating exposure res <- manyfirmssubperiod.lmAMM(firm.returns = firm.returns, X = X, lags = 1, Modified: pkg/man/marketResidual.Rd =================================================================== --- pkg/man/marketResidual.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/marketResidual.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -24,7 +24,6 @@ data(StockPriceReturns) data(NiftyIndex) -## Market model residual mm.result <- marketResidual(firm.returns = StockPriceReturns, market.returns = NiftyIndex) Modified: pkg/man/phys2eventtime.Rd =================================================================== --- pkg/man/phys2eventtime.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/phys2eventtime.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -78,7 +78,6 @@ data(StockPriceReturns) data(SplitDates) -## Converting physical dates to event time result <- phys2eventtime(z = StockPriceReturns, events = SplitDates, width = 5) Modified: pkg/man/remap.cumprod.Rd =================================================================== --- pkg/man/remap.cumprod.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/remap.cumprod.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -37,19 +37,16 @@ data(StockPriceReturns) data(SplitDates) -## Converting to event time frame es.results <- phys2eventtime(z = StockPriceReturns, events = SplitDates, width = 5) es.w <- window(es.results$z.e, start = -5, end = +5) -## Cumulating (geometric product) event window output eventtime <- remap.cumprod(es.w, is.pc = TRUE, is.returns = TRUE, base = 100) -## Comparing abnormal returns (AR) and cumulative (geometric) abnormal returns (CAR) check.output <- 
cbind(es.w[,1], eventtime[,1]) colnames(check.output) <- c("abnormal.returns", "cumulative.abnormal.returns") check.output Modified: pkg/man/remap.cumsum.Rd =================================================================== --- pkg/man/remap.cumsum.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/remap.cumsum.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -39,13 +39,10 @@ data(StockPriceReturns) data(SplitDates) -## Converting to event time frame es.results <- phys2eventtime(z = StockPriceReturns, events = SplitDates, width = 5) es.w <- window(es.results$z.e, start = -5, end = +5) - -## Cumulating (arithmetic) event window output eventtime <- remap.cumsum(es.w, is.pc = FALSE, base = 0) ## Comparing abnormal returns (AR) and cumulative abnormal returns (CAR) Modified: pkg/man/remap.event.reindex.Rd =================================================================== --- pkg/man/remap.event.reindex.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/remap.event.reindex.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -29,13 +29,12 @@ data(StockPriceReturns) data(SplitDates) -## Converting to event time frame es.results <- phys2eventtime(z = StockPriceReturns, events = SplitDates, width = 5) es.w <- window(es.results$z.e, start = -5, end = +5) -## Reindexing event time (t=0) to 100 eventtime <- remap.event.reindex(es.w) -head(eventtime[,1:5]) + +head(eventtime[, 1:5]) } Modified: pkg/man/subperiod.lmAMM.Rd =================================================================== --- pkg/man/subperiod.lmAMM.Rd 2014-05-02 18:15:53 UTC (rev 312) +++ pkg/man/subperiod.lmAMM.Rd 2014-05-09 17:34:22 UTC (rev 313) @@ -67,12 +67,10 @@ \examples{ data("AMMData") -## Create RHS before running subperiod.lmAMM() firm.returns <- AMMData$Infosys market.returns <- AMMData$index.nifty currency.returns <- AMMData$currency.inrusd -## Constructing regressors for AMM regressors <- makeX(market.returns, others = currency.returns, switch.to.innov = TRUE, @@ -81,7 +79,6 @@ dates = 
as.Date(c("2012-02-01","2013-01-01","2014-01-20")), verbose = FALSE) -## Run AMM for one firm across different periods res <- subperiod.lmAMM(firm.returns, X = regressors, nlags = 1, From noreply at r-forge.r-project.org Sat May 10 08:57:28 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 10 May 2014 08:57:28 +0200 (CEST) Subject: [Eventstudies-commits] r314 - pkg/R Message-ID: <20140510065728.4478B18752C@r-forge.r-project.org> Author: chiraganand Date: 2014-05-10 08:57:27 +0200 (Sat, 10 May 2014) New Revision: 314 Modified: pkg/R/eventstudy.R Log: Added a warning if there are model arguments supplied when type is None. Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-09 17:34:22 UTC (rev 313) +++ pkg/R/eventstudy.R 2014-05-10 06:57:27 UTC (rev 314) @@ -8,12 +8,13 @@ inference = TRUE, inference.strategy = "bootstrap", ...) { - # type = "marketResidual", "excessReturn", "AMM", "None" - ## arguments to the model - extra.var <- list(...) 
if (type == "None" && !is.null(firm.returns)) { outputModel <- firm.returns + if (length(list(...)) != 0) { + warning(deparse("type"), " = ", deparse("None"), + " does not take extra arguments, ignoring them.") + } } if (is.levels == TRUE) { From noreply at r-forge.r-project.org Mon May 12 09:06:57 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 09:06:57 +0200 (CEST) Subject: [Eventstudies-commits] r315 - pkg/R Message-ID: <20140512070657.E47881873ED@r-forge.r-project.org> Author: vikram Date: 2014-05-12 09:06:57 +0200 (Mon, 12 May 2014) New Revision: 315 Modified: pkg/R/ees.R Log: modified ees function to summary.ees function, it takes the output from get.clusters.formatted and provides the summary Modified: pkg/R/ees.R =================================================================== --- pkg/R/ees.R 2014-05-10 06:57:27 UTC (rev 314) +++ pkg/R/ees.R 2014-05-12 07:06:57 UTC (rev 315) @@ -6,10 +6,7 @@ #---------------------------------------------------------------- # INPUT: -# 'input' : Data series for which extreme events are -# to be identified. More than one series -# is permissble. The 'input' should be in time -# series format. +# 'input' : Output of get.clusters.formatted # 'prob.value': This is the tail value for which event is # to be defined. For eg: prob.value=5 will # consider 5% tail on both sides @@ -34,37 +31,15 @@ # - Clustered, Un-clustered and Both #------------------------------------------------------------------ # NOTE: -ees <- function(input,prob.value){ +summary.ees <- function(input,prob.value){ no.var <- NCOL(input) - #------------------------------------------------ - # Breaking the function if any input is not given - #------------------------------------------------ - # For one variable - # If class of data is not time series - class.input <- class(input)%in%c("xts","zoo") - if(class.input==FALSE){ - stop("Input data is not in time series format. 
Valid 'input' should be of class xts and zoo") - } - - # Converting an xts object to zoo series - input.class <- length(which(class(input)%in%"xts")) - if(length(input.class)==1){ - input <- zoo(input) - } - #----------------------------------------- # Event series: Clustered and un-clustered #----------------------------------------- - tmp <- get.clusters.formatted(event.series=input, - response.series=input, - probvalue=prob.value, - event.value="nonreturns", - response.value="nonreturns") - - tail.events <- tmp[which(tmp$left.tail==1 | tmp$right.tail==1),] - clustered.tail.events <- tmp[which(tmp$cluster.pattern>1),] - unclustered.tail.events <- tmp[-which(tmp$cluster.pattern>1),] + tail.events <- input[which(input$left.tail==1 | input$right.tail==1),] + clustered.tail.events <- input[which(input$cluster.pattern>1),] + unclustered.tail.events <- input[-which(input$cluster.pattern>1),] # Left tail data left.tail.clustered <- clustered.tail.events[which(clustered.tail.events$left.tail==1),c("event.series","cluster.pattern")] left.tail.unclustered <- unclustered.tail.events[which(unclustered.tail.events$left.tail==1),c("event.series","cluster.pattern")] @@ -78,10 +53,10 @@ # Extreme event output #--------------------- # Summary statistics - summ.st <- sumstat(input) + summ.st <- attr(input,"sumstat") # Distribtution of events - event.dist <- extreme.events.distribution(input,prob.value) + event.dist <- attr(input,"extreme.events.distribution") # Run length distribution runlength <- runlength.dist(input,prob.value) @@ -221,8 +196,8 @@ get.clusters.formatted <- function(event.series, response.series, probvalue=5, - event.value="returns", - response.value="returns"){ + event.value="nonreturns", + response.value="nonreturns"){ # Getting levels in event format tmp <- gen.data(event.series, probvalue=probvalue, @@ -329,6 +304,8 @@ "response.series","cluster.pattern") # Results + attr(tmp.ts, which = "sumstat") <- sumstat(input = event.series) + attr(tmp.ts, which = 
"extreme.events.distribution") <- extreme.events.distribution(input = event.series, gcf.output = tmp.ts, prob.value = probvalue) return(tmp.ts) } @@ -370,70 +347,6 @@ return(tmp) } -###################### -# Yearly summary stats -###################### -#---------------------------- -# INPUT: -# 'input': Data series for which event cluster distribution -# is to be calculated; -# 'prob.value': Probility value for which tail is to be constructed this -# value is equivalent to one side tail for eg. if prob.value=5 -# then we have values of 5% tail on both sides -# Functions used: yearly.exevent.summary() -# OUTPUT: -# Yearly distribution of extreme events -#---------------------------- -yearly.exevent.dist <- function(input, prob.value){ - no.var <- NCOL(input) - mylist <- list() - # Estimating cluster count - #-------------------- - # Formatting clusters - #-------------------- - tmp <- get.clusters.formatted(event.series=input, - response.series=input, - probvalue=prob.value, - event.value="nonreturns", - response.value="nonreturns") - - tmp.res <- yearly.exevent.summary(tmp) - tmp.res[is.na(tmp.res)] <- 0 - # Left and right tail - lower.tail.yearly.exevent <- tmp.res[,1:2] - upper.tail.yearly.exevent <- tmp.res[,3:4] - output <- list() - output$lower.tail <- lower.tail.yearly.exevent - output$upper.tail <- upper.tail.yearly.exevent - mylist <- output - - return(mylist) -} - -#------------------------------------------------ -# Get yearly no. 
and median for good and bad days -#------------------------------------------------ -yearly.exevent.summary <- function(tmp){ - tmp.bad <- tmp[which(tmp[,"left.tail"]==1),] - tmp.good <- tmp[which(tmp[,"right.tail"]==1),] - # Bad days - tmp.bad.y <- apply.yearly(xts(tmp.bad),function(x)nrow(x)) - tmp.bad.y <- merge(tmp.bad.y,apply.yearly(xts(tmp.bad[,1]),function(x)median(x,na.rm=T))) - index(tmp.bad.y) <- as.yearmon(as.Date(substr(index(tmp.bad.y),1,4),"%Y")) - # Good days - tmp.good.y <- apply.yearly(xts(tmp.good),function(x)nrow(x)) - tmp.good.y <- merge(tmp.good.y,apply.yearly(xts(tmp.good[,1]),function(x)median(x,na.rm=T))) - index(tmp.good.y) <- as.yearmon(as.Date(substr(index(tmp.good.y),1,4),"%Y")) - tmp.res <- merge(tmp.bad.y,tmp.good.y) - colnames(tmp.res) <- c("number.lowertail","median.lowertail", - "number.uppertail","median.uppertail") - output <- as.data.frame(tmp.res) - cn <- rownames(output) - rownames(output) <- sapply(rownames(output), - function(x)substr(x,nchar(x)-3,nchar(x))) - return(output) -} - ############################# # Getting event segregation # - clustered and unclustered @@ -455,7 +368,7 @@ # Distribution of extreme events #---------------------------- -extreme.events.distribution <- function(input,prob.value){ +extreme.events.distribution <- function(input, gcf.output, prob.value){ # Creating an empty frame no.var <- NCOL(input) lower.tail.dist <- data.frame(matrix(NA,nrow=no.var,ncol=6)) @@ -473,7 +386,7 @@ # Cluster count #-------------- # Non-returns (if it is already in return format) - tmp <- get.event.count(input,probvalue=prob.value, + tmp <- get.event.count(input, gcf.output, probvalue=prob.value, value="nonreturns") lower.tail.dist <- tmp[1,] upper.tail.dist <- tmp[2,] @@ -489,57 +402,104 @@ # Functions used in event count calculation get.event.count <- function(series, probvalue=5, + gcf.output, value="returns"){ # Extracting dataset tmp.old <- gen.data(series,probvalue,value) - tmp <- 
get.clusters.formatted(event.series=series, - response.series=series, - probvalue, - event.value=value, - response.value=value) - - cp <- tmp[,"cluster.pattern"] + cp <- gcf.output[,"cluster.pattern"] lvl <- as.numeric(levels(as.factor(cp))) lvl.use <- lvl[which(lvl>1)] # Calculating Total events tot.ev.l <- length(which(tmp.old[,"left.tail"]==1)) tot.ev.r <- length(which(tmp.old[,"right.tail"]==1)) # Calculating Unclustered events - un.clstr.l <- length(which(tmp[,"left.tail"]==1 & - tmp[,"cluster.pattern"]==1)) - un.clstr.r <- length(which(tmp[,"right.tail"]==1 & - tmp[,"cluster.pattern"]==1)) + un.clstr.l <- length(which(gcf.output[,"left.tail"]==1 & + gcf.output[,"cluster.pattern"]==1)) + un.clstr.r <- length(which(gcf.output[,"right.tail"]==1 & + gcf.output[,"cluster.pattern"]==1)) # Calculating Used clusters us.cl.l <- us.cl.r <- NULL for(i in 1:length(lvl.use)){ - tmp1 <- length(which(tmp[,"cluster.pattern"]==lvl.use[i] & - tmp[,"left.tail"]==1))*lvl.use[i] - tmp2 <- length(which(tmp[,"cluster.pattern"]==lvl.use[i] & - tmp[,"right.tail"]==1))*lvl.use[i] + tmp1 <- length(which(gcf.output[,"cluster.pattern"]==lvl.use[i] & + gcf.output[,"left.tail"]==1))*lvl.use[i] + tmp2 <- length(which(gcf.output[,"cluster.pattern"]==lvl.use[i] & + gcf.output[,"right.tail"]==1))*lvl.use[i] us.cl.l <- sum(us.cl.l,tmp1,na.rm=TRUE) us.cl.r <- sum(us.cl.r,tmp2,na.rm=TRUE) } # Making a table tb <- data.frame(matrix(NA,2,6)) - colnames(tb) <- c("unclstr","used.clstr","removed.clstr","tot.clstr","tot","tot.used") + colnames(tb) <- c("unclustered.events","used.clustered.events","removed.clustered.events","total.clustered.events","total.events","total.used.events") rownames(tb) <- c("lower","upper") - tb[,"tot"] <- c(tot.ev.l,tot.ev.r) - tb[,"unclstr"] <- c(un.clstr.l,un.clstr.r) - tb[,"used.clstr"] <- c(us.cl.l,us.cl.r) - tb[,"tot.used"] <- tb$unclstr+tb$used.clstr - tb[,"tot.clstr"] <- tb$tot-tb$unclstr - tb[,"removed.clstr"] <- tb$tot.clstr-tb$used.clstr + tb[,"total.events"] <- 
c(tot.ev.l,tot.ev.r) + tb[,"unclustered.events"] <- c(un.clstr.l,un.clstr.r) + tb[,"used.clustered.events"] <- c(us.cl.l,us.cl.r) + tb[,"total.used.events"] <- tb$unclustered.events+tb$used.clustered.events + tb[,"total.clustered.events"] <- tb$total.events-tb$unclustered.events + tb[,"removed.clustered.events"] <- tb$total.clustered.events-tb$used.clustered.events return(tb) } +###################### +# Yearly summary stats +###################### +#---------------------------- +# INPUT: +# 'input': Output from get.clusters.formatted function +# 'prob.value': Probility value for which tail is to be constructed this +# value is equivalent to one side tail for eg. if prob.value=5 +# then we have values of 5% tail on both sides +# Functions used: yearly.exevent.summary() +# OUTPUT: +# Yearly distribution of extreme events +#---------------------------- +yearly.exevent.dist <- function(input, prob.value){ + mylist <- list() + ## Estimating cluster count + tmp.res <- yearly.exevent.summary(input) + tmp.res[is.na(tmp.res)] <- 0 + ## Left and right tail + lower.tail.yearly.exevent <- tmp.res[,1:2] + upper.tail.yearly.exevent <- tmp.res[,3:4] + output <- list() + output$lower.tail <- lower.tail.yearly.exevent + output$upper.tail <- upper.tail.yearly.exevent + mylist <- output + return(mylist) +} + +#------------------------------------------------ +# Get yearly no. 
and median for good and bad days +#------------------------------------------------ +yearly.exevent.summary <- function(tmp){ + tmp.bad <- tmp[which(tmp[,"left.tail"]==1),] + tmp.good <- tmp[which(tmp[,"right.tail"]==1),] + # Bad days + tmp.bad.y <- apply.yearly(xts(tmp.bad),function(x)nrow(x)) + tmp.bad.y <- merge(tmp.bad.y,apply.yearly(xts(tmp.bad[,1]),function(x)median(x,na.rm=T))) + index(tmp.bad.y) <- as.yearmon(as.Date(substr(index(tmp.bad.y),1,4),"%Y")) + # Good days + tmp.good.y <- apply.yearly(xts(tmp.good),function(x)nrow(x)) + tmp.good.y <- merge(tmp.good.y,apply.yearly(xts(tmp.good[,1]),function(x)median(x,na.rm=T))) + index(tmp.good.y) <- as.yearmon(as.Date(substr(index(tmp.good.y),1,4),"%Y")) + tmp.res <- merge(tmp.bad.y,tmp.good.y) + colnames(tmp.res) <- c("number.lowertail","median.lowertail", + "number.uppertail","median.uppertail") + output <- as.data.frame(tmp.res) + cn <- rownames(output) + rownames(output) <- sapply(rownames(output), + function(x)substr(x,nchar(x)-3,nchar(x))) + return(output) +} + #################################### # Quantile values for extreme events #################################### #----------------------------------- # INPUT: -# 'input': Data series in time series format +# 'input': Output of get.clusters.formatted # Note: The input series expects the input to be in levels not in returns, # if the some the inputs are already in return formats one has to # use the other variable 'already.return.series' @@ -551,9 +511,8 @@ #----------------------------------- quantile.extreme.values <- function(input, prob.value){ # Creating an empty frame - no.var <- NCOL(input) - lower.tail.qnt.value <- data.frame(matrix(NA,nrow=no.var,ncol=6)) - upper.tail.qnt.value <- data.frame(matrix(NA,nrow=no.var,ncol=6)) + lower.tail.qnt.value <- data.frame(matrix(NA,nrow=1,ncol=6)) + upper.tail.qnt.value <- data.frame(matrix(NA,nrow=1,ncol=6)) colnames(lower.tail.qnt.value) <- c("Min","25%","Median","75%","Max", "Mean") 
rownames(lower.tail.qnt.value) <- "extreme.events" @@ -561,24 +520,15 @@ "Mean") rownames(upper.tail.qnt.value) <- "extreme.events" # Estimating cluster count - #-------------------- - # Formatting clusters - #-------------------- - tmp <- get.clusters.formatted(event.series=input, - response.series=input, - probvalue=prob.value, - event.value="nonreturns", - response.value="nonreturns") - # Left tail - tmp.left.tail <- tmp[which(tmp$left.tail==1), + tmp.left.tail <- input[which(input$left.tail==1), "event.series"] df.left <- t(data.frame(quantile(tmp.left.tail,c(0,0.25,0.5,0.75,1)))) tmp.left <- round(cbind(df.left,mean(tmp.left.tail)),2) rownames(tmp.left) <- "extreme.events" colnames(tmp.left) <- c("0%","25%","Median","75%","100%","Mean") # Right tail - tmp.right.tail <- tmp[which(tmp$right.tail==1), + tmp.right.tail <- input[which(input$right.tail==1), "event.series"] df.right <- t(data.frame(quantile(tmp.right.tail,c(0,0.25,0.5,0.75,1)))) tmp.right <- round(cbind(df.right, @@ -613,29 +563,20 @@ #----------------------------------- runlength.dist <- function(input, prob.value){ - # Creating an empty frame - no.var <- NCOL(input) - # Finding maximum Run length # Seed value max.runlength <- 0 #--------------------------- # Estimating max. 
Run length #--------------------------- - tmp <- get.clusters.formatted(event.series=input, - response.series=input, - probvalue=prob.value, - event.value="nonreturns", - response.value="nonreturns") - - tmp.runlength <- get.cluster.distribution(tmp,"event.series") + tmp.runlength <- get.cluster.distribution(input,"event.series") max.runlength <- max(max.runlength,as.numeric(colnames(tmp.runlength)[NCOL(tmp.runlength)])) # Generating empty frame col.names <- seq(2:max.runlength)+1 - lower.tail.runlength <- data.frame(matrix(NA,nrow=no.var, + lower.tail.runlength <- data.frame(matrix(NA,nrow=1, ncol=length(col.names))) - upper.tail.runlength <- data.frame(matrix(NA,nrow=no.var, + upper.tail.runlength <- data.frame(matrix(NA,nrow=1, ncol=length(col.names))) colnames(lower.tail.runlength) <- col.names rownames(lower.tail.runlength) <- "clustered.events" @@ -645,7 +586,7 @@ #---------------------- # Run length estimation #---------------------- - tmp.res <- get.cluster.distribution(tmp,"event.series") + tmp.res <- get.cluster.distribution(input,"event.series") for(j in 1:length(colnames(tmp.res))){ col.number <- colnames(tmp.res)[j] lower.tail.runlength[1,col.number] <- tmp.res[1,col.number] From noreply at r-forge.r-project.org Mon May 12 09:16:47 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 09:16:47 +0200 (CEST) Subject: [Eventstudies-commits] r316 - pkg/R Message-ID: <20140512071647.91E3418651F@r-forge.r-project.org> Author: vikram Date: 2014-05-12 09:16:47 +0200 (Mon, 12 May 2014) New Revision: 316 Modified: pkg/R/ees.R Log: Added class ees to get.clusters.formatted.output Modified: pkg/R/ees.R =================================================================== --- pkg/R/ees.R 2014-05-12 07:06:57 UTC (rev 315) +++ pkg/R/ees.R 2014-05-12 07:16:47 UTC (rev 316) @@ -31,7 +31,7 @@ # - Clustered, Un-clustered and Both #------------------------------------------------------------------ # NOTE: -summary.ees <- 
function(input,prob.value){ +summary.ees <- function(input){ no.var <- NCOL(input) #----------------------------------------- @@ -59,13 +59,13 @@ event.dist <- attr(input,"extreme.events.distribution") # Run length distribution - runlength <- runlength.dist(input,prob.value) + runlength <- runlength.dist(input) # Quantile extreme values - qnt.values <- quantile.extreme.values(input,prob.value) + qnt.values <- quantile.extreme.values(input) # Yearly distribution of extreme event dates - yearly.exevent <- yearly.exevent.dist(input,prob.value) + yearly.exevent <- yearly.exevent.dist(input) #--------------------- # Compiling the output @@ -306,6 +306,8 @@ # Results attr(tmp.ts, which = "sumstat") <- sumstat(input = event.series) attr(tmp.ts, which = "extreme.events.distribution") <- extreme.events.distribution(input = event.series, gcf.output = tmp.ts, prob.value = probvalue) + attr(tmp.ts, which = "probvalue") <- probvalue + class(tmp.ts) <- c("ees","zoo") return(tmp.ts) } @@ -455,7 +457,7 @@ # OUTPUT: # Yearly distribution of extreme events #---------------------------- -yearly.exevent.dist <- function(input, prob.value){ +yearly.exevent.dist <- function(input){ mylist <- list() ## Estimating cluster count tmp.res <- yearly.exevent.summary(input) @@ -509,7 +511,7 @@ # OUTPUT: # Lower tail and Upper tail quantile values #----------------------------------- -quantile.extreme.values <- function(input, prob.value){ +quantile.extreme.values <- function(input){ # Creating an empty frame lower.tail.qnt.value <- data.frame(matrix(NA,nrow=1,ncol=6)) upper.tail.qnt.value <- data.frame(matrix(NA,nrow=1,ncol=6)) @@ -561,7 +563,7 @@ # OUTPUT: # Lower tail and Upper tail Run length distribution #----------------------------------- -runlength.dist <- function(input, prob.value){ +runlength.dist <- function(input){ # Finding maximum Run length # Seed value From noreply at r-forge.r-project.org Mon May 12 10:24:57 2014 From: noreply at r-forge.r-project.org (noreply at 
r-forge.r-project.org) Date: Mon, 12 May 2014 10:24:57 +0200 (CEST) Subject: [Eventstudies-commits] r317 - pkg/R Message-ID: <20140512082457.6AB7B186C51@r-forge.r-project.org> Author: vikram Date: 2014-05-12 10:24:57 +0200 (Mon, 12 May 2014) New Revision: 317 Modified: pkg/R/ees.R Log: minor modification in presenting output summary Modified: pkg/R/ees.R =================================================================== --- pkg/R/ees.R 2014-05-12 07:16:47 UTC (rev 316) +++ pkg/R/ees.R 2014-05-12 08:24:57 UTC (rev 317) @@ -31,29 +31,16 @@ # - Clustered, Un-clustered and Both #------------------------------------------------------------------ # NOTE: -summary.ees <- function(input){ +summary.ees <- function(x, ...){ no.var <- NCOL(input) - #----------------------------------------- - # Event series: Clustered and un-clustered - #----------------------------------------- - tail.events <- input[which(input$left.tail==1 | input$right.tail==1),] - clustered.tail.events <- input[which(input$cluster.pattern>1),] - unclustered.tail.events <- input[-which(input$cluster.pattern>1),] - # Left tail data - left.tail.clustered <- clustered.tail.events[which(clustered.tail.events$left.tail==1),c("event.series","cluster.pattern")] - left.tail.unclustered <- unclustered.tail.events[which(unclustered.tail.events$left.tail==1),c("event.series","cluster.pattern")] - left.all <- tail.events[which(tail.events$left.tail==1),c("event.series","cluster.pattern")] - # Right tail data - right.tail.clustered <- clustered.tail.events[which(clustered.tail.events$right.tail==1),c("event.series","cluster.pattern")] - right.tail.unclustered <- unclustered.tail.events[which(unclustered.tail.events$right.tail==1),c("event.series","cluster.pattern")] - right.all <- tail.events[which(tail.events$right.tail==1),c("event.series","cluster.pattern")] - #--------------------- # Extreme event output #--------------------- # Summary statistics summ.st <- attr(input,"sumstat") + colnames(summ.st) <- NULL + 
summ.st <- t(summ.st) # Distribtution of events event.dist <- attr(input,"extreme.events.distribution") @@ -63,7 +50,7 @@ # Quantile extreme values qnt.values <- quantile.extreme.values(input) - + # Yearly distribution of extreme event dates yearly.exevent <- yearly.exevent.dist(input) @@ -73,21 +60,15 @@ output <- lower.tail <- upper.tail <- list() # Compiling lower tail and upper tail separately # Lower tail - lower.tail$data <- list(left.all,left.tail.clustered, - left.tail.unclustered) - names(lower.tail$data) <- c("All","Clustered","Unclustered") lower.tail$extreme.event.distribution <- event.dist$lower.tail lower.tail$runlength <- runlength$lower.tail lower.tail$quantile.values <- qnt.values$lower.tail - lower.tail$yearly.extreme.event <- yearly.exevent$lower.tail + lower.tail$yearly.extreme.event <- round(t(yearly.exevent$lower.tail),2) # Upper tail - upper.tail$data <- list(right.all,right.tail.clustered, - right.tail.unclustered) - names(upper.tail$data) <- c("All","Clustered","Unclustered") upper.tail$extreme.event.distribution <- event.dist$upper.tail upper.tail$runlength <- runlength$upper.tail upper.tail$quantile.values <- qnt.values$upper.tail - upper.tail$yearly.extreme.event <- yearly.exevent$upper.tail + upper.tail$yearly.extreme.event <- round(t(yearly.exevent$upper.tail),2) # Output output$data.summary <- summ.st output$lower.tail <- lower.tail @@ -487,8 +468,8 @@ tmp.good.y <- merge(tmp.good.y,apply.yearly(xts(tmp.good[,1]),function(x)median(x,na.rm=T))) index(tmp.good.y) <- as.yearmon(as.Date(substr(index(tmp.good.y),1,4),"%Y")) tmp.res <- merge(tmp.bad.y,tmp.good.y) - colnames(tmp.res) <- c("number.lowertail","median.lowertail", - "number.uppertail","median.uppertail") + colnames(tmp.res) <- c("total.events.l","median.value.l", + "total.events.u","median.value.u") output <- as.data.frame(tmp.res) cn <- rownames(output) rownames(output) <- sapply(rownames(output), From noreply at r-forge.r-project.org Mon May 12 12:49:18 2014 From: noreply at 
r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 12:49:18 +0200 (CEST) Subject: [Eventstudies-commits] r318 - pkg/R Message-ID: <20140512104918.3DD03186823@r-forge.r-project.org> Author: vikram Date: 2014-05-12 12:49:17 +0200 (Mon, 12 May 2014) New Revision: 318 Modified: pkg/R/ees.R Log: Created plot.ees functionality Modified: pkg/R/ees.R =================================================================== --- pkg/R/ees.R 2014-05-12 08:24:57 UTC (rev 317) +++ pkg/R/ees.R 2014-05-12 10:49:17 UTC (rev 318) @@ -675,92 +675,109 @@ # width = width of event window for event study plot # prob.value = Probability value for which extreme events is determined #------------------------- -eesPlot <- function(z, response.series.name, - event.series.name, - titlestring, ylab, width=5, - prob.value=5){ - #----------------- - # Get event dates - #----------------- - # Get both clustered and unclustered dates - e.s <- z[,event.series.name] - r.s <- z[,response.series.name] - data.use <- get.clusters.formatted(event.series=e.s, - response.series=r.s, - probvalue=prob.value, - event.value="nonreturns", - response.value="nonreturns") - # Get only unclustered data - data.frmt <- data.use[which(data.use$cluster.pattern==1),] - data.frmt2 <- data.use[which(data.use$cluster.pattern!=0),] +###################### +## Extreme event dates +###################### +## Input: get.clusters.formatted (GCF) output +## Output: Extreme Event dates for normal and purged data +extremeDates <- function(input){ + ##----------------- + ## Get event dates + ##----------------- + ## Get only unclustered data + data.only.cluster <- input[which(input$cluster.pattern==1),] + data.no.cluster <- input[which(input$cluster.pattern!=0),] - # get dates for bigdays and baddays - baddays.normal <- index(data.frmt[which(data.frmt[,"left.tail"]==1)]) - bigdays.normal <- index(data.frmt[which(data.frmt[,"right.tail"]==1)]) - baddays.purged <- 
index(data.frmt2[which(data.frmt2[,"left.tail"]==1)]) - bigdays.purged <- index(data.frmt2[which(data.frmt2[,"right.tail"]==1)]) + ## get dates for bigdays and baddays + days.bad.normal <- index(data.only.cluster[which(data.only.cluster[,"left.tail"]==1)]) + days.good.normal <- index(data.only.cluster[which(data.only.cluster[,"right.tail"]==1)]) + days.bad.purged <- index(data.no.cluster[which(data.no.cluster[,"left.tail"]==1)]) + days.good.purged <- index(data.no.cluster[which(data.no.cluster[,"right.tail"]==1)]) + ## Event list + events.good.normal <- data.frame(outcome.unit=rep("response.series", + length(days.good.normal)), + event.when=days.good.normal) + events.bad.normal <- data.frame(outcome.unit=rep("response.series", + length(days.bad.normal)), + event.when=days.bad.normal) + events.good.purged <- data.frame(outcome.unit=rep("response.series", + length(days.good.purged)), + event.when=days.good.purged) + events.bad.purged <- data.frame(outcome.unit=rep("response.series", + length(days.bad.purged)), + event.when=days.bad.purged) + dates <- list(events.good.normal=events.good.normal, + events.bad.normal=events.bad.normal, + events.good.purged=events.good.purged, + events.bad.purged=events.bad.purged) + for(i in 1:length(dates)){dates[[i]][,1] <- as.character(dates[[i]][,1])} + return(dates) +} - d.good.normal <- bigdays.normal - d.bad.normal <- baddays.normal - d.good.purged <- bigdays.purged - d.bad.purged <- baddays.purged - - # ES for normal returns - es.good.normal <- corecomp(data.use,d.good.normal, - "response.series",width) - es.bad.normal <- corecomp(data.use,d.bad.normal, - "response.series",width) - - # ES for purged returns - es.good.purged <- corecomp(data.use,d.good.purged, - "response.series",width) - es.bad.purged <- corecomp(data.use,d.bad.purged, - "response.series",width) - - big.normal <- max(abs(cbind(es.good.normal,es.bad.normal))) - big.purged <- max(abs(cbind(es.good.purged,es.bad.purged))) - big <- max(big.normal,big.purged) - hilo1 
<- c(-big,big) - - #--------------- - # Plotting graph - plot.es.graph.both(es.good.normal,es.bad.normal, - es.good.purged,es.bad.purged, - width,titlestring,ylab) +##---------------------- +## Getting ees inference +##---------------------- +## Event study plot for EES (extreme event studies) +## Input: Output of GCF +eesInference <- function(input, eventLists, to.remap, remap, width, + inference = TRUE, inference.strategy = "bootstrap"){ + inf <- list() + ## Computing inference + ## Normal + # Good days + inf$good.normal <- eventstudy(input, eventList=eventLists$events.good.normal, + type="None", to.remap=to.remap, + remap=remap, width=width, inference=inference, + inference.strategy=inference.strategy) + # Bad days + inf$bad.normal <- eventstudy(input, eventList=eventLists$events.bad.normal, + type="None", to.remap=to.remap, + remap=remap, width=width, inference=inference, + inference.strategy=inference.strategy) + ## Purged + # Good days + inf$good.purged <- eventstudy(input, eventList=eventLists$events.good.purged, + type="None", to.remap=to.remap, + remap=remap, width=width, inference=inference, + inference.strategy=inference.strategy) + # Bad days + inf$bad.purged <- eventstudy(input, eventList=eventLists$events.bad.purged, + type="None", to.remap=to.remap, + remap=remap, width=width, inference=inference, + inference.strategy=inference.strategy) + + class(inf) <- "ees" + return(inf) } -#-------------------------- -# Eventstudy analysis -# -using eventstudy package -#-------------------------- -corecomp <- function(z,dlist,seriesname,width) { - events <- data.frame(outcome.unit=rep(seriesname, length(dlist)), event.when=dlist) - es.results <- phys2eventtime(z, events, width=0) - es.w <- window(es.results$z.e, start=-width, end=+width) - # Replaing NA's with zeroes - es.w[is.na(es.w)] <- 0 - es.w <- remap.cumsum(es.w, is.pc=FALSE, base=0) - inference.bootstrap(es.w,to.plot=FALSE) -} - -#---------------------------------- -# Plotting graph in es.error.metric 
-#---------------------------------- -plot.es.graph.both <- function(es.good.normal,es.bad.normal, - es.good.purged,es.bad.purged, - width,titlestring,ylab){ + +plot.ees <- function(x, xlab = NULL, ...){ + ## assign own labels if they're missing + if (is.null(xlab)) { + xlab <- "Event time" + } + ## Inference + es.good.normal <- x$good.normal$eventstudy.output + es.bad.normal <- x$bad.normal$eventstudy.output + es.good.purged <- x$good.purged$eventstudy.output + es.bad.purged <- x$bad.purged$eventstudy.output + # Width + width <- (NROW(x[[1]]$eventstudy.output)-1)/2 + ##--------------- + ## Plotting graph + ##--------------- big.normal <- max(abs(cbind(es.good.normal,es.bad.normal))) big.purged <- max(abs(cbind(es.good.purged,es.bad.purged))) big <- max(big.normal,big.purged) - hilo1 <- c(-big,big) + ylim.max <- c(-big,big) # Plotting graph par(mfrow=c(1,2)) # Plot very good days - plot(-width:width, es.good.normal[,2], type="l", lwd=2, ylim=hilo1, col="red", - xlab="Event time (days)", ylab=ylab, - main=paste("Very good", " (by ", titlestring, ")", sep="")) + plot(-width:width, es.good.normal[,2], type="l", lwd=2, + ylim=ylim.max, col="red", xlab=xlab, + main="Very good days", ...) lines(-width:width, es.good.purged[,2], lwd=2, lty=1,type="l", col="orange") points(-width:width, es.good.normal[,2], pch=19,col="red") points(-width:width, es.good.purged[,2], pch=25,col="orange") @@ -779,9 +796,9 @@ col=c("red","orange"),lty=c(1,1),bty="n") # Plot very bad days - plot(-width:width, es.bad.normal[,2], type="l", lwd=2, ylim=hilo1, col="red", - xlab="Event time (days)", ylab=ylab, - main=paste("Very bad", " (by ", titlestring, ")", sep="")) + plot(-width:width, es.bad.normal[,2], type="l", lwd=2, + ylim=ylim.max, col="red", xlab=xlab, + main = "Very bad days",...) 
lines(-width:width, es.bad.purged[,2], lwd=2, lty=1,type="l", col="orange") points(-width:width, es.bad.normal[,2], pch=19,col="red") points(-width:width, es.bad.purged[,2], pch=25,col="orange") From noreply at r-forge.r-project.org Mon May 12 12:50:15 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 12:50:15 +0200 (CEST) Subject: [Eventstudies-commits] r319 - / pkg/data pkg/man Message-ID: <20140512105015.7940E1868B7@r-forge.r-project.org> Author: chiraganand Date: 2014-05-12 12:50:15 +0200 (Mon, 12 May 2014) New Revision: 319 Added: pkg/data/OtherReturns.rda pkg/man/OtherReturns.Rd Removed: pkg/data/AMMData.rda pkg/data/EESData.rda pkg/data/INR.rda pkg/data/MMData.rda pkg/data/NiftyIndex.rda pkg/man/AMMData.Rd pkg/man/EESData.Rd pkg/man/INR.Rd pkg/man/MMData.Rd pkg/man/NiftyIndex.Rd Modified: pkg/man/ees.Rd pkg/man/eesPlot.Rd pkg/man/eventstudy.Rd pkg/man/excessReturn.Rd pkg/man/lmAMM.Rd pkg/man/makeX.Rd pkg/man/manyfirmssubperiod.lmAMM.Rd pkg/man/marketResidual.Rd pkg/man/phys2eventtime.Rd pkg/man/remap.cumprod.Rd pkg/man/remap.cumsum.Rd pkg/man/remap.event.reindex.Rd pkg/man/subperiod.lmAMM.Rd todo.org Log: Removed unneeded data sets, clubbed all the other returns into one rda, added it's manual. 
Deleted: pkg/data/AMMData.rda =================================================================== (Binary files differ) Deleted: pkg/data/EESData.rda =================================================================== (Binary files differ) Deleted: pkg/data/INR.rda =================================================================== (Binary files differ) Deleted: pkg/data/MMData.rda =================================================================== (Binary files differ) Deleted: pkg/data/NiftyIndex.rda =================================================================== (Binary files differ) Added: pkg/data/OtherReturns.rda =================================================================== (Binary files differ) Property changes on: pkg/data/OtherReturns.rda ___________________________________________________________________ Added: svn:mime-type + application/x-xz Deleted: pkg/man/AMMData.Rd =================================================================== --- pkg/man/AMMData.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/AMMData.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -1,21 +0,0 @@ -\name{AMMData} -\alias{AMMData} -\docType{data} - -\title{Data set containing firm returns, market returns, currency - returns, and call money rate used for AMM estimation} - -\description{This data set consists of daily time series for firm - returns (Infosys and TCS), market returns (Nifty returns), currency - returns (INR/USD), and call money rate. It is used to demonstrate - augmented market model estimation. - - The data series is a daily time-series zoo object. The sample range for - the data is from 2012-02-01 to 2014-01-31. All series are in per cent. 
-} - -\usage{data(AMMData)} - -\author{Vikram Bahure} - -\keyword{AMMData} Deleted: pkg/man/EESData.Rd =================================================================== --- pkg/man/EESData.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/EESData.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -1,15 +0,0 @@ -\name{EESData} -\alias{EESData} -\docType{data} - -\title{Returns data used for extreme events analysis} - -\description{This data set is used to demonstrate extreme events study - functionality of the package. It contains daily returns data (in per - cent) of S&P 500 and the NIFTY Index.} - -\usage{data(EESData)} - -\author{Chirag Anand} - -\keyword{datasets} Deleted: pkg/man/INR.Rd =================================================================== --- pkg/man/INR.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/INR.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -1,18 +0,0 @@ -\name{INR} -\alias{INR} -\docType{data} - -\title{Exchange rate data of Indian Rupee to US Dollar} - -\description{ - A sample of INR/USD rates from 1990 to 2011. 
-} -\usage{data(INR)} - -\format{\pkg{zoo}} - -\examples{ -data(INR) -} - -\keyword{datasets} Deleted: pkg/man/MMData.Rd =================================================================== --- pkg/man/MMData.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/MMData.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -1,21 +0,0 @@ -\name{MMData} -\alias{MMData} - - -\title{Sample data used for market model examples} - -\description{This data is only used for market model examples.} - -\usage{data(MMData)} - -\format{\pkg{zoo}} - -\author{Vikram Bahure} - -\examples{ -library(zoo) -data(MMData) -str(MMData) -} - -\keyword{MMData} Deleted: pkg/man/NiftyIndex.Rd =================================================================== --- pkg/man/NiftyIndex.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/NiftyIndex.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -1,14 +0,0 @@ -\name{NiftyIndex} -\alias{NiftyIndex} -\docType{data} - -\title{NSE Nifty index from 2004 to 2012} - -\description{Time series of Nifty index return (in per cent) from 1990 - to 2012.} - -\usage{data(NiftyIndex)} - -\author{Vikram Bahure} - -\keyword{NiftyIndex} Added: pkg/man/OtherReturns.Rd =================================================================== --- pkg/man/OtherReturns.Rd (rev 0) +++ pkg/man/OtherReturns.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -0,0 +1,20 @@ +\name{OtherReturns} +\alias{OtherReturns} +\docType{data} + +\title{Data set containing daily returns of Nifty index, USD INR, call momey + rate, and S&P 500 index.} + +\description{This data set consists of daily time series of market + returns (Nifty index and S&P 500 index), currency returns (USD/INR), + and call money rate. + + The data series is a daily time-series zoo object. All series are in + per cent. 
+} + +\usage{data(OtherReturns)} + +\author{Chirag Anand} + +\keyword{OtherReturns} Modified: pkg/man/ees.Rd =================================================================== --- pkg/man/ees.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/ees.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -85,7 +85,8 @@ \author{Vikram Bahure, Vimal Balasubramaniam} \examples{ -data(EESData) -r <- ees(EESData$sp500, prob.value = 5) +data(OtherReturns) + +r <- ees(OtherReturns$SP500, prob.value = 5) str(r, max.level = 2) } Modified: pkg/man/eesPlot.Rd =================================================================== --- pkg/man/eesPlot.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/eesPlot.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -69,10 +69,11 @@ \author{Vikram Bahure, Vimal Balasubramaniam} \examples{ -data(EESData) -eesPlot(z = EESData, - response.series.name = "nifty", - event.series.name = "sp500", +data("OtherReturns") + +eesPlot(z = OtherReturns, + response.series.name = OtherReturns$NiftyIndex, + event.series.name = OtherReturns$SP500, titlestring = "S&P500", ylab = "(Cum.) 
change in NIFTY") } Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/eventstudy.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -218,12 +218,21 @@ plot(es) ## Event study using Augment Market Model -data("AMMData") +data("OtherReturns") + events <- data.frame(outcome.unit = c("Infosys", "TCS"), event.when = c("2012-04-01", "2012-06-01"), stringsAsFactors = FALSE) -es <- eventstudy(firm.returns = AMMData[, c("Infosys", "TCS")], +ammdata <- merge.zoo(Infosys = StockPriceReturns$Infosys, + TCS = StockPriceReturns$TCS, + NiftyIndex, + INRUSD = OtherReturns$INRUSD, + CallMoneyRate = OtherReturns$CallMoneyRate, + all = FALSE) +ammdata <- window(ammdata, start = "2012-02-01", end = "2012-12-31") + +es <- eventstudy(firm.returns = ammdata[, c("Infosys", "TCS")], eventList = events, width = 10, type = "lmAMM", @@ -231,9 +240,9 @@ remap = "cumsum", inference = TRUE, inference.strategy = "bootstrap", - ## model args - market.returns = AMMData[, "index.nifty"], - others = AMMData[, c("currency.inrusd", "call.money.rate")], + # model arguments + market.returns = ammdata[, "NiftyIndex"], + others = ammdata[, c("INRUSD", "CallMoneyRate")], market.returns.purge = TRUE ) str(es) Modified: pkg/man/excessReturn.Rd =================================================================== --- pkg/man/excessReturn.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/excessReturn.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -25,15 +25,15 @@ \examples{ data(StockPriceReturns) -data(NiftyIndex) +data(OtherReturns) er.result <- excessReturn(firm.returns = StockPriceReturns, - market.returns = NiftyIndex) + market.returns = OtherReturns$NiftyIndex) -## Checking output: Comparing excess return, raw returns, nifty returns -output <- merge(er.result$Infosys, StockPriceReturns$Infosys, NiftyIndex,all=FALSE) -colnames(output) <- c("excess.return", "raw.returns", "nifty.returns") -tail(output) 
+tail(merge(excessReturn = er.result$Infosys, + Infosys = StockPriceReturns$Infosys, + NiftyIndex = OtherReturns$NiftyIndex, + all=FALSE)) } \keyword{excessReturn} Modified: pkg/man/lmAMM.Rd =================================================================== --- pkg/man/lmAMM.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/lmAMM.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -77,32 +77,31 @@ } \examples{ -data("AMMData") data("StockPriceReturns") -data("NiftyIndex") +data("OtherReturns") -firm.returns <- AMMData[,"Infosys"] -market.returns <- AMMData[,"index.nifty"] -currency.returns <- AMMData[,"currency.inrusd"] +firm.returns <- StockPriceReturns[, "Infosys"] +market.returns <- OtherReturns[ ,"NiftyIndex"] +currency.returns <- OtherReturns[, "INRUSD"] X <- makeX(market.returns, - others = currency.returns, - switch.to.innov = FALSE, - market.returns.purge = FALSE, - verbose = FALSE) + others = currency.returns, + switch.to.innov = FALSE, + market.returns.purge = FALSE, + verbose = FALSE) amm.result <- lmAMM(firm.returns, X, nlags = 0, verbose = FALSE) plot(amm.result) amm.residual <- residuals(amm.result) -amm.residual <- zoo(amm.residual,as.Date(names(amm.residual))) +amm.residual <- zoo(amm.residual, + order.by = as.Date(names(amm.residual))) -## Checking output: Comparing augmented market model residual, raw returns, nifty returns -output <- merge(amm.residual, StockPriceReturns$Infosys, NiftyIndex, - all = FALSE) -colnames(output) <- c("AMM Residual", "Raw Returns", "Nifty Returns") -tail(output) -plot(output) +comparison <- merge(AMMResidual = amm.residual, + Infosys = StockPriceReturns$Infosys, + NiftyIndex = OtherReturns$NiftyIndex, + all = FALSE) +plot(comparison) } \keyword{lmAMM} Modified: pkg/man/makeX.Rd =================================================================== --- pkg/man/makeX.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/makeX.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -75,15 +75,15 @@ \author{Ajay Shah, Chirag Anand, Vikram Bahure, Vimal 
Balasubramaniam} \examples{ -data("AMMData") -market.returns <- AMMData$index.nifty -currency.returns <- AMMData$currency.inrusd +data("OtherReturns") +market.returns <- OtherReturns$NiftyIndex +currency.returns <- OtherReturns$INRUSD X <- makeX(market.returns, others = currency.returns, switch.to.innov = FALSE, market.returns.purge = FALSE, verbose = FALSE) -head(X) +head(na.omit(X)) } \keyword{makeX} Modified: pkg/man/manyfirmssubperiod.lmAMM.Rd =================================================================== --- pkg/man/manyfirmssubperiod.lmAMM.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/manyfirmssubperiod.lmAMM.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -59,11 +59,11 @@ } \examples{ -data("AMMData") +data("OtherReturns") -firm.returns <- AMMData[, c("Infosys","TCS")] -market.returns <- AMMData[, "index.nifty"] -currency.returns <- AMMData[, "currency.inrusd"] +firm.returns <- StockPriceReturns[, c("Infosys","TCS")] +market.returns <- OtherReturns$NiftyIndex +currency.returns <- OtherReturns$INRUSD X <- makeX(market.returns, others = currency.returns, Modified: pkg/man/marketResidual.Rd =================================================================== --- pkg/man/marketResidual.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/marketResidual.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -21,18 +21,17 @@ \author{Vikram Bahure} \examples{ -data(StockPriceReturns) -data(NiftyIndex) +data("StockPriceReturns") +data("OtherReturns") mm.result <- marketResidual(firm.returns = StockPriceReturns, - market.returns = NiftyIndex) + market.returns = OtherReturns$NiftyIndex) -## Checking output: Comparing market model residual, raw returns, nifty returns -output <- merge(mm.result$Infosys, StockPriceReturns$Infosys, NiftyIndex, - all=FALSE) -colnames(output) <- c("market.residual", "raw.returns", "nifty.returns") -tail(output) - +comparison <- merge(MarketResidual = mm.result$Infosys, + Infosys = StockPriceReturns$Infosys, + NiftyIndex = OtherReturns$NiftyIndex, + all = 
FALSE) +plot(comparison) } \keyword{marketResidual} Modified: pkg/man/phys2eventtime.Rd =================================================================== --- pkg/man/phys2eventtime.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/phys2eventtime.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -83,15 +83,5 @@ width = 5) print(result$z.e[as.character(-3:3)]) print(result$outcomes) - -## Checking conversion to event time frame for first successful event date -c.no <- as.numeric(colnames(result$z.e)) -cnames <- SplitDates[c.no[1], ] -phys.output <- as.numeric(result$z.e[as.character(c(-5:5)), as.character(c.no[1])]) -loc <- which(index(StockPriceReturns) \%in\% SplitDates[c.no[1], "event.when"]) -raw.data <- as.numeric(StockPriceReturns[c((loc-5):(loc+5)), - SplitDates[c.no[1], "outcome.unit"]]) -check.output <- cbind(raw.data, phys.output) -check.output } \keyword{phys2eventime} Modified: pkg/man/remap.cumprod.Rd =================================================================== --- pkg/man/remap.cumprod.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/remap.cumprod.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -47,9 +47,6 @@ is.returns = TRUE, base = 100) -check.output <- cbind(es.w[,1], eventtime[,1]) -colnames(check.output) <- c("abnormal.returns", "cumulative.abnormal.returns") -check.output -head(eventtime[,1:5]) +print(eventtime[as.character(-3:3), ]) } Modified: pkg/man/remap.cumsum.Rd =================================================================== --- pkg/man/remap.cumsum.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/remap.cumsum.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -45,9 +45,5 @@ es.w <- window(es.results$z.e, start = -5, end = +5) eventtime <- remap.cumsum(es.w, is.pc = FALSE, base = 0) -## Comparing abnormal returns (AR) and cumulative abnormal returns (CAR) -check.output <- cbind(es.w[,1], eventtime[,1]) -colnames(check.output) <- c("abnormal.returns", "cumulative.abnormal.returns") -check.output -head(eventtime[,1:5]) +print(eventtime[as.character(-3:3), ]) 
} Modified: pkg/man/remap.event.reindex.Rd =================================================================== --- pkg/man/remap.event.reindex.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/remap.event.reindex.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -36,5 +36,5 @@ eventtime <- remap.event.reindex(es.w) -head(eventtime[, 1:5]) +eventtime[as.character(-3:3), ] } Modified: pkg/man/subperiod.lmAMM.Rd =================================================================== --- pkg/man/subperiod.lmAMM.Rd 2014-05-12 10:49:17 UTC (rev 318) +++ pkg/man/subperiod.lmAMM.Rd 2014-05-12 10:50:15 UTC (rev 319) @@ -64,12 +64,13 @@ \seealso{ \code{\link{lmAMM}}} -\examples{ -data("AMMData") +\examples{ +data("StockPriceReturns") +data("OtherReturns") -firm.returns <- AMMData$Infosys -market.returns <- AMMData$index.nifty -currency.returns <- AMMData$currency.inrusd +firm.returns <- StockPriceReturns$Infosys +market.returns <- OtherReturns$NiftyIndex +currency.returns <- OtherReturns$USDINR regressors <- makeX(market.returns, others = currency.returns, Modified: todo.org =================================================================== --- todo.org 2014-05-12 10:49:17 UTC (rev 318) +++ todo.org 2014-05-12 10:50:15 UTC (rev 319) @@ -41,3 +41,259 @@ * Testing - manual calculation of numbers in the tests - revert old tests? +* plot.amm + - Fix the x-axis tick labels: the number is too small + - Increase number of plots (the funky way) + +* plot.es + - "Event study plot capabilities" email on 30th April. + +* Ajay's comments +** On the eesPlot code + data.frmt2 <- data.use[which(data.use$cluster.pattern != 0), ] + + Can we please have better variable names. + + hilo1 <- c(-big, big) + plot.es.graph.both(es.good.normal, es.bad.normal, es.good.purged, + es.bad.purged, width, titlestring, ylab) + + Can we please have better names than hilo1. And, you are making it and + not using it. + +** Feedback on eesPlot + Why do we have eesPlot? 
+ + When I look at the name, I think "Okay, this is a plot function, and + why is this not just an S3 plot method". When I see the first one line + description on the man page my opinion is confirmed. + + Then I look deeper and it is absolutely not a plot function! It is a + function which figures out a list of events, then runs an event study, + and then does a customised plot. + + We should not have such functions. + + We should ask the user to run ees() and then run eventstudy() and then + use the plot method. + + Perhaps we should ask the user to do: + + es.lefttail <- eventstudies(left tail) + es.righttail <- eventstudies(right tail) + plot(mfrow=c(2,1)) + plot(es.lefttail, type="blah") + plot(es.righttail, type="blah") + + On an unrelated note, I found it disturbing that the code for + eesPlot() does not use ees(). This violates the principle of code + reuse. Perhaps we should have the framework where x<-ees() just makes + lists of interesting events and then summary(x) generates all those + descriptive tables about number of events and run length and so on. + + Why is the example saying " ## Generating event study plots (using + modified event study methodology)". It looks gauche. + + There is one spelling mistake in the man page but I've forgotten where + it is. + +** Feedback on eventstudies::ees + 1. The entire concept of what we're doing is critically connected + to the choice of the event window!!! + + The function and the documentation of the function is silent about + this and that's completely wrong. + + Our concept of what's a clean unclustered event is : clean within a + stated event window. We never say this. And, it's bad software + engineering to hardcode this to a number. This must be an argument to + the function. + + 2. The title of the function and the first para of the function are + quite lame. They say: + + "This function generates summary statistics for identification and + analysis of extreme events.". 
This mostly leaves me in the dark + about what's going on. + + "Tail (Rare) events are often the object of interest in finance. + These events are defined as those that have a low probability of + occurrence. This function identifies such events based on + prob.value mentioned by the user and generates summary + statistics about the events. If ???prob.value??? is 2.5%, events + below + 2.5% (lower tail) and above 97.5% (upper tail) of the + distribution + are identified as extreme events." This makes the function seem + like a massive waste of time. Using R we can trivially find the + upper tail observations - no new function is required here. If I + read this paragraph I would completely lose interest in the + package; I would think these lame developers are taking trivial + one/two lines of R code and encoding it as a function with a new + name - why would I never bother to learn their new API. + + The entire value added of the code lies in identifying clean + unclustered events, stabbing into messy situations by trying to fuse + clustered events under certain conditions, and walking away from + places where fusing can't be done. None of that is advertised in the + man page. The word 'fuse' does not occur anywhere on the man page! + + 3. When I run the example I get a huge messy structure that's no + fun. Why not have: + str(output, max.level=2) + which is more comprehensible. + + 4. Look at + + library(eventstudies) + data(EESData) + ## Input S&P 500 as the univariate series + input <- EESData$sp500 + ## Constructing summary statistics for 5% tail values (5% on both + sides) + output <- ees(input, prob.value = 5) + str(output) + + It looks nicer and more readable as: + + library(eventstudies) + data(EESData) + r <- ees(EESData$sp500, prob.value = 5) + str(r, max.level=2) + + 5. Choose a consistent style. Is there going to be a + library(eventstudies) in front of all the examples? This was not + there with the others. Why is it here? + + 6. 
Why are we saying " To convert number to words, code uses + function ???numbers2words?? by + John Fox and ???deprintize?? function by Miron Kursa.". We are + using thousands of functions by others but is this a big deal? + + 7. In + + $data$Clustered + event.series cluster.pattern + 2000-03-16 2.524452 3 + 2003-03-17 3.904668 2 + + Perhaps the word `runlength' is universally understood instead of + cluster.pattern + + The word `event.series' is incomprehensible to me. + + 8. In : + + > output$upper.tail$extreme.event.distribution + unclstr used.clstr removed.clstr tot.clstr tot tot.used + upper 65 5 32 37 102 70 + + The column names are horrible. + + Pick a more rational sequencing where this process unfolds from + left to right. + + This table is the heart of the functionality of what's being done and + it isn't explained at all in the man page. + + The man page should say that the researcher might like to only + study clean unclustered events - in which case he should run with + xxx. If he wishes to use the methodology of fusing adjacent events + as done in PSS, then additionally we are able to salvage the events + xxx. + + + 9. The run length table should be defined as a table showing a + column which is the run length and a column which is the number + of events which are a run of that length. + + 10. Just confirming: In a package vignette we're going to be able + to reproduce some key results from the tables of PSS using this + function? + + 11. Wouldn't it be neat to draw something graphical with + abline(v=xxx, lty=2) where all the extreme events are shown on + a picture? With a different colour for fused and for rejected + events. + +** Feedback on eventstudies package + + First batch. + + - At many places the phrase `eventstudy' is being used when what's + required is `event study'. + + - When I say ?AMMData iqt is riddled with mistakes!!!! The man page + has four sentences and has more than 1 error per sentence. + + 1. 
The first few words read: "The data series is a daily time-series + zoo object. The sample range for the data is from 2012-02-01 to + 2014-01-31." Why should this be the top priority? + + 2. The two sentences after this, which add up to the full man page, + contain one grammatical error each. + + 3. Nowhere in the man page is the unit mentioned (per cent). + + 4. The dataset contains call.money.rate and that's inconsistent with + the man page. + + 5. The example says library(zoo) which is not required. + +Why do we need a special data object named AMMData? Can we not just +have one single example dataset with daily returns data for firms, +that is used for the examples involving both event studies and AMM? + +If you had to have this in the package (which I doubt), a better +example is: + + data(AMMData) + str(AMMData) + tail(AMMData) + summary(AMMData) + +We in India use too many abbreviations. Let's stick to the phrase +`augmented market model' instead of overusing the phrase AMM. + + +*** When I say ?EESData I see a section `Format' which is not in ?AMMData. + + The facts on this man page should say that this is a dataset for + the purpose of demonstrating the EES functionality (no + abbreviations please), and for replicating the results of the PSS + paper. It should explain what the data is (daily returns measured + in per cent). + + - Why is the example here different from the example for AMMData? + +*** The dataset INR introduces a new word `sample' which was not used in the previous two. + Can we please have extreme maniacal consistency in all these? + As pointed out above, there is duplication between INR being here and + it being in AMMData. + +*** It is truly wrong to have a MMData data object!! + Nothing prevents you from estimating an MM using the data for an AMM. + Can we please be more intelligent about all this. 
+ +** Collated + - bad variable names + - eesPlot: make it S3 function + - Do: ees(), eventstudy(), plot() + - summary.ees() + - ees(): event window in the API and the man pages (language + information) + - Remove comments from examples, plus cleaning + - Example consistency: remove library() calls from examples + - Remove unneeded references + - ees(): output colnames, output table format (+sequencing) + - ees(): reproducibility of PSS in the vignette + - plot.ees() + - Spell check + - Use "event study" instead of "eventstudy" + - Man pages: AMMData: grammatical errors, language, units, + consistent sections, call.money.rate + - EESData: say about PSS + - Avoid abbreviations + - Get rid of MMData, INR dataset + - lmAMM example + - phys2eventtime example + - Spell check From noreply at r-forge.r-project.org Mon May 12 12:52:23 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 12:52:23 +0200 (CEST) Subject: [Eventstudies-commits] r320 - / Message-ID: <20140512105223.97257186978@r-forge.r-project.org> Author: chiraganand Date: 2014-05-12 12:52:23 +0200 (Mon, 12 May 2014) New Revision: 320 Modified: todo.org Log: Removed extra text. Modified: todo.org =================================================================== --- todo.org 2014-05-12 10:50:15 UTC (rev 319) +++ todo.org 2014-05-12 10:52:23 UTC (rev 320) @@ -47,253 +47,3 @@ * plot.es - "Event study plot capabilities" email on 30th April. - -* Ajay's comments -** On the eesPlot code - data.frmt2 <- data.use[which(data.use$cluster.pattern != 0), ] - - Can we please have better variable names. - - hilo1 <- c(-big, big) - plot.es.graph.both(es.good.normal, es.bad.normal, es.good.purged, - es.bad.purged, width, titlestring, ylab) - - Can we please have better names than hilo1. And, you are making it and - not using it. - -** Feedback on eesPlot - Why do we have eesPlot? 
- - When I look at the name, I think "Okay, this is a plot function, and - why is this not just an S3 plot method". When I see the first one line - description on the man page my opinion is confirmed. - - Then I look deeper and it is absolutely not a plot function! It is a - function which figures out a list of events, then runs an event study, - and then does a customised plot. - - We should not have such functions. - - We should ask the user to run ees() and then run eventstudy() and then - use the plot method. - - Perhaps we should ask the user to do: - - es.lefttail <- eventstudies(left tail) - es.righttail <- eventstudies(right tail) - plot(mfrow=c(2,1)) - plot(es.lefttail, type="blah") - plot(es.righttail, type="blah") - - On an unrelated note, I found it disturbing that the code for - eesPlot() does not use ees(). This violates the principle of code - reuse. Perhaps we should have the framework where x<-ees() just makes - lists of interesting events and then summary(x) generates all those - descriptive tables about number of events and run length and so on. - - Why is the example saying " ## Generating event study plots (using - modified event study methodology)". It looks gauche. - - There is one spelling mistake in the man page but I've forgotten where - it is. - -** Feedback on eventstudies::ees - 1. The entire concept of what we're doing is critically connected - to the choice of the event window!!! - - The function and the documentation of the function is silent about - this and that's completely wrong. - - Our concept of what's a clean unclustered event is : clean within a - stated event window. We never say this. And, it's bad software - engineering to hardcode this to a number. This must be an argument to - the function. - - 2. The title of the function and the first para of the function are - quite lame. They say: - - "This function generates summary statistics for identification and - analysis of extreme events.". 
This mostly leaves me in the dark - about what's going on. - - "Tail (Rare) events are often the object of interest in finance. - These events are defined as those that have a low probability of - occurrence. This function identifies such events based on - prob.value mentioned by the user and generates summary - statistics about the events. If 'prob.value' is 2.5%, events - below - 2.5% (lower tail) and above 97.5% (upper tail) of the - distribution - are identified as extreme events." This makes the function seem - like a massive waste of time. Using R we can trivially find the - upper tail observations - no new function is required here. If I - read this paragraph I would completely lose interest in the - package; I would think these lame developers are taking trivial - one/two lines of R code and encoding it as a function with a new - name - why would I ever bother to learn their new API. - - The entire value added of the code lies in identifying clean - unclustered events, stabbing into messy situations by trying to fuse - clustered events under certain conditions, and walking away from - places where fusing can't be done. None of that is advertised in the - man page. The word 'fuse' does not occur anywhere on the man page! - - 3. When I run the example I get a huge messy structure that's no - fun. Why not have: - str(output, max.level=2) - which is more comprehensible. - - 4. Look at - - library(eventstudies) - data(EESData) - ## Input S&P 500 as the univariate series - input <- EESData$sp500 - ## Constructing summary statistics for 5% tail values (5% on both - sides) - output <- ees(input, prob.value = 5) - str(output) - - It looks nicer and more readable as: - - library(eventstudies) - data(EESData) - r <- ees(EESData$sp500, prob.value = 5) - str(r, max.level=2) - - 5. Choose a consistent style. Is there going to be a - library(eventstudies) in front of all the examples? This was not - there with the others. Why is it here? - - 6. 
Why are we saying " To convert number to words, code uses - function 'numbers2words' by - John Fox and 'deprintize' function by Miron Kursa.". We are - using thousands of functions by others but is this a big deal? - - 7. In - - $data$Clustered - event.series cluster.pattern - 2000-03-16 2.524452 3 - 2003-03-17 3.904668 2 - - Perhaps the word `runlength' is universally understood instead of - cluster.pattern - - The word `event.series' is incomprehensible to me. - - 8. In : - - > output$upper.tail$extreme.event.distribution - unclstr used.clstr removed.clstr tot.clstr tot tot.used - upper 65 5 32 37 102 70 - - The column names are horrible. - - Pick a more rational sequencing where this process unfolds from - left to right. - - This table is the heart of the functionality of what's being done and - it isn't explained at all in the man page. - - The man page should say that the researcher might like to only - study clean unclustered events - in which case he should run with - xxx. If he wishes to use the methodology of fusing adjacent events - as done in PSS, then additionally we are able to salvage the events - xxx. - - - 9. The run length table should be defined as a table showing a - column which is the run length and a column which is the number - of events which are a run of that length. - - 10. Just confirming: In a package vignette we're going to be able - to reproduce some key results from the tables of PSS using this - function? - - 11. Wouldn't it be neat to draw something graphical with - abline(v=xxx, lty=2) where all the extreme events are shown on - a picture? With a different colour for fused and for rejected - events. - -** Feedback on eventstudies package - - First batch. - - - At many places the phrase `eventstudy' is being used when what's - required is `event study'. - - - When I say ?AMMData it is riddled with mistakes!!!! The man page - has four sentences and has more than 1 error per sentence. - - 1. 
The first few words read: "The data series is a daily time-series - zoo object. The sample range for the data is from 2012-02-01 to - 2014-01-31." Why should this be the top priority? - - 2. The two sentences after this, which add up to the full man page, - contain one grammatical error each. - - 3. Nowhere in the man page is the unit mentioned (per cent). - - 4. The dataset contains call.money.rate and that's inconsistent with - the man page. - - 5. The example says library(zoo) which is not required. - -Why do we need a special data object named AMMData? Can we not just -have one single example dataset with daily returns data for firms, -that is used for the examples involving both event studies and AMM? - -If you had to have this in the package (which I doubt), a better -example is: - - data(AMMData) - str(AMMData) - tail(AMMData) - summary(AMMData) - -We in India use too many abbreviations. Let's stick to the phrase -`augmented market model' instead of overusing the phrase AMM. - - -*** When I say ?EESData I see a section `Format' which is not in ?AMMData. - - The facts on this man page should say that this is a dataset for - the purpose of demonstrating the EES functionality (no - abbreviations please), and for replicating the results of the PSS - paper. It should explain what the data is (daily returns measured - in per cent). - - - Why is the example here different from the example for AMMData? - -*** The dataset INR introduces a new word `sample' which was not used in the previous two. - Can we please have extreme maniacal consistency in all these? - As pointed out above, there is duplication between INR being here and - it being in AMMData. - -*** It is truly wrong to have a MMData data object!! - Nothing prevents you from estimating an MM using the data for an AMM. - Can we please be more intelligent about all this. 
- -** Collated - - bad variable names - - eesPlot: make it S3 function - - Do: ees(), eventstudy(), plot() - - summary.ees() - - ees(): event window in the API and the man pages (language + information) - - Remove comments from examples, plus cleaning - - Example consistency: remove library() calls from examples - - Remove unneeded references - - ees(): output colnames, output table format (+sequencing) - - ees(): reproducibility of PSS in the vignette - - plot.ees() - - Spell check - - Use "event study" instead of "eventstudy" - - Man pages: AMMData: grammatical errors, language, units, - consistent sections, call.money.rate - - EESData: say about PSS - - Avoid abbreviations - - Get rid of MMData, INR dataset - - lmAMM example - - phys2eventtime example - - Spell check From noreply at r-forge.r-project.org Mon May 12 16:00:42 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 16:00:42 +0200 (CEST) Subject: [Eventstudies-commits] r321 - pkg/vignettes Message-ID: <20140512140042.EEA451874C4@r-forge.r-project.org> Author: ajayshah Date: 2014-05-12 16:00:42 +0200 (Mon, 12 May 2014) New Revision: 321 Added: pkg/vignettes/new.Rnw Log: First push of a fully new vignette. 
Added: pkg/vignettes/new.Rnw =================================================================== --- pkg/vignettes/new.Rnw (rev 0) +++ pkg/vignettes/new.Rnw 2014-05-12 14:00:42 UTC (rev 321) @@ -0,0 +1,204 @@ +\documentclass[a4paper,11pt]{article} +\usepackage{graphicx} +\usepackage{a4wide} +\usepackage[colorlinks,linkcolor=blue,citecolor=red]{hyperref} +\usepackage{natbib} +\usepackage{float} +\usepackage{tikz} +\usepackage{parskip} +\usepackage{amsmath} +\title{Introduction to the \textbf{eventstudies} package in R} +\author{Ajay Shah} +\begin{document} +\maketitle + +\begin{abstract} +\end{abstract} +\SweaveOpts{engine=R,pdf=TRUE} + +\section{The standard event study in finance} + +In this section, we look at using the eventstudies package for the +purpose of doing the standard event study using daily returns data in +financial economics. This is a workhorse application of event +studies. The treatment here assumes knowledge of event studies +\citep{Corrado2011}. + +To conduct an event study, you must have a list of firms with +associated dates, and you must have returns data for these +firms. These dates must be stored as a simple data frame. To +illustrate this, we use the object `SplitDates' in the package which +is used for doing examples. + +<>= +library(eventstudies) +data(SplitDates) # The sample +str(SplitDates) # Just a data frame +head(SplitDates) +@ + +The representation of dates is a data frame with two columns. The +first column is the name of the unit of observation which experienced +the event. The second column is the event date. + +The second thing that is required for doing an event study is data for +stock price returns for all the firms. The sample dataset supplied in +the package is named `StockPriceReturns': + +<>= +data(StockPriceReturns) # The sample +str(StockPriceReturns) # A zoo object +head(StockPriceReturns,3) # Time series of dates and returns. 
+@ + +The StockPriceReturns object is thus a zoo object which is a time +series of daily returns. These are measured in per cent, i.e. a value +of +4 is returns of +4\%. The zoo object has many columns of returns +data, one for each unit of observation which, in this case, is a +firm. The column name of the zoo object must match the firm name +(i.e. the name of the unit of observation) in the list of events. + +The package gracefully handles the three kinds of problems encountered +with real world data: (a) a firm in the returns where there is no +event, (b) a firm with an event where returns data is lacking and (c) +a stream of missing data in the returns data surrounding the event +date. + +With this in hand, we are ready to run our first event study, using +raw returns: + +<>= +es <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "None", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap") +@ + +This runs an event study using events listed in SplitDates, and using +returns data for the firms in StockPriceReturns. An event window of 10 +days is analysed. + +Event studies with returns data typically do some kind of adjustment +of the returns data in order to reduce variance. In order to keep +things simple, in this first event study, we are doing no adjustment, +which is done by setting `type' to ``None''. + +While daily returns data has been supplied, the standard event study +deals with cumulated returns. In order to achieve this, we set +to.remap to TRUE and we ask that this remapping be done using cumsum. + +Finally, we come to inference strategy. We instruct eventstudy to do +inference and ask for bootstrap inference. + +Let us peek and poke at the object `es' that is returned. + +<>= +class(es) +str(es) +@ + +The object returned by eventstudy is of class `es'. It is a list with +five components. 
Three of these are just a record of the way +eventstudy() was run: the inference procedure adopted (bootstrap +inference in this case), the window width (10 in this case) and the +method used for mapping the data (cumsum). The two new things are +`outcomes' and `eventstudy.output'. + +The vector `outcomes' shows the disposition of each event in the +events table. There are 22 rows in SplitDates, hence there will be 22 +elements in the vector `outcomes'. In this vector, `success' denotes a +successful use of the event. When an event cannot be used properly, +various error codes are supplied. E.g. `unitmissing' is reported when +the events table shows an event for a unit of observation where +returns data is not observed. + +\begin{figure} +\begin{center} +<>= +par(mai=c(.8,.8,.2,.2)) +plot(es, cex.axis=.7, cex.lab=.7) +@ +\end{center} +\caption{Plot method applied to es object}\label{f:esplot1} +\end{figure} + +% TODO: The x label should be "Event time (days)" and should +% automatically handle other situations like weeks or months or microseconds. +% The y label is much too long. + +Plot and print methods for the class `es' are supplied. The standard +plot is illustrated in Figure \ref{f:esplot1}. In this case, we see +the 95\% confidence interval is above 0 and below 0 and in no case can +the null of no-effect, compared with the starting date (10 days before +the stock split date), be rejected. + +In this first example, raw stock market returns was utilised in the +event study. It is important to emphasise that the event study is a +statistically valid tool even under these circumstances. Averaging +across multiple events isolates the event-related +fluctuations. However, there is a loss of statistical efficiency that +comes from fluctuations of stock prices that can have nothing to do +with firm level news. In order to increase efficiency, we resort to +adjustment of the returns data. + +The standard methodology in the literature is to use a market +model. 
This estimates a time-series regression $r_{jt} = \alpha_j + +\beta_j r_{Mt} + \epsilon_{jt}$ where $r_{jt}$ is returns for firm $j$ +on date $t$, and $r_{Mt}$ is returns on the market index on date +$t$. The market index captures market-wide fluctuations, which have +nothing to do with firm-specific factors. The event study is then +conducted with the cumulated $\epsilon_{jt}$ time series. This yields +improved statistical efficiency as $\textrm{Var}(\epsilon_j) < +\textrm{Var}(r_j)$. + +This is invoked by setting `type' to `marketResidual': + +<>= +data(NiftyIndex) +es.mm <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "marketResidual", + market.returns=NiftyIndex, + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap") +@ + +In addition to setting `type' to `marketResidual', we are now required +to supply data for the market index, $r_{Mt}$. In the above example, +this is the data object NiftyIndex supplied in the package. This is +just a zoo vector with daily returns of the stock market index. + +\begin{figure} +\begin{center} +<>= +par(mai=c(.8,.8,.2,.2)) +plot(es.mm, cex.axis=.7, cex.lab=.7) +@ +\end{center} +\caption{Adjustment using the market model}\label{f:esplotmm} +\end{figure} + +A comparison of the range of the $y$ axis in Figure \ref{f:esplot1} +versus that seen in Figure \ref{f:esplotmm} shows the substantial +improvement in statistical efficiency that was obtained by market +model adjustment. + +We close our treatment of the standard finance event study with one +step forward on further reducing the variance of $\epsilon$: by doing +an `augmented market model' regression with more than one explanatory +variable. 
+ +\newpage +\bibliographystyle{jss} \bibliography{es} + +\end{document} + + +lmAMM From noreply at r-forge.r-project.org Mon May 12 16:07:44 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 16:07:44 +0200 (CEST) Subject: [Eventstudies-commits] r322 - in pkg: . R Message-ID: <20140512140745.0EDF018752D@r-forge.r-project.org> Author: vikram Date: 2014-05-12 16:07:44 +0200 (Mon, 12 May 2014) New Revision: 322 Modified: pkg/NAMESPACE pkg/R/ees.R Log: Modified NAMESPACE to include ees functionality; minor modifcation in naming of ees functions Modified: pkg/NAMESPACE =================================================================== --- pkg/NAMESPACE 2014-05-12 14:00:42 UTC (rev 321) +++ pkg/NAMESPACE 2014-05-12 14:07:44 UTC (rev 322) @@ -1,5 +1,6 @@ export(eventstudy, inference.bootstrap, inference.wilcox, phys2eventtime, - remap.cumsum, remap.cumprod, remap.event.reindex, ees, eesPlot) + remap.cumsum, remap.cumprod, remap.event.reindex, eesSummary, eesDates, + eesInference) export(marketResidual, excessReturn) @@ -15,5 +16,6 @@ S3method(plot, amm) S3method(plot, es) +S3method(plot, ees) import(boot,sandwich,testthat,xts,zoo) Modified: pkg/R/ees.R =================================================================== --- pkg/R/ees.R 2014-05-12 14:00:42 UTC (rev 321) +++ pkg/R/ees.R 2014-05-12 14:07:44 UTC (rev 322) @@ -31,7 +31,7 @@ # - Clustered, Un-clustered and Both #------------------------------------------------------------------ # NOTE: -summary.ees <- function(x, ...){ +eesSummary <- function(x, ...){ no.var <- NCOL(input) #--------------------- @@ -681,7 +681,7 @@ ###################### ## Input: get.clusters.formatted (GCF) output ## Output: Extreme Event dates for normal and purged data -extremeDates <- function(input){ +eesDates <- function(input){ ##----------------- ## Get event dates ##----------------- @@ -720,6 +720,7 @@ ##---------------------- ## Event study plot for EES (extreme event studies) ## 
Input: Output of GCF +## eventLists: Output of eesDates eesInference <- function(input, eventLists, to.remap, remap, width, inference = TRUE, inference.strategy = "bootstrap"){ inf <- list() From noreply at r-forge.r-project.org Mon May 12 16:15:52 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 16:15:52 +0200 (CEST) Subject: [Eventstudies-commits] r323 - pkg/vignettes Message-ID: <20140512141552.D63CD1869C2@r-forge.r-project.org> Author: chiraganand Date: 2014-05-12 16:15:52 +0200 (Mon, 12 May 2014) New Revision: 323 Modified: pkg/vignettes/new.Rnw Log: Updated object. Modified: pkg/vignettes/new.Rnw =================================================================== --- pkg/vignettes/new.Rnw 2014-05-12 14:07:44 UTC (rev 322) +++ pkg/vignettes/new.Rnw 2014-05-12 14:15:52 UTC (rev 323) @@ -16,7 +16,7 @@ \end{abstract} \SweaveOpts{engine=R,pdf=TRUE} -\section{The standard event study in finance} +\Section{The standard event study in finance} In this section, we look at using the eventstudies package for the purpose of doing the standard event study using daily returns data in @@ -158,12 +158,12 @@ This is invoked by setting `type' to `marketResidual': <>= -data(NiftyIndex) +data(OtherReturns) es.mm <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, width = 10, type = "marketResidual", - market.returns=NiftyIndex, + market.returns=OtherReturns$NiftyIndex, to.remap = TRUE, remap = "cumsum", inference = TRUE, From noreply at r-forge.r-project.org Mon May 12 16:53:23 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 12 May 2014 16:53:23 +0200 (CEST) Subject: [Eventstudies-commits] r324 - pkg/vignettes Message-ID: <20140512145323.5F6D0186D43@r-forge.r-project.org> Author: chiraganand Date: 2014-05-12 16:53:23 +0200 (Mon, 12 May 2014) New Revision: 324 Modified: pkg/vignettes/new.Rnw Log: Added text on augmented market model event study. 
Modified: pkg/vignettes/new.Rnw =================================================================== --- pkg/vignettes/new.Rnw 2014-05-12 14:15:52 UTC (rev 323) +++ pkg/vignettes/new.Rnw 2014-05-12 14:53:23 UTC (rev 324) @@ -16,7 +16,7 @@ \end{abstract} \SweaveOpts{engine=R,pdf=TRUE} -\Section{The standard event study in finance} +\section{The standard event study in finance} In this section, we look at using the eventstudies package for the purpose of doing the standard event study using daily returns data in @@ -59,10 +59,10 @@ (i.e. the name of the unit of observation) in the list of events. The package gracefully handles the three kinds of problems encountered -with real world data: (a) a firm in the returns where there is no -event, (b) a firm with an event where returns data is lacking and (c) -a stream of missing data in the returns data surrounding the event -date. +with real world data: (a) a firm where returns is observed but there +is no event, (b) a firm with an event where returns data is lacking +and (c) a stream of missing data in the returns data surrounding the +event date. With this in hand, we are ready to run our first event study, using raw returns: @@ -163,17 +163,19 @@ eventList = SplitDates, width = 10, type = "marketResidual", - market.returns=OtherReturns$NiftyIndex, to.remap = TRUE, remap = "cumsum", inference = TRUE, - inference.strategy = "bootstrap") + inference.strategy = "bootstrap", + market.returns=OtherReturns$NiftyIndex + ) @ In addition to setting `type' to `marketResidual', we are now required to supply data for the market index, $r_{Mt}$. In the above example, -this is the data object NiftyIndex supplied in the package. This is -just a zoo vector with daily returns of the stock market index. +this is the data object NiftyIndex supplied from the OtherReturns data +object in the package. This is just a zoo vector with daily returns of +the stock market index. \begin{figure} \begin{center} @@ -191,14 +193,67 @@ model adjustment. 
We close our treatment of the standard finance event study with one -step forward on further reducing the variance of $\epsilon$: by doing +step forward on further reducing $\textrm{Var}(\epsilon)$ : by doing an `augmented market model' regression with more than one explanatory -variable. +variable. The augmented market model uses regressions like: +\[ +r_{jt} = \alpha_j + \beta_1,j r_{M1,t} + \beta_2,j r_{M2,t} + \epsilon_{jt} +\] + +where in addition to the market index $r_{M1,t}$, there is an +additional explanatory variable $r_{M2,t}$. One natural candidate is +the returns on the exchange rate, but there are many other candidates. + +An extensive literature has worked out the unique problems of +econometrics that need to be addressed in doing augmented market +models. The package uses the synthesis of this literature as presented +in \citet{patnaik2010amm}.\footnote{The source code for augmented + market models in the package is derived from the source code written + for \citet{patnaik2010amm}.} + +To repeat the stock splits event study using augmented market models, +we use the incantation: + +<>= +es.amm <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "lmAMM", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap", + market.returns=OtherReturns$NiftyIndex, + others=OtherReturns$USDINR, + market.returns.purge=TRUE + ) +@ + +Here the additional regressor on the augmented market model is the +returns on the exchange rate, which is the slot USDINR in +OtherReturns. The full capabilities for doing augmented market models +from \citet{patnaik2010amm} are available. These are documented +elsewhere. For the present moment, we will use the feature +market.returns.purge without explaining it. + +Let us look at the gains in statistical efficiency across the three +variants of the event study. We will use the width of the confidence +interval at date 0 as a measure of efficiency. 
+ +<>= +tmp <- rbind(es$eventstudy.output[10,], es.mm$eventstudy.output[10,])[,c(1,3)] +rownames(tmp) <- c("None","MM") +tmp[,2]-tmp[,1] +@ + +This shows a sharp reduction in the width of the bootstrap 95\% +confidence interval from None to MM adjustment. Over and above this, a +small gain is obtained when going from MM adjustment to AMM +adjustment. + \newpage \bibliographystyle{jss} \bibliography{es} \end{document} - - -lmAMM From noreply at r-forge.r-project.org Wed May 14 12:29:02 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 14 May 2014 12:29:02 +0200 (CEST) Subject: [Eventstudies-commits] r325 - in pkg: . R man Message-ID: <20140514102902.DFF36186CC0@r-forge.r-project.org> Author: vikram Date: 2014-05-14 12:29:01 +0200 (Wed, 14 May 2014) New Revision: 325 Added: pkg/R/eesInference.R pkg/man/eesDates.Rd pkg/man/eesInference.Rd pkg/man/eesSummary.Rd pkg/man/get.clusters.formatted.Rd Removed: pkg/R/ees.R Modified: pkg/NAMESPACE pkg/man/ees.Rd Log: Added new ees functions to NAMESPACE; added documentation for the same Modified: pkg/NAMESPACE =================================================================== --- pkg/NAMESPACE 2014-05-12 14:53:23 UTC (rev 324) +++ pkg/NAMESPACE 2014-05-14 10:29:01 UTC (rev 325) @@ -1,6 +1,6 @@ export(eventstudy, inference.bootstrap, inference.wilcox, phys2eventtime, remap.cumsum, remap.cumprod, remap.event.reindex, eesSummary, eesDates, - eesInference) + eesInference, get.clusters.formatted) export(marketResidual, excessReturn) Deleted: pkg/R/ees.R =================================================================== --- pkg/R/ees.R 2014-05-12 14:53:23 UTC (rev 324) +++ pkg/R/ees.R 2014-05-14 10:29:01 UTC (rev 325) @@ -1,825 +0,0 @@ - -# Total 16 functions -############################ -# Identifying extreme events -############################ - -#---------------------------------------------------------------- -# INPUT: -# 'input' : Output of get.clusters.formatted -# 'prob.value': This 
is the tail value for which event is -# to be defined. For eg: prob.value=5 will -# consider 5% tail on both sides -#----------------------------------------------------------------- -# OUTPUT: -# Result will be in a list of 3 with following tables: -# 1. Summary statistics -# a. Summary of whole data-set -# 2. Lower tail: Extreme event tables -# a. Distribution of extreme events -# b. Run length distribution -# c. Quantile values -# d. Yearly distribution -# e. Extreme event data -# - Clustered, Un-clustered and Both -# 3. Upper tail: Extreme event tables -# a. Distribution of extreme events -# b. Run length distribution -# c. Quantile values -# d. Yearly distribution -# e. Extreme event data -# - Clustered, Un-clustered and Both -#------------------------------------------------------------------ -# NOTE: -eesSummary <- function(x, ...){ - no.var <- NCOL(input) - - #--------------------- - # Extreme event output - #--------------------- - # Summary statistics - summ.st <- attr(input,"sumstat") - colnames(summ.st) <- NULL - summ.st <- t(summ.st) - - # Distribtution of events - event.dist <- attr(input,"extreme.events.distribution") - - # Run length distribution - runlength <- runlength.dist(input) - - # Quantile extreme values - qnt.values <- quantile.extreme.values(input) - - # Yearly distribution of extreme event dates - yearly.exevent <- yearly.exevent.dist(input) - - #--------------------- - # Compiling the output - #--------------------- - output <- lower.tail <- upper.tail <- list() - # Compiling lower tail and upper tail separately - # Lower tail - lower.tail$extreme.event.distribution <- event.dist$lower.tail - lower.tail$runlength <- runlength$lower.tail - lower.tail$quantile.values <- qnt.values$lower.tail - lower.tail$yearly.extreme.event <- round(t(yearly.exevent$lower.tail),2) - # Upper tail - upper.tail$extreme.event.distribution <- event.dist$upper.tail - upper.tail$runlength <- runlength$upper.tail - upper.tail$quantile.values <- 
qnt.values$upper.tail - upper.tail$yearly.extreme.event <- round(t(yearly.exevent$upper.tail),2) - # Output - output$data.summary <- summ.st - output$lower.tail <- lower.tail - output$upper.tail <- upper.tail - return(output) -} - -######################################## -# Functions used for formatting clusters -######################################## -#------------------------ -# Categorzing tail events -# for ES analysis -#------------------------ -# Generates returns for the series -# Mark left tail, right tail events -gen.data <- function(d,probvalue,value="nonreturns"){ - res <- data.frame(dates=index(d),value=coredata(d)) - if(value=="returns"){ - res$returns <- c(NA,coredata(diff(log(d))*100)) - }else{ - res$returns <- d - } - pval <- c(probvalue/100,(1-(probvalue/100))) - pval <- quantile(res$returns,prob=pval,na.rm=TRUE) - res$left.tail <- as.numeric(res$returns < pval[1]) - res$right.tail <- as.numeric(res$returns > pval[2]) - res$both.tails <- res$left.tail + res$right.tail - res <- res[complete.cases(res),] - if(value=="returns"){ - return(res[-1,]) - }else{ - return(res) - } -} - - -#------------------- -# Summarise patterns -summarise.rle <- function(oneseries){ - tp <- rle(oneseries) - tp1 <- data.frame(tp$lengths,tp$values) - tp1 <- subset(tp1,tp1[,2]==1) - summary(tp1[,1]) -} - -# Summarise the pattern of cluster -summarise.cluster <- function(obj){ - rle.both <- summarise.rle(obj$both.tail) - rle.left <- summarise.rle(obj$left.tail) - rle.right <- summarise.rle(obj$right.tail) - rbind(both=rle.both,left=rle.left,right=rle.right) -} - -# Getting location for the length -exact.pattern.location <- function(us,pt,pt.len){ - st <- rle(us) - len <- st$length - loc.cs <- cumsum(st$length) - loc <- loc.cs[which(st$values==pt & st$length==pt.len)]-pt.len+1 - return(loc) -} - -# Identify and mark mixed clusters -identify.mixedclusters <- function(m,j){ - m$remove.mixed <- 0 - rownum <- which(m$pattern==TRUE) - for(i in 1:length(rownum)){ - nextnum <- 
rownum[i]+j-1 - twonums <- m$returns[c(rownum[i]:nextnum)] > 0 - if(sum(twonums)==j || sum(twonums)==0){ - next - }else{ - m$remove.mixed[c(rownum[i]:nextnum)] <- 5 - } - } - m -} - -#-------------------- -# Formatting clusters -#-------------------- -# This function takes does the following transformation: -#---------------------------------------------------- -# What the function does? -# i. Get extreme events from event.series -# ii. Remove all the mixed clusters -# iii. Get different types cluster -# iv. Further club the clusters for event series and -# corresponding response series to get -# clustered returns -# v. Throw the output in timeseries format -#---------------------------------------------------- -# Input for the function -# event.series = Series in levels or returns on events -# is to be defined -# response.series = Series in levels or returns on which -# response is to be generated -# prob.value = Tail value for defining an event -# event.value = What value is to be studied -# returns or levels -# Similarly for response.value -#---------------------------------------------------- -# Output = Formatted clusters in time series format -#---------------------------------------------------- -get.clusters.formatted <- function(event.series, - response.series, - probvalue=5, - event.value="nonreturns", - response.value="nonreturns"){ - # Getting levels in event format - tmp <- gen.data(event.series, - probvalue=probvalue, - value=event.value) - res.ser <- gen.data(response.series, - probvalue=probvalue, - value=response.value) - # Storing old data points - tmp.old <- tmp - - # Get pattern with maximum length - res <- summarise.cluster(tmp) - max.len <- max(res[,"Max."]) - - #------------------------ - # Removing mixed clusters - #------------------------ - for(i in max.len:2){ - which.pattern <- rep(1,i) - patrn <- exact.pattern.location(tmp$both.tails,1,i) - # If pattern does not exist move to next pattern - if(length(patrn)==0){next} - tmp$pattern <- 
FALSE - tmp$pattern[patrn] <- TRUE - tmp <- identify.mixedclusters(m=tmp,i) - me <- length(which(tmp$remove.mixed==5)) - - if(me!=0){ - tmp <- tmp[-which(tmp$remove.mixed==5),] - cat("Pattern of:",i,";", - "Discarded event:",me/i,"\n") - } - } - tmp.nc <- tmp - - # Merging event and response series - tmp.es <- xts(tmp[,-1],as.Date(tmp$dates)) - tmp.rs <- xts(res.ser[,-1],as.Date(res.ser$dates)) - tmp.m <- merge(tmp.es,res.ser=tmp.rs[,c("value","returns")], - all=F) - - # Formatting - if(event.value=="returns"){ - which.value <- event.value - }else{ - which.value <- "value" - } - # Converting to data.frame - temp <- as.data.frame(tmp.m) - temp$dates <- rownames(temp) - n <- temp - # Get pattern with maximum length - res <- summarise.cluster(temp) - max.len <- max(res[,"Max."]) - cat("Maximum length after removing mixed clusters is", - max.len,"\n") - # Marking clusters - n$cluster.pattern <- n$both.tails - for(pt.len in max.len:1){ - mark <- exact.pattern.location(n$both.tails,1,pt.len) - if(length(mark)==0){next} - n$cluster.pattern[mark] <- pt.len - } - - #------------------- - # Clustering returns - #------------------- - print("Clustering events.") - for(pt.len in max.len:2){ - rownum <- exact.pattern.location(n$both.tails,1,pt.len) - # If pattern does not exist - if(length(rownum)==0){ - cat("Pattern",pt.len,"does not exist.","\n");next - } - # Clustering - while(length(rownum)>0){ - prevnum <- rownum[1]-1 - lastnum <- rownum[1]+pt.len-1 - # Clustering event series - if(event.value=="returns"){ - newreturns <- (n$value[lastnum]-n$value[prevnum])*100/n$value[prevnum] - n[rownum[1],c("value","returns")] <- c(n$value[lastnum],newreturns) - }else{ - newreturns <- sum(n$value[rownum[1]:lastnum],na.rm=T) - n[rownum[1],c("value","returns")] <- c(n$value[lastnum],newreturns) - } - # Clustering response series - if(response.value=="returns"){ - newreturns.rs <- (n$value.1[lastnum]-n$value.1[prevnum])*100/n$value.1[prevnum] - n[rownum[1],c("value.1","returns.1")] <- 
c(n$value.1[lastnum],newreturns.rs) - }else{ - newreturns <- sum(n$value.1[rownum[1]:lastnum],na.rm=T) - n[rownum[1],c("value.1","returns.1")] <- c(n$value.1[lastnum],newreturns) - } - n <- n[-c((rownum[1]+1):lastnum),] - rownum <- exact.pattern.location(n$both.tails,1,pt.len) - } - } - # Columns to keep - cn <- c(which.value,"left.tail","right.tail", - "returns.1","cluster.pattern") - tmp.ts <- zoo(n[,cn],order.by=as.Date(n$dates)) - colnames(tmp.ts) <- c("event.series","left.tail","right.tail", - "response.series","cluster.pattern") - - # Results - attr(tmp.ts, which = "sumstat") <- sumstat(input = event.series) - attr(tmp.ts, which = "extreme.events.distribution") <- extreme.events.distribution(input = event.series, gcf.output = tmp.ts, prob.value = probvalue) - attr(tmp.ts, which = "probvalue") <- probvalue - class(tmp.ts) <- c("ees","zoo") - return(tmp.ts) -} - -############################## -# Summary statistics functions -############################## -#--------------------------------------------- -# Table 1: Summary statistics -# INPUT: Time series data-set for which -# summary statistics is to be estimated -# OUTPUT: A data frame with: -# - Values: "Minimum", 5%,"25%","Median", -# "Mean","75%","95%","Maximum", -# "Standard deviation","IQR", -# "Observations" -#---------------------------------------------- -sumstat <- function(input){ - no.var <- NCOL(input) - if(no.var==1){input <- xts(input)} - # Creating empty frame: chassis - tmp <- data.frame(matrix(NA,nrow=11,ncol=NCOL(input))) - colnames(tmp) <- "summary" - rownames(tmp) <- c("Min","5%","25%","Median","Mean","75%","95%", - "Max","sd","IQR","Obs.") - # Estimating summary statistics - tmp[1,] <- apply(input,2,function(x){min(x,na.rm=TRUE)}) - tmp[2,] <- apply(input,2,function(x){quantile(x,0.05,na.rm=TRUE)}) - tmp[3,] <- apply(input,2,function(x){quantile(x,0.25,na.rm=TRUE)}) - tmp[4,] <- apply(input,2,function(x){median(x,na.rm=TRUE)}) - tmp[5,] <- apply(input,2,function(x){mean(x,na.rm=TRUE)}) - 
tmp[6,] <- apply(input,2,function(x){quantile(x,0.75,na.rm=TRUE)}) - tmp[7,] <- apply(input,2,function(x){quantile(x,0.95,na.rm=TRUE)}) - tmp[8,] <- apply(input,2,function(x){max(x,na.rm=TRUE)}) - tmp[9,] <- apply(input,2,function(x){sd(x,na.rm=TRUE)}) - tmp[10,] <- apply(input,2,function(x){IQR(x,na.rm=TRUE)}) - tmp[11,] <- apply(input,2,function(x){NROW(x)}) - tmp <- round(tmp,2) - - return(tmp) -} - -############################# -# Getting event segregation -# - clustered and unclustered -############################# -#---------------------------- -# INPUT: -# 'input': Data series for which event cluster distribution -# is to be calculated; -# Note: The input series expects the input to be in levels not in returns, -# if the some the inputs are already in return formats one has to -# use the other variable 'already.return.series' -# 'already.return.series': column name is to be given which already has -# return series in the data-set -# 'prob.value': Probility value for which tail is to be constructed this -# value is equivalent to one side tail for eg. 
if prob.value=5 -# then we have values of 5% tail on both sides -# Functions used: get.event.count() -# OUTPUT: -# Distribution of extreme events -#---------------------------- - -extreme.events.distribution <- function(input, gcf.output, prob.value){ - # Creating an empty frame - no.var <- NCOL(input) - lower.tail.dist <- data.frame(matrix(NA,nrow=no.var,ncol=6)) - upper.tail.dist <- data.frame(matrix(NA,nrow=no.var,ncol=6)) - colnames(lower.tail.dist) <- c("Unclustered","Used clusters", - "Removed clusters","Total clusters", - "Total","Total used clusters") - rownames(lower.tail.dist) <- colnames(input) - colnames(upper.tail.dist) <- c("Unclustered","Used clusters", - "Removed clusters","Total clusters", - "Total","Total used clusters") - rownames(upper.tail.dist) <- colnames(input) - # Estimating cluster count - #-------------- - # Cluster count - #-------------- - # Non-returns (if it is already in return format) - tmp <- get.event.count(input, gcf.output, probvalue=prob.value, - value="nonreturns") - lower.tail.dist <- tmp[1,] - upper.tail.dist <- tmp[2,] - - #----------------------------- - # Naming the tail distribution - #----------------------------- - mylist <- list(lower.tail.dist,upper.tail.dist) - names(mylist) <- c("lower.tail", "upper.tail") - return(mylist) -} - -# Functions used in event count calculation -get.event.count <- function(series, - probvalue=5, - gcf.output, - value="returns"){ - # Extracting dataset - tmp.old <- gen.data(series,probvalue,value) - cp <- gcf.output[,"cluster.pattern"] - lvl <- as.numeric(levels(as.factor(cp))) - lvl.use <- lvl[which(lvl>1)] - # Calculating Total events - tot.ev.l <- length(which(tmp.old[,"left.tail"]==1)) - tot.ev.r <- length(which(tmp.old[,"right.tail"]==1)) - # Calculating Unclustered events - un.clstr.l <- length(which(gcf.output[,"left.tail"]==1 & - gcf.output[,"cluster.pattern"]==1)) - un.clstr.r <- length(which(gcf.output[,"right.tail"]==1 & - gcf.output[,"cluster.pattern"]==1)) - # Calculating 
Used clusters - us.cl.l <- us.cl.r <- NULL - for(i in 1:length(lvl.use)){ - tmp1 <- length(which(gcf.output[,"cluster.pattern"]==lvl.use[i] & - gcf.output[,"left.tail"]==1))*lvl.use[i] - tmp2 <- length(which(gcf.output[,"cluster.pattern"]==lvl.use[i] & - gcf.output[,"right.tail"]==1))*lvl.use[i] - us.cl.l <- sum(us.cl.l,tmp1,na.rm=TRUE) - us.cl.r <- sum(us.cl.r,tmp2,na.rm=TRUE) - } - - # Making a table - tb <- data.frame(matrix(NA,2,6)) - colnames(tb) <- c("unclustered.events","used.clustered.events","removed.clustered.events","total.clustered.events","total.events","total.used.events") - rownames(tb) <- c("lower","upper") - tb[,"total.events"] <- c(tot.ev.l,tot.ev.r) - tb[,"unclustered.events"] <- c(un.clstr.l,un.clstr.r) - tb[,"used.clustered.events"] <- c(us.cl.l,us.cl.r) - tb[,"total.used.events"] <- tb$unclustered.events+tb$used.clustered.events - tb[,"total.clustered.events"] <- tb$total.events-tb$unclustered.events - tb[,"removed.clustered.events"] <- tb$total.clustered.events-tb$used.clustered.events - - return(tb) -} - -###################### -# Yearly summary stats -###################### -#---------------------------- -# INPUT: -# 'input': Output from get.clusters.formatted function -# 'prob.value': Probility value for which tail is to be constructed this -# value is equivalent to one side tail for eg. 
if prob.value=5 -# then we have values of 5% tail on both sides -# Functions used: yearly.exevent.summary() -# OUTPUT: -# Yearly distribution of extreme events -#---------------------------- -yearly.exevent.dist <- function(input){ - mylist <- list() - ## Estimating cluster count - tmp.res <- yearly.exevent.summary(input) - tmp.res[is.na(tmp.res)] <- 0 - ## Left and right tail - lower.tail.yearly.exevent <- tmp.res[,1:2] - upper.tail.yearly.exevent <- tmp.res[,3:4] - output <- list() - output$lower.tail <- lower.tail.yearly.exevent - output$upper.tail <- upper.tail.yearly.exevent - mylist <- output - return(mylist) -} - -#------------------------------------------------ -# Get yearly no. and median for good and bad days -#------------------------------------------------ -yearly.exevent.summary <- function(tmp){ - tmp.bad <- tmp[which(tmp[,"left.tail"]==1),] - tmp.good <- tmp[which(tmp[,"right.tail"]==1),] - # Bad days - tmp.bad.y <- apply.yearly(xts(tmp.bad),function(x)nrow(x)) - tmp.bad.y <- merge(tmp.bad.y,apply.yearly(xts(tmp.bad[,1]),function(x)median(x,na.rm=T))) - index(tmp.bad.y) <- as.yearmon(as.Date(substr(index(tmp.bad.y),1,4),"%Y")) - # Good days - tmp.good.y <- apply.yearly(xts(tmp.good),function(x)nrow(x)) - tmp.good.y <- merge(tmp.good.y,apply.yearly(xts(tmp.good[,1]),function(x)median(x,na.rm=T))) - index(tmp.good.y) <- as.yearmon(as.Date(substr(index(tmp.good.y),1,4),"%Y")) - tmp.res <- merge(tmp.bad.y,tmp.good.y) - colnames(tmp.res) <- c("total.events.l","median.value.l", - "total.events.u","median.value.u") - output <- as.data.frame(tmp.res) - cn <- rownames(output) - rownames(output) <- sapply(rownames(output), - function(x)substr(x,nchar(x)-3,nchar(x))) - return(output) -} - -#################################### -# Quantile values for extreme events -#################################### -#----------------------------------- -# INPUT: -# 'input': Output of get.clusters.formatted -# Note: The input series expects the input to be in levels not in 
returns, -# if the some the inputs are already in return formats one has to -# use the other variable 'already.return.series' -# 'already.return.series': column name is to be given which already has -# return series in the data-set -# Functions used: get.clusters.formatted() -# OUTPUT: -# Lower tail and Upper tail quantile values -#----------------------------------- -quantile.extreme.values <- function(input){ - # Creating an empty frame - lower.tail.qnt.value <- data.frame(matrix(NA,nrow=1,ncol=6)) - upper.tail.qnt.value <- data.frame(matrix(NA,nrow=1,ncol=6)) - colnames(lower.tail.qnt.value) <- c("Min","25%","Median","75%","Max", - "Mean") - rownames(lower.tail.qnt.value) <- "extreme.events" - colnames(upper.tail.qnt.value) <- c("Min","25%","Median","75%","Max", - "Mean") - rownames(upper.tail.qnt.value) <- "extreme.events" - # Estimating cluster count - # Left tail - tmp.left.tail <- input[which(input$left.tail==1), - "event.series"] - df.left <- t(data.frame(quantile(tmp.left.tail,c(0,0.25,0.5,0.75,1)))) - tmp.left <- round(cbind(df.left,mean(tmp.left.tail)),2) - rownames(tmp.left) <- "extreme.events" - colnames(tmp.left) <- c("0%","25%","Median","75%","100%","Mean") - # Right tail - tmp.right.tail <- input[which(input$right.tail==1), - "event.series"] - df.right <- t(data.frame(quantile(tmp.right.tail,c(0,0.25,0.5,0.75,1)))) - tmp.right <- round(cbind(df.right, - mean(tmp.right.tail)),2) - rownames(tmp.right) <- "extreme.events" - colnames(tmp.right) <- c("0%","25%","Median","75%","100%","Mean") - - lower.tail.qnt.value <- tmp.left - upper.tail.qnt.value <- tmp.right - - mylist <- list(lower.tail.qnt.value,upper.tail.qnt.value) - names(mylist) <- c("lower.tail", "upper.tail") - return(mylist) -} - -########################## -# Run length distribution -########################## -#----------------------------------- -# INPUT: -# 'input': Data series in time series format -# Note: The input series expects the input to be in levels not in returns, -# if the 
some the inputs are already in return formats one has to -# use the other variable 'already.return.series' -# 'already.return.series': column name is to be given which already has -# return series in the data-set -# Functions used: get.clusters.formatted() -# get.cluster.distribution() -# numbers2words() -# OUTPUT: -# Lower tail and Upper tail Run length distribution -#----------------------------------- -runlength.dist <- function(input){ - - # Finding maximum Run length - # Seed value - max.runlength <- 0 - #--------------------------- - # Estimating max. Run length - #--------------------------- - tmp.runlength <- get.cluster.distribution(input,"event.series") - max.runlength <- max(max.runlength,as.numeric(colnames(tmp.runlength)[NCOL(tmp.runlength)])) - - # Generating empty frame - col.names <- seq(2:max.runlength)+1 - lower.tail.runlength <- data.frame(matrix(NA,nrow=1, - ncol=length(col.names))) - upper.tail.runlength <- data.frame(matrix(NA,nrow=1, - ncol=length(col.names))) - colnames(lower.tail.runlength) <- col.names - rownames(lower.tail.runlength) <- "clustered.events" - colnames(upper.tail.runlength) <- col.names - rownames(upper.tail.runlength) <- "clustered.events" - - #---------------------- - # Run length estimation - #---------------------- - tmp.res <- get.cluster.distribution(input,"event.series") - for(j in 1:length(colnames(tmp.res))){ - col.number <- colnames(tmp.res)[j] - lower.tail.runlength[1,col.number] <- tmp.res[1,col.number] - upper.tail.runlength[1,col.number] <- tmp.res[2,col.number] - } - - # Replacing NA's with zeroes - lower.tail.runlength[is.na(lower.tail.runlength)] <- 0 - upper.tail.runlength[is.na(upper.tail.runlength)] <- 0 - - # creating column names - word.cn <- NULL - for(i in 1:length(col.names)){ - word.cn[i] <- numbers2words(col.names[i]) - } - colnames(lower.tail.runlength) <- word.cn - colnames(upper.tail.runlength) <- word.cn - mylist <- list(lower.tail.runlength,upper.tail.runlength) - names(mylist) <- 
c("lower.tail", "upper.tail") - return(mylist) -} - -#------------------------- -# Get cluster distribution -#------------------------- -# Input for this function is the output of get.cluster.formatted -get.cluster.distribution <- function(tmp,variable){ - # Extract cluster category - cp <- tmp[,"cluster.pattern"] - lvl <- as.numeric(levels(as.factor(cp))) - lvl.use <- lvl[which(lvl>1)] - # Get numbers for each category - tb <- data.frame(matrix(NA,2,length(lvl.use))) - colnames(tb) <- as.character(lvl.use) - rownames(tb) <- c(paste(variable,":lower tail"), - paste(variable,":upper tail")) - for(i in 1:length(lvl.use)){ - tb[1,i] <- length(which(tmp[,"cluster.pattern"]==lvl.use[i] - & tmp[,"left.tail"]==1)) - tb[2,i] <- length(which(tmp[,"cluster.pattern"]==lvl.use[i] - & tmp[,"right.tail"]==1)) - - } - return(tb) -} - -#---------------------------- -# Converting numbers to words -#---------------------------- -numbers2words <- function(x){ - helper <- function(x){ - digits <- rev(strsplit(as.character(x), "")[[1]]) - nDigits <- length(digits) - if (nDigits == 1) as.vector(ones[digits]) - else if (nDigits == 2) - if (x <= 19) as.vector(teens[digits[1]]) - else trim(paste(tens[digits[2]], - Recall(as.numeric(digits[1])))) - else if (nDigits == 3) trim(paste(ones[digits[3]], "hundred", - Recall(makeNumber(digits[2:1])))) - else { - nSuffix <- ((nDigits + 2) %/% 3) - 1 - if (nSuffix > length(suffixes)) stop(paste(x, "is too large!")) - trim(paste(Recall(makeNumber(digits[ - nDigits:(3*nSuffix + 1)])), - suffixes[nSuffix], - Recall(makeNumber(digits[(3*nSuffix):1])))) - } - } - trim <- function(text){ - gsub("^\ ", "", gsub("\ *$", "", text)) - } - makeNumber <- function(...) 
as.numeric(paste(..., collapse="")) - opts <- options(scipen=100) - on.exit(options(opts)) - ones <- c("", "one", "two", "three", "four", "five", "six", "seven", - "eight", "nine") - names(ones) <- 0:9 - teens <- c("ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", - "sixteen", " seventeen", "eighteen", "nineteen") - names(teens) <- 0:9 - tens <- c("twenty", "thirty", "forty", "fifty", "sixty", "seventy", - "eighty", - "ninety") - names(tens) <- 2:9 - x <- round(x) - suffixes <- c("thousand", "million", "billion", "trillion") - if (length(x) > 1) return(sapply(x, helper)) - helper(x) -} - -########################## -# Extreme event study plot -########################## -# This function generates event study plot for clustered and un-clustered data -#------------------------- -# Input for the function -# z = Data object with both the series response.series and event.series -# response.series.name = Column name of the series for which response is observed -# event.series.name = Column name of the series on which event is observed -# titlestring = Title string for the event study plot -# ylab = Y - axis label -# width = width of event window for event study plot -# prob.value = Probability value for which extreme events is determined -#------------------------- - -###################### -## Extreme event dates -###################### -## Input: get.clusters.formatted (GCF) output -## Output: Extreme Event dates for normal and purged data -eesDates <- function(input){ - ##----------------- - ## Get event dates - ##----------------- - ## Get only unclustered data - data.only.cluster <- input[which(input$cluster.pattern==1),] - data.no.cluster <- input[which(input$cluster.pattern!=0),] - - ## get dates for bigdays and baddays - days.bad.normal <- index(data.only.cluster[which(data.only.cluster[,"left.tail"]==1)]) - days.good.normal <- index(data.only.cluster[which(data.only.cluster[,"right.tail"]==1)]) - days.bad.purged <- 
index(data.no.cluster[which(data.no.cluster[,"left.tail"]==1)]) - days.good.purged <- index(data.no.cluster[which(data.no.cluster[,"right.tail"]==1)]) - ## Event list - events.good.normal <- data.frame(outcome.unit=rep("response.series", - length(days.good.normal)), - event.when=days.good.normal) - events.bad.normal <- data.frame(outcome.unit=rep("response.series", - length(days.bad.normal)), - event.when=days.bad.normal) - events.good.purged <- data.frame(outcome.unit=rep("response.series", - length(days.good.purged)), - event.when=days.good.purged) - events.bad.purged <- data.frame(outcome.unit=rep("response.series", - length(days.bad.purged)), - event.when=days.bad.purged) - dates <- list(events.good.normal=events.good.normal, - events.bad.normal=events.bad.normal, - events.good.purged=events.good.purged, - events.bad.purged=events.bad.purged) - for(i in 1:length(dates)){dates[[i]][,1] <- as.character(dates[[i]][,1])} - return(dates) -} - -##---------------------- -## Getting ees inference -##---------------------- -## Event study plot for EES (extreme event studies) -## Input: Output of GCF -## eventLists: Output of eesDates -eesInference <- function(input, eventLists, to.remap, remap, width, - inference = TRUE, inference.strategy = "bootstrap"){ - inf <- list() - ## Computing inference - ## Normal - # Good days - inf$good.normal <- eventstudy(input, eventList=eventLists$events.good.normal, - type="None", to.remap=to.remap, - remap=remap, width=width, inference=inference, - inference.strategy=inference.strategy) - # Bad days - inf$bad.normal <- eventstudy(input, eventList=eventLists$events.bad.normal, - type="None", to.remap=to.remap, - remap=remap, width=width, inference=inference, - inference.strategy=inference.strategy) - ## Purged - # Good days - inf$good.purged <- eventstudy(input, eventList=eventLists$events.good.purged, - type="None", to.remap=to.remap, - remap=remap, width=width, inference=inference, - inference.strategy=inference.strategy) - # Bad days 
- inf$bad.purged <- eventstudy(input, eventList=eventLists$events.bad.purged, - type="None", to.remap=to.remap, - remap=remap, width=width, inference=inference, - inference.strategy=inference.strategy) - - class(inf) <- "ees" - return(inf) -} - -plot.ees <- function(x, xlab = NULL, ...){ - ## assign own labels if they're missing - if (is.null(xlab)) { - xlab <- "Event time" - } - ## Inference - es.good.normal <- x$good.normal$eventstudy.output - es.bad.normal <- x$bad.normal$eventstudy.output - es.good.purged <- x$good.purged$eventstudy.output - es.bad.purged <- x$bad.purged$eventstudy.output - # Width - width <- (NROW(x[[1]]$eventstudy.output)-1)/2 - ##--------------- - ## Plotting graph - ##--------------- - big.normal <- max(abs(cbind(es.good.normal,es.bad.normal))) - big.purged <- max(abs(cbind(es.good.purged,es.bad.purged))) - big <- max(big.normal,big.purged) - ylim.max <- c(-big,big) - - # Plotting graph - par(mfrow=c(1,2)) - - # Plot very good days - plot(-width:width, es.good.normal[,2], type="l", lwd=2, - ylim=ylim.max, col="red", xlab=xlab, - main="Very good days", ...) 
- lines(-width:width, es.good.purged[,2], lwd=2, lty=1,type="l", col="orange") - points(-width:width, es.good.normal[,2], pch=19,col="red") - points(-width:width, es.good.purged[,2], pch=25,col="orange") - lines(-width:width, es.good.normal[,1], lwd=0.8, lty=2, col="red") - lines(-width:width, es.good.normal[,3], lwd=0.8, lty=2, col="red") - lines(-width:width, es.good.purged[,1], lwd=0.8, lty=4, col="orange") - lines(-width:width, es.good.purged[,3], lwd=0.8, lty=4, col="orange") - abline(h=0,v=0) - points(-width:width, es.good.normal[,1], pch=19,col="red") - points(-width:width, es.good.purged[,1],pch=25,col="orange") - points(-width:width, es.good.normal[,3], pch=19,col="red") - points(-width:width, es.good.purged[,3],pch=25,col="orange") - - legend("topleft",legend=c("Un-clustered","Clustered"), - cex=0.7,pch=c(19,25), - col=c("red","orange"),lty=c(1,1),bty="n") - - # Plot very bad days - plot(-width:width, es.bad.normal[,2], type="l", lwd=2, - ylim=ylim.max, col="red", xlab=xlab, - main = "Very bad days",...) 
- lines(-width:width, es.bad.purged[,2], lwd=2, lty=1,type="l", col="orange") - points(-width:width, es.bad.normal[,2], pch=19,col="red") - points(-width:width, es.bad.purged[,2], pch=25,col="orange") - lines(-width:width, es.bad.normal[,1], lwd=0.8, lty=2, col="red") - lines(-width:width, es.bad.normal[,3], lwd=0.8, lty=2, col="red") - lines(-width:width, es.bad.purged[,1], lwd=0.8, lty=2, col="orange") - lines(-width:width, es.bad.purged[,3], lwd=0.8, lty=2, col="orange") - points(-width:width, es.bad.normal[,1], pch=19,col="red") - points(-width:width, es.bad.purged[,1], pch=25,col="orange") - points(-width:width, es.bad.normal[,3], pch=19,col="red") - points(-width:width, es.bad.purged[,3], pch=25,col="orange") - - abline(h=0,v=0) - legend("topleft",legend=c("Un-clustered","Clustered"), - cex=0.7,pch=c(19,25), - col=c("red","orange"),lty=c(1,1),bty="n") -} - -#-------------------------- -# Suppress the messages -deprintize<-function(f){ - return(function(...) {capture.output(w<-f(...));return(w);}); -} Copied: pkg/R/eesInference.R (from rev 324, pkg/R/ees.R) =================================================================== --- pkg/R/eesInference.R (rev 0) [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/eventstudies -r 325 From noreply at r-forge.r-project.org Wed May 14 12:30:06 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 14 May 2014 12:30:06 +0200 (CEST) Subject: [Eventstudies-commits] r326 - pkg/man Message-ID: <20140514103006.F05DC186D18@r-forge.r-project.org> Author: vikram Date: 2014-05-14 12:30:05 +0200 (Wed, 14 May 2014) New Revision: 326 Removed: pkg/man/ees.Rd Log: Removed old documentation file of ees function Deleted: pkg/man/ees.Rd =================================================================== --- pkg/man/ees.Rd 2014-05-14 10:29:01 UTC (rev 325) +++ pkg/man/ees.Rd 2014-05-14 10:30:05 UTC (rev 326) @@ -1,92 +0,0 @@ -\name{eesSummary} -\alias{eesSummary} - -\title{ Summary statistics 
of extreme events } - -\description{ - This function generates summary statistics for identification and - analysis of extreme events. -} - -\usage{ - eesSummary(input, prob.value) -} - -\arguments{ - - \item{input}{a univariate \pkg{zoo} or \pkg{xts} time series object} - - \item{prob.value}{The value (in percent) on the probability - distribution to define a tail event.} -} - -\details{ - Tail (Rare) events are often the object of interest in finance. These - events are defined as those that have a low probability of - occurrence. This function identifies such events based on - \sQuote{prob.value} mentioned by the user and generates summary - statistics about the events. If \sQuote{prob.value} is 2.5\%, events - below 2.5\% (lower tail) and above 97.5\% (upper tail) of the - distribution are identified as extreme events. - - Following statistics is generated for both lower and upper tail - events: - - \itemize{ - \item \sQuote{extreme.event.distribution} provides summary - statistics on the number of consecutive events (\dQuote{clustered} - events) and those that are not (\dQuote{unclustered} - events). Consecutive events that are \dQuote{mixed}, i.e., with - upper (lower) tail event occurring after a lower (upper) tail event, - are classified separately. - - \item \sQuote{runlength}: When events are \dQuote{clustered}, - \sQuote{runlength} classifies such clusters into different duration - bins. - - \item \sQuote{quantile.values}: Within such events, - \sQuote{quantile.values} provide the probability distribution values - at 0\%, 25\%, 50\%, 75\% and 100\%, alongside the mean. - - \item \sQuote{yearly.extreme.event}: A year-wise tabulation of such - extreme events, with a clustered event taken as one event. 
- } -} - -\value{ A \code{list} object containing: - - \item{data.summary}{a \sQuote{data.frame} containing summary of - the data set minimum, maximum, inter-quartile range, mean, median, - standard deviation and quantile values at 5\%, 25\%, 75\% and 95\%.} - - \item{lower.tail}{a \sQuote{list} that contains - \sQuote{extreme.event.distribution}, \sQuote{runlength}, - \sQuote{quantile.values} and \sQuote{yearly.extreme.event} for the - events on the lower tail of the distribution. See - \sQuote{Details}.} - - \item{upper.tail}{a \sQuote{list} that contains - \sQuote{extreme.event.distribution}, \sQuote{runlength}, - \sQuote{quantile.values} and \sQuote{yearly.extreme.event} for the - events on the upper tail of the distribution. See - \sQuote{Details}.} -} - -\references{ - \cite{Ila Patnaik, Nirvikar Singh and Ajay Shah (2013). - Foreign Investors under stress: Evidence from - India. - International Finance, 16(2), 213-244. - \url{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} - \url{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} - } -} - -\author{Vikram Bahure, Vimal Balasubramaniam} - -\examples{ -data(OtherReturns) - -r <- eesSummary(OtherReturns$SP500, prob.value = 5) -str(r, max.level = 2) -} From noreply at r-forge.r-project.org Wed May 14 16:40:00 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 14 May 2014 16:40:00 +0200 (CEST) Subject: [Eventstudies-commits] r327 - pkg/man Message-ID: <20140514144000.9E5FF183A24@r-forge.r-project.org> Author: chiraganand Date: 2014-05-14 16:40:00 +0200 (Wed, 14 May 2014) New Revision: 327 Modified: pkg/man/eventstudy.Rd Log: Fixed the example, added information on lmAMM arguments. 
Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-14 10:30:05 UTC (rev 326) +++ pkg/man/eventstudy.Rd 2014-05-14 14:40:00 UTC (rev 327) @@ -146,46 +146,55 @@ - nlags \cr - dates \cr - verbose \cr + + Note: arguments are directly passed to \sQuote{makeX}, see + \code{\link{lmAMM}} for more details. \cr } } \value{ - A list with class attribute \dQuote{es} holding the - following elements: + A list with class attribute \dQuote{es} holding the following + elements: - \item{eventstudy.output}{ - a \sQuote{matrix} containing mean (bootstrap) or median (with - wilcoxon) estimate with confidence interval; \sQuote{NULL} if there - are no \dQuote{success} \dQuote{outcomes}. - } + \itemize{ + \item{eventstudy.output}{ + a \sQuote{matrix} containing mean (bootstrap) or median (with + wilcoxon) estimate with confidence interval; \sQuote{NULL} if there + are no \dQuote{success} \dQuote{outcomes}. + } - \item{outcomes}{a character vector that is the output from - \code{\link{phys2eventtime}} containing details of the successful use - of an event: - - \itemize{ - \item{success: shows the successful use of event date.} - \item{wdatamissing: appears when width data is missing around the - event. This will not appear when this function is used since the - argument \sQuote{width} in \code{\link{phys2eventtime}} is set to zero.} - \item{wrongspan: if event date falls outside the range of physical date.} - \item{unitmissing: when the unit (firm name) is missing in the event list.} + \item{outcomes}{a character vector that is the output from + \code{\link{phys2eventtime}} containing details of the successful use + of an event: + + \itemize{ + \item{success: shows the successful use of event date.} + \item{wdatamissing: appears when width data is missing around the + event. 
This will not appear when this function is used since the + argument \sQuote{width} in \code{\link{phys2eventtime}} is set to zero.} + \item{wrongspan: if event date falls outside the range of physical date.} + \item{unitmissing: when the unit (firm name) is missing in the event list.} + } } - } - \item{inference}{ - a \sQuote{character} providing information about which inference - strategy was utilised to estimate the confidence intervals. - } + \item{inference}{ + a \sQuote{character} providing information about which inference + strategy was utilised to estimate the confidence intervals. + } - \item{width}{ - a \sQuote{numeric} specifying the window width for event study output. + \item{width}{ + a \sQuote{numeric} specifying the window width for event study output. + } + + \item{remap}{ + a \sQuote{character} specifying the remapping technique + used. Options are mentioned in \dQuote{remap} argument description. + } } - \item{remap}{ - a \sQuote{character} specifying the remapping technique - used. Options are mentioned in \dQuote{remap} argument description. - } + Function \sQuote{print.es} is provided to print the coefficients and + exposures of the analysis. \sQuote{plot.es} is used to plot the model + residuals and firm returns. 
} \author{Ajay Shah, Chirag Anand, Vikram Bahure, Vimal Balasubramaniam} @@ -217,7 +226,7 @@ str(es) plot(es) -## Event study using Augment Market Model +## Event study using Augmented Market Model data("OtherReturns") events <- data.frame(outcome.unit = c("Infosys", "TCS"), @@ -226,7 +235,7 @@ ammdata <- merge.zoo(Infosys = StockPriceReturns$Infosys, TCS = StockPriceReturns$TCS, - NiftyIndex, + NiftyIndex = OtherReturns$NiftyIndex, INRUSD = OtherReturns$INRUSD, CallMoneyRate = OtherReturns$CallMoneyRate, all = FALSE) From noreply at r-forge.r-project.org Wed May 14 19:12:10 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 14 May 2014 19:12:10 +0200 (CEST) Subject: [Eventstudies-commits] r328 - in pkg: data man Message-ID: <20140514171210.57FC5183FDA@r-forge.r-project.org> Author: chiraganand Date: 2014-05-14 19:12:10 +0200 (Wed, 14 May 2014) New Revision: 328 Modified: pkg/data/OtherReturns.rda pkg/data/StockPriceReturns.rda pkg/man/StockPriceReturns.Rd pkg/man/eventstudy.Rd Log: Changed other returns and stock price returns data set, limited to 2.5 years. Made the eventstudy example simpler. 
Modified: pkg/data/OtherReturns.rda =================================================================== (Binary files differ) Modified: pkg/data/StockPriceReturns.rda =================================================================== (Binary files differ) Modified: pkg/man/StockPriceReturns.Rd =================================================================== --- pkg/man/StockPriceReturns.Rd 2014-05-14 14:40:00 UTC (rev 327) +++ pkg/man/StockPriceReturns.Rd 2014-05-14 17:12:10 UTC (rev 328) @@ -5,8 +5,7 @@ \title{Stock price returns data} \description{This data set contains stock price returns (in per cent) of - 30 major stocks on the National Stock Exchange (NSE) of India for a - period of 23 years.} + 30 major stocks on the National Stock Exchange (NSE) of India.} \usage{data(StockPriceReturns)} Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-14 14:40:00 UTC (rev 327) +++ pkg/man/eventstudy.Rd 2014-05-14 17:12:10 UTC (rev 328) @@ -213,6 +213,7 @@ \examples{ data("StockPriceReturns") data("SplitDates") +data("OtherReturns") ## Event study without adjustment es <- eventstudy(firm.returns = StockPriceReturns, @@ -227,21 +228,11 @@ plot(es) ## Event study using Augmented Market Model -data("OtherReturns") - events <- data.frame(outcome.unit = c("Infosys", "TCS"), event.when = c("2012-04-01", "2012-06-01"), stringsAsFactors = FALSE) -ammdata <- merge.zoo(Infosys = StockPriceReturns$Infosys, - TCS = StockPriceReturns$TCS, - NiftyIndex = OtherReturns$NiftyIndex, - INRUSD = OtherReturns$INRUSD, - CallMoneyRate = OtherReturns$CallMoneyRate, - all = FALSE) -ammdata <- window(ammdata, start = "2012-02-01", end = "2012-12-31") - -es <- eventstudy(firm.returns = ammdata[, c("Infosys", "TCS")], +es <- eventstudy(firm.returns = StockPriceReturns, eventList = events, width = 10, type = "lmAMM", @@ -250,8 +241,8 @@ inference = TRUE, inference.strategy = "bootstrap", # model arguments - 
market.returns = ammdata[, "NiftyIndex"], - others = ammdata[, c("INRUSD", "CallMoneyRate")], + market.returns = OtherReturns[, "NiftyIndex"], + others = OtherReturns[, c("USDINR", "CallMoneyRate")], market.returns.purge = TRUE ) str(es) From noreply at r-forge.r-project.org Wed May 14 20:19:31 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 14 May 2014 20:19:31 +0200 (CEST) Subject: [Eventstudies-commits] r329 - pkg/inst/tests Message-ID: <20140514181931.D96231857B0@r-forge.r-project.org> Author: chiraganand Date: 2014-05-14 20:19:31 +0200 (Wed, 14 May 2014) New Revision: 329 Added: pkg/inst/tests/test_INR.rda Modified: pkg/inst/tests/test_inr_inference.R pkg/inst/tests/test_marketresiduals.R Log: Added old INR data only for testing INR inference, use the local test data for market residual test code. Added: pkg/inst/tests/test_INR.rda =================================================================== (Binary files differ) Property changes on: pkg/inst/tests/test_INR.rda ___________________________________________________________________ Added: svn:mime-type + application/x-xz Modified: pkg/inst/tests/test_inr_inference.R =================================================================== --- pkg/inst/tests/test_inr_inference.R 2014-05-14 17:12:10 UTC (rev 328) +++ pkg/inst/tests/test_inr_inference.R 2014-05-14 18:19:31 UTC (rev 329) @@ -3,7 +3,7 @@ test_that("test.inr.inference", { library(eventstudies) -load(system.file("data", "INR.rda",package = "eventstudies")) +load("test_INR.rda") inr_returns <- diff(log(INR))[-1] @@ -16,7 +16,7 @@ ) ) -event_time_data <- phys2eventtime(inr_returns,eventslist,width=10) +event_time_data <- phys2eventtime(inr_returns[, , drop = FALSE] , eventslist,width=10) w <- window(event_time_data$z.e,start=-10,end=10) expect_that(inference.bootstrap(w, to.plot=FALSE)[,2], Modified: pkg/inst/tests/test_marketresiduals.R =================================================================== --- 
pkg/inst/tests/test_marketresiduals.R 2014-05-14 17:12:10 UTC (rev 328) +++ pkg/inst/tests/test_marketresiduals.R 2014-05-14 18:19:31 UTC (rev 329) @@ -3,8 +3,8 @@ test_that("test.market.residuals", { library(eventstudies) -load(system.file("data", "StockPriceReturns.rda", package = "eventstudies")) -load(system.file("data", "NiftyIndex.rda", package = "eventstudies")) +load("test_StockPriceReturns.rda") +load("test_NiftyIndex.rda") alldata <- merge(StockPriceReturns, NiftyIndex, all = TRUE) StockPriceReturns <- alldata[,-which(colnames(alldata) %in% "NiftyIndex")] From noreply at r-forge.r-project.org Wed May 14 20:56:58 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 14 May 2014 20:56:58 +0200 (CEST) Subject: [Eventstudies-commits] r330 - pkg/R Message-ID: <20140514185658.D95E2186CF3@r-forge.r-project.org> Author: chiraganand Date: 2014-05-14 20:56:58 +0200 (Wed, 14 May 2014) New Revision: 330 Modified: pkg/R/eventstudy.R Log: Fixed sapply returning a list, fill NAs with 0s after converting to event time. 
Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-14 18:19:31 UTC (rev 329) +++ pkg/R/eventstudy.R 2014-05-14 18:56:58 UTC (rev 330) @@ -49,15 +49,15 @@ } else { ## More than one firm # Extracting and merging - tmp.resid <- sapply(colnames(firm.returns), function(y) + tmp.resid <- lapply(colnames(firm.returns), function(y) { timeseriesAMM(firm.returns = firm.returns[,y], X = regressors, verbose = FALSE, nlags = 1) }) - outputModel <- zoo(tmp.resid, - order.by = as.Date(rownames(tmp.resid))) + names(tmp.resid) <- colnames(firm.returns) + outputModel <- do.call(merge.zoo, tmp.resid) } } ## end AMM @@ -93,6 +93,9 @@ cn.names <- eventList[which(es$outcomes=="success"),1] } + ## replace NAs with 0 as it's returns now + es.w <- na.fill(es.w, 0) + if(length(cn.names)==1){ cat("Event date exists only for",cn.names,"\n") if (inference == TRUE) { From noreply at r-forge.r-project.org Thu May 15 01:11:10 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 01:11:10 +0200 (CEST) Subject: [Eventstudies-commits] r331 - pkg/man Message-ID: <20140514231110.424C9181123@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 01:11:09 +0200 (Thu, 15 May 2014) New Revision: 331 Modified: pkg/man/eventstudy.Rd Log: Added information on na.fill, added market model example. Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-14 18:56:58 UTC (rev 330) +++ pkg/man/eventstudy.Rd 2014-05-14 23:11:09 UTC (rev 331) @@ -87,6 +87,8 @@ series, use \sQuote{[} with \code{drop = FALSE} for subsetting the data set. See \code{\link{phys2eventtime}} for more details. + \sQuote{NA} values in the returns data are converted to \code{0}. 
+ \dQuote{type} currently supports: \itemize{ \item{\dQuote{marketResidual}: uses \code{\link{marketResidual}} @@ -103,7 +105,8 @@ } Arguments to a model type can be sent inside \sQuote{...}. See - \sQuote{Model arguments} section for details on accepted fields. + \sQuote{Model arguments} section for details on accepted + fields. \dQuote{remap} can take three values: \itemize{ @@ -215,7 +218,7 @@ data("SplitDates") data("OtherReturns") -## Event study without adjustment + # Event study without adjustment es <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, width = 10, @@ -227,7 +230,21 @@ str(es) plot(es) -## Event study using Augmented Market Model + # Event study using Market Model +es <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "marketResidual", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap", + market.returns = OtherReturns[, "NiftyIndex"], + ) +str(es) +plot(es) + + # Event study using Augmented Market Model events <- data.frame(outcome.unit = c("Infosys", "TCS"), event.when = c("2012-04-01", "2012-06-01"), stringsAsFactors = FALSE) From noreply at r-forge.r-project.org Thu May 15 01:12:29 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 01:12:29 +0200 (CEST) Subject: [Eventstudies-commits] r332 - pkg/man Message-ID: <20140514231230.04AB8180450@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 01:12:29 +0200 (Thu, 15 May 2014) New Revision: 332 Modified: pkg/man/eesInference.Rd Log: Fixed some parenthesis. 
Modified: pkg/man/eesInference.Rd =================================================================== --- pkg/man/eesInference.Rd 2014-05-14 23:11:09 UTC (rev 331) +++ pkg/man/eesInference.Rd 2014-05-14 23:12:29 UTC (rev 332) @@ -4,7 +4,7 @@ \title{Extreme event study inference estimation} \description{This function performs event study analysis on extreme event dates - (sQuote(eesDates)) and formatted output (\sQuote{get.clusters.formatted}) + (\sQuote{eesDates}) and formatted output (\sQuote{get.clusters.formatted}) } \usage{ @@ -52,7 +52,7 @@ dates are obtained from function \sQuote{eesDates}. The function also estimates confidence interval using different inference strategies (\sQuote{bootstrap,wilcoxon}). The functionalities are similar to - \sQuote{eventstudy) function without market model adjustment and \sQuote{input} + \sQuote{eventstudy} function without market model adjustment and \sQuote{input} is output of \sQuote{get.clusters.formatted}, not \sQuote{firm.returns}. } From noreply at r-forge.r-project.org Thu May 15 01:13:29 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 01:13:29 +0200 (CEST) Subject: [Eventstudies-commits] r333 - pkg/vignettes Message-ID: <20140514231330.01793186A0C@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 01:13:29 +0200 (Thu, 15 May 2014) New Revision: 333 Modified: pkg/vignettes/new.Rnw Log: Added code to compare AMM output. Modified: pkg/vignettes/new.Rnw =================================================================== --- pkg/vignettes/new.Rnw 2014-05-14 23:12:29 UTC (rev 332) +++ pkg/vignettes/new.Rnw 2014-05-14 23:13:29 UTC (rev 333) @@ -89,7 +89,7 @@ While daily returns data has been supplied, the standard event study deals with cumulated returns. In order to achieve this, we set -to.remap to TRUE and we ask that this remapping be done using cumsum. +to.remap to \code{TRUE} and we ask that this remapping be done using cumsum. 
Finally, we come to inference strategy. We instruct eventstudy to do inference and ask for bootstrap inference. @@ -103,7 +103,7 @@ The object returned by eventstudy is of class `es'. It is a list with five components. Three of these are just a record of the way -eventstudy() was run: the inference procedure adopted (bootstrap +\code{eventstudy()} was run: the inference procedure adopted (bootstrap inference in this case), the window width (10 in this case) and the method used for mapping the data (cumsum). The two new things are `outcomes' and `eventstudy.output'. @@ -243,9 +243,13 @@ interval at date 0 as a measure of efficiency. <>= -tmp <- rbind(es$eventstudy.output[10,], es.mm$eventstudy.output[10,])[,c(1,3)] -rownames(tmp) <- c("None","MM") -tmp[,2]-tmp[,1] +tmp <- rbind(es$eventstudy.output[10, ], + es.mm$eventstudy.output[10, ], + es.amm$eventstudy.output[10, ] + )[,c(1,3)] +rownames(tmp) <- c("None","MM", "AMM") +print(tmp["MM", ] - tmp["None", ]) +print(tmp["AMM", ] - tmp["None", ]) @ This shows a sharp reduction in the width of the bootstrap 95\% From noreply at r-forge.r-project.org Thu May 15 01:40:06 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 01:40:06 +0200 (CEST) Subject: [Eventstudies-commits] r334 - pkg/R Message-ID: <20140514234006.A0EE5183A24@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 01:40:06 +0200 (Thu, 15 May 2014) New Revision: 334 Modified: pkg/R/eventstudy.R Log: Moved the input args from the output list into attributes; modified print and plot functions accordingly. 
Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-14 23:13:29 UTC (rev 333) +++ pkg/R/eventstudy.R 2014-05-14 23:40:06 UTC (rev 334) @@ -135,10 +135,14 @@ result <- es.w } if(to.remap==TRUE){remapping <- remap} else {remapping <- "none"} - final.result <- list(eventstudy.output=result, - outcomes=as.character(es$outcomes), - inference=inference.strategy, - width=width, remap=remapping) + + final.result <- list(eventstudy.output = result, + outcomes = as.character(es$outcomes)) + + attr(final.result, which = "inference") <- inference.strategy + attr(final.result, which = "width") <- width + attr(final.result, which = "remap") <- remapping + class(final.result) <- "es" return(final.result) } @@ -149,7 +153,7 @@ print.es <- function(x, ...){ cat("Event study", colnames(x$eventstudy.output)[2], "response with", - x$inference, "inference for CI:\n") + attr(x, "inference"), "inference for CI:\n") print(x$eventstudy.output) cat("\n","Event outcome has",length(which(x$outcomes=="success")), "successful outcomes out of", length(x$outcomes),"events:","\n") @@ -171,11 +175,11 @@ ## assign own labels if they're missing if (is.null(ylab)) { - if (x$remap == "cumsum") { + if (attr(x, "remap") == "cumsum") { remapLabel <- "Cum." - } else if (x$remap == "cumprod") { + } else if (attr(x, "remap") == "cumprod") { remapLabel <- "Cum. 
product" - } else if (x$remap == "reindex") { + } else if (attr(x, "remap") == "reindex") { remapLabel <- "Re-index" } else { remapLabel <- "" From noreply at r-forge.r-project.org Thu May 15 01:55:20 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 01:55:20 +0200 (CEST) Subject: [Eventstudies-commits] r335 - pkg/vignettes Message-ID: <20140514235520.3F95E186CFA@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 01:55:20 +0200 (Thu, 15 May 2014) New Revision: 335 Modified: pkg/vignettes/new.Rnw Log: Fixed code formatting. Modified: pkg/vignettes/new.Rnw =================================================================== --- pkg/vignettes/new.Rnw 2014-05-14 23:40:06 UTC (rev 334) +++ pkg/vignettes/new.Rnw 2014-05-14 23:55:20 UTC (rev 335) @@ -89,7 +89,7 @@ While daily returns data has been supplied, the standard event study deals with cumulated returns. In order to achieve this, we set -to.remap to \code{TRUE} and we ask that this remapping be done using cumsum. +to.remap to \emph{TRUE} and we ask that this remapping be done using cumsum. Finally, we come to inference strategy. We instruct eventstudy to do inference and ask for bootstrap inference. @@ -103,7 +103,7 @@ The object returned by eventstudy is of class `es'. It is a list with five components. Three of these are just a record of the way -\code{eventstudy()} was run: the inference procedure adopted (bootstrap +\emph{eventstudy()} was run: the inference procedure adopted (bootstrap inference in this case), the window width (10 in this case) and the method used for mapping the data (cumsum). The two new things are `outcomes' and `eventstudy.output'. 
@@ -247,7 +247,7 @@ es.mm$eventstudy.output[10, ], es.amm$eventstudy.output[10, ] )[,c(1,3)] -rownames(tmp) <- c("None","MM", "AMM") +rownames(tmp) <- c("None", "MM", "AMM") print(tmp["MM", ] - tmp["None", ]) print(tmp["AMM", ] - tmp["None", ]) @ From noreply at r-forge.r-project.org Thu May 15 02:17:36 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 02:17:36 +0200 (CEST) Subject: [Eventstudies-commits] r336 - pkg/vignettes Message-ID: <20140515001736.4A76E186AB1@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 02:17:35 +0200 (Thu, 15 May 2014) New Revision: 336 Removed: pkg/vignettes/es.bib pkg/vignettes/new.Rnw Modified: pkg/vignettes/eventstudies.Rnw pkg/vignettes/eventstudies.bib Log: Added citation link of AMM, got the vignette to work, renamed files. Deleted: pkg/vignettes/es.bib =================================================================== --- pkg/vignettes/es.bib 2014-05-14 23:55:20 UTC (rev 335) +++ pkg/vignettes/es.bib 2014-05-15 00:17:35 UTC (rev 336) @@ -1,48 +0,0 @@ - at Article{MacKinlay1997, - author = {A. Craig MacKinlay}, - title = {Event Studies in Economics and Finance}, - journal = {Journal of Economic Literature}, - year = 1997, - volume = {XXXV}, - pages = {13-39}} - - - at Article{Corrado2011, - author = {Charles J. 
Corrado}, - title = {Event studies: A methodology review}, - journal = {Accounting and Finance}, - year = 2011, - volume = 51, - pages = {207-234}} - - at Article{PatnaikShahSingh2013, - author = {Patnaik, Ila and Shah, Ajay and Singh, Nirvikar}, - title = {Foreign Investors Under Stress: Evidence from India }, - journal = {International Finance}, - year = 2013, -volume = 16, -number= 2, -pages = {213-244} -} - - at article{davison1986efficient, - title={Efficient bootstrap simulation}, - author={Davinson, AC and Hinkley, DV and Schechtman, E}, - journal={Biometrika}, - volume={73}, - number={3}, - pages={555--566}, - year={1986}, - publisher={Biometrika Trust} -} - - at article{brown1985using, - title={Using daily stock returns: The case of event studies}, - author={Brown, Stephen J and Warner, Jerold B}, - journal={Journal of financial economics}, - volume={14}, - number={1}, - pages={3--31}, - year={1985}, - publisher={Elsevier} -} Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-14 23:55:20 UTC (rev 335) +++ pkg/vignettes/eventstudies.Rnw 2014-05-15 00:17:35 UTC (rev 336) @@ -8,372 +8,256 @@ \usepackage{parskip} \usepackage{amsmath} \title{Introduction to the \textbf{eventstudies} package in R} -\author{Vikram Bahure \and Vimal Balasubramaniam \and Ajay Shah\thanks{We thank - Chirag Anand for valuable inputs in the creation of this vignette.}} +\author{Ajay Shah} \begin{document} -% \VignetteIndexEntry{eventstudies: A package with functionality to do Event Studies} -% \VignetteDepends{} -% \VignetteKeywords{eventstudies} -% \VignettePackage{eventstudies} \maketitle \begin{abstract} - Event study analysis is an important tool in the econometric - analysis of an event and its impact on a measured - outcome. Although widely used in finance, it is a generic tool - that can be used in other disciplines as well. 
There is, however, - no single repository to undertake such an analysis with - R. \texttt{eventstudies} provides the toolbox to carry out an - event-study analysis. It contains functions to transform data - into the event-time frame and procedures for statistical - inference. In this vignette, we provide an example from the field of finance and - utilise the rich features of this package. \end{abstract} - \SweaveOpts{engine=R,pdf=TRUE} -\section{Introduction} +\section{The standard event study in finance} -Event study methodology has been primarily used to evaluate the impact of specific events on the value of a firm. The typical procedure for conducting an event study involves -\citep{MacKinlay1997}: -\begin{enumerate} -\item Defining the event of interest and the event window. The event window should be larger than the specific period of interest. -\item Determining a measure of abnormal returns, the most common being the \textit{constant mean return model} and the \textit{market model}. This is important to disentangle the effects on stock prices of information that is specific to the firm under question (e.g. stock split announcement) and information that is likely to affect all stock prices (e.g. interest rates). -\item Analysis of firm returns on or after the event date. -\end{enumerate} +In this section, we look at using the eventstudies package for the +purpose of doing the standard event study using daily returns data in +financial economics. This is a workhorse application of event +studies. The treatment here assumes knowledge of event studies +\citep{Corrado2011}. -The \textbf{eventstudies} package brings together the various aspects of an event study analysis in one package. It provides for functions to calculate returns, transform data into event-time, and inference procedures. All functions in this package are implemented in the R system for statistical computing. 
The package, and R are available at no cost under the terms of the general public license (GPL) from the comprehensive R archive network (CRAN, \texttt{http://CRAN.R-project.org}). +To conduct an event study, you must have a list of firms with +associated dates, and you must have returns data for these +firms. These dates must be stored as a simple data frame. To +illustrate this, we use the object `SplitDates' in the package which +is used for doing examples. -This paper is organised as follows. A skeletal event study model is presented in Section \ref{s:model}. Section \ref{s:approach} discusses the software approach used in this package. Section \ref{s:example} shows an example. +<>= +library(eventstudies) +data(SplitDates) # The sample +str(SplitDates) # Just a data frame +head(SplitDates) +@ -\section{Skeletal event study model} \label{s:model} +The representation of dates is a data frame with two columns. The +first column is the name of the unit of observation which experienced +the event. The second column is the event date. -In this section, we present a model to evaluate the impact of stock splits on returns \citep{Corrado2011}. +The second thing that is required for doing an event study is data for +stock price returns for all the firms. The sample dataset supplied in +the package is named `StockPriceReturns': -Let day $0$ identify the stock split date under scrutiny and let days t = $...,-3,-2,-1$ represent trading days leading up to the event. If the return on the firm with the stock split $R_o$ is statistically large compared to returns on previous dates, we may conclude that the stock split event had a significant price impact. +<>= +data(StockPriceReturns) # The sample +str(StockPriceReturns) # A zoo object +head(StockPriceReturns,3) # Time series of dates and returns. 
+@ -To disentangle the impact of the stock split on the returns of the firm from general market-wide information, we use the market-model to adjust the event-date return, thus removing the influence of market information. +The StockPriceReturns object is thus a zoo object which is a time +series of daily returns. These are measured in per cent, i.e. a value +of +4 is returns of +4\%. The zoo object has many columns of returns +data, one for each unit of observation which, in this case, is a +firm. The column name of the zoo object must match the firm name +(i.e. the name of the unit of observation) in the list of events. -The market model is calculated as follows: +The package gracefully handles the three kinds of problems encountered +with real world data: (a) a firm where returns is observed but there +is no event, (b) a firm with an event where returns data is lacking +and (c) a stream of missing data in the returns data surrounding the +event date. -\[ R_t = a + b RM_t + e_t \] +With this in hand, we are ready to run our first event study, using +raw returns: -The firm-specific return $e_t$ is unrelated to the overall market and has an expected value of zero. Hence, the expected event date return conditional on the event date market return is +<>= +es <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "None", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap") +@ -\[ E(R_0|RM_0) = a + b RM_0 \] +This runs an event study using events listed in SplitDates, and using +returns data for the firms in StockPriceReturns. An event window of 10 +days is analysed. -The abnormal return $A_0$ is simply the day-zero firm-specific return $e_0$: +Event studies with returns data typically do some kind of adjustment +of the returns data in order to reduce variance. 
In order to keep +things simple, in this first event study, we are doing no adjustment, +which is done by setting `type' to ``None''. -\[ A_0 = R_0- E(R_0|RM_0) = R_0 - a - b RM_0 \] +While daily returns data has been supplied, the standard event study +deals with cumulated returns. In order to achieve this, we set +to.remap to \emph{TRUE} and we ask that this remapping be done using cumsum. -A series of abnormal returns from previous periods are also calculated for comparison, and to determine statistical significance. +Finally, we come to inference strategy. We instruct eventstudy to do +inference and ask for bootstrap inference. -\[ A_t = R_t- E(R_t|RM_t) = R_t - a - b RM_t \] +Let us peek and poke at the object `es' that is returned. -The event date abnormal return $A_0$ is then assessed for statistical significance relative to the distribution of abnormal returns $A_t$ in the control period. A common assumption used to formulate tests of statistical significance is that abnormal returns are normally distributed. However, such distributional assumptions may not be necessary with non-parametric procedures. For detailed exposition on the theoretical framework of eventstudies, please refer to % Insert Corrado (2011) and Campbell, Lo, McKinlay ``Econometrics of Financial Markets'' +<>= +class(es) +str(es) +@ -\section{Software approach} \label{s:approach} +The object returned by eventstudy is of class `es'. It is a list with +five components. Three of these are just a record of the way +\emph{eventstudy()} was run: the inference procedure adopted (bootstrap +inference in this case), the window width (10 in this case) and the +method used for mapping the data (cumsum). The two new things are +`outcomes' and `eventstudy.output'. -\textbf{eventstudies} offers the following functionalities: +The vector `outcomes' shows the disposition of each event in the +events table. There are 22 rows in SplitDates, hence there will be 22 +elements in the vector `outcomes'. 
In this vector, `success' denotes a +successful use of the event. When an event cannot be used properly, +various error codes are supplied. E.g. `unitmissing' is reported when +the events table shows an event for a unit of observation where +returns data is not observed. -\begin{itemize} -\item Models for calculating idiosyncratic returns -\item Procedures for converting data from physical time into event time -\item Procedures for inference -\end{itemize} +\begin{figure} +\begin{center} +<>= +par(mai=c(.8,.8,.2,.2)) +plot(es, cex.axis=.7, cex.lab=.7) +@ +\end{center} +\caption{Plot method applied to es object}\label{f:esplot1} +\end{figure} -\subsection{Models for calculating idiosyncratic returns} +% TODO: The x label should be "Event time (days)" and should +% automatically handle other situations like weeks or months or microseconds. +% The y label is much too long. -Firm returns can be calculated using the following functions: +Plot and print methods for the class `es' are supplied. The standard +plot is illustrated in Figure \ref{f:esplot1}. In this case, we see +the 95\% confidence interval is above 0 and below 0 and in no case can +the null of no-effect, compared with the starting date (10 days before +the stock split date), be rejected. -\begin{itemize} -\item \texttt{excessReturn}: estimation of excess returns i.e. $R_j - R_m$ where $R_j$ is the return of firm $j$ and $R_m$ is the market return. - -\item \texttt{marketResidual}: estimation of market model to obtain idiosyncratic firm returns, controlling for the market returns. - -\item \texttt{lmAMM}: estimation of the augmented market model which provides user the capability to run market models with orthogonalisation and obtain idiosyncratic returns. +In this first example, raw stock market returns was utilised in the +event study. It is important to emphasise that the event study is a +statistically valid tool even under these circumstances. 
Averaging +across multiple events isolates the event-related +fluctuations. However, there is a loss of statistical efficiency that +comes from fluctuations of stock prices that can have nothing to do +with firm level news. In order to increase efficiency, we resort to +adjustment of the returns data. -\end{itemize} -The function \texttt{lmAMM} is a generic function that allows users to -run an augmented market model (AMM) by using regressors provided by -\texttt{makeX} function and undertake the analysis of the market model -in a regression setting and obtain idiosyncratic -returns. The auxiliary regression that purges the effect of the -explanatory variables on one another is performed using \texttt{makeX} -function. \texttt{subpperiod.lmAMM} function allows for a single firm -AMM analysis for different periods in the sample. While -\texttt{manyfirmssubperiod.lmAMM}\footnote{User can use this function - to perform AMM for more than one firm by providing argument \textit{dates=NULL}} replicates the -\texttt{subperiod.lmAMM} analysis for more than one firms. +The standard methodology in the literature is to use a market +model. This estimates a time-series regression $r_{jt} = \alpha_j + +\beta_j r_{Mt} + \epsilon_{jt}$ where $r_{jt}$ is returns for firm $j$ +on date $t$, and $r_{Mt}$ is returns on the market index on date +$t$. The market index captures market-wide fluctuations, which have +nothing to do with firm-specific factors. The event study is then +conducted with the cumulated $\epsilon_{jt}$ time series. This yields +improved statistical efficiency as $\textrm{Var}(\epsilon_j) < +\textrm{Var}(r_j)$. -The output of \texttt{lmAMM} function is an list object of class -\texttt{amm}. It includes the linear model output along with AMM -exposure, standard deviation, significance and residuals. These AMM -residuals are further used in event study analysis. 
+This is invoked by setting `type' to `marketResidual': -\subsection{Converting data from physical time into event time} +<>= +data(OtherReturns) +es.mm <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "marketResidual", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap", + market.returns=OtherReturns$NiftyIndex + ) +@ -The conversion of the returns data to event-time, and to cumulate returns is done using the following functions: +In addition to setting `type' to `marketResidual', we are now required +to supply data for the market index, $r_{Mt}$. In the above example, +this is the data object NiftyIndex supplied from the OtherReturns data +object in the package. This is just a zoo vector with daily returns of +the stock market index. -\begin{itemize} -\item \texttt{phys2eventtime}: conversion to an event frame. This requires a time series object of stock price returns (our outcome variable) and a data frame with two columns \textit{outcome.unit} and \textit{event.date}, the firms and the date on which the event occurred respectively. - -\item \texttt{remap.cumsum}: conversion of returns to cumulative returns. The input for this function is the time-series object in event-time that is obtained as the output from \texttt{phys2eventtime}. -\end{itemize} - -The function \texttt{phys2eventtime} is generic and can handle objects -of any time frequency, including intra-day high frequency data. While -\texttt{remap.cumsum} is sufficiently general to be used on any time -series object for which we would like to obtain cumulative values, in -this context, the attempt is to cumulate idiosyncratic returns to -obtain a clear identification of the magnitude and size of the impact -of an event \citep{brown1985using}. 
- -At this point of analysis, we hold one important data object organised in event-time, where each column of this object corresponds to the event on the outcome unit, with values before and after the event organised as before and after $-T,-(T-1),...,-3,-2,-1,0,1,2,3,...,T-1,T$. The package, once again, is very general and allows users to decide on the number of time units before and after the event that must be used for statistical inference. - -\subsection{Procedures for inference} - -Procedures for inference include: -\begin{itemize} - -\item \texttt{inference.wilcox}: estimation of wilcox inference to - generate the distribution of cumulative returns series. - -\item \texttt{inference.bootstrap}: estimation of bootstrap to - generate the distribution of cumulative returns series. -\end{itemize} - -The last stage in the analysis of eventstudies is statistical inference. At present, we have two different inference procedures incorporated into the package. The first of the two, \texttt{inference.wilcox} is a traditional test of inference for eventstudies. The second inference procedure, \texttt{inference.bootstrap} is another non-parametric procedure that exploits the multiplicity of outcome units for which such an event has taken place. For example, a corporate action event such as stock splits may have taken place for many firms (outcome units) at different points in time. This cross-sectional variation in outcome is exploited by the bootstrap inference procedure. - -The inference procedures would generally require no more than the object generated in the second stage of our analysis, for instance, the cumulative returns in event-time (\texttt{es.w}), and will ask whether the user wants a plot of the results using the inference procedure used. - -We intend to expand the suite of inference procedures available for analysis to include the more traditional procedures such as the Patell $t-$test. 
- -\section{Performing eventstudy analysis: An example}\label{s:example} - -In this section, we demonstrate the package with a study of the impact of stock splits on the stock prices of firms. We use the returns series of the thirty index companies, as of 2013, of the Bombay Stock Exchange (BSE), between 2001 and 2013. We also have stock split dates for each firm since 2000. - -Our data consists of a \textit{zoo} object for stock price returns for the thirty firms. This is our ``outcome variable'' of interest, and is called \textit{StockPriceReturns}. Another zoo object, \textit{NiftyIndex}, contains a time series of market returns. - -<<>>= -library(eventstudies) -data(StockPriceReturns) -data(NiftyIndex) -str(StockPriceReturns) -head(StockPriceReturns[rowSums(is.na((StockPriceReturns)))==3,1:3]) -head(NiftyIndex) -@ - -As required by the package, the event date (the dates on which stock splits occured for these 30 firms) for each firm is recorded in \textit{SplitDates} where ``outcome.unit'' is the name of the firm (the column name in ``StockPriceReturns'') and ``event.date'' is when the event took place for that outcome unit. In R, the column ``outcome.unit'' has to be of class ``character'' and ``event.date'' of class ``Date'', as seen below: - -<<>>= -data(SplitDates) -head(SplitDates) -data(INR) -inrusd <- diff(log(INR))*100 -all.data <- merge(StockPriceReturns,NiftyIndex,inrusd,all=TRUE) -StockPriceReturns <- all.data[,-which(colnames(all.data)%in%c("NiftyIndex", "inr"))] -NiftyIndex <- all.data$NiftyIndex -inrusd <- all.data$inr -@ - -\subsection{Calculating idiosyncratic returns} - -Calculating returns, though straightforward, can be done in a variety -of different ways. The function \texttt{excessReturn} calculates the -excess returns while \texttt{marketResidual} calculates the market -model. The two inputs are \texttt{firm.returns} and -\texttt{market.returns}. The results are stored in \texttt{er.result} -and \texttt{mm.result} respectively. 
These are the standard -idiosyncratic return estimation that is possible with this package. - -<<>>= # Excess return -er.result <- excessReturn(firm.returns = StockPriceReturns, market.returns = NiftyIndex) -er.result <- er.result[rowSums(is.na(er.result))!=NCOL(er.result),] -head(er.result[,1:3]) - -@ - -<<>>= # Extracting market residual -mm.result <- marketResidual(firm.returns = StockPriceReturns, market.returns = NiftyIndex) -mm.result <- mm.result[rowSums(is.na(mm.result))!=NCOL(mm.result),] -head(mm.result[,1:3]) - -@ - -To provide flexibility to users, a general regression framework to -estimate idiosyncratic returns, the augmented market model, is also -available. In this case, we would like to purge any currency returns -from the outcome return of interest, and the \textit{a-priori} -expectation is that the variance of the residual is reduced in this -process. In this case, the model requires a time-series of the -exchange rate along with firm returns and market returns. The complete -data set consisting of firm returns, market returns and exchange rate -for the same period\footnote{A balanced data without NAs is preferred} -is first created. - -The first step is to create regressors using market returns and -exchange rate using \texttt{makeX} function. The output of -\texttt{makeX} function is further used in \texttt{lmAMM} along with -firm returns to compute augmented market model residuals. 
- -% AMM model -<<>>= # Create RHS before running lmAMM() -################### -## AMM residuals ## -################### -## Getting Regressors -regressors <- makeX(market.returns=NiftyIndex, others=inrusd, - market.returns.purge=TRUE, nlags=1) -## AMM residual to time series -timeseries.lmAMM <- function(firm.returns,X,verbose=FALSE,nlags=1){ - tmp <- resid(lmAMM(firm.returns,X,nlags)) - tmp.res <- zoo(tmp,as.Date(names(tmp))) -} -## One firm -amm.result <- timeseries.lmAMM(firm.returns=StockPriceReturns[,1], - X=regressors, verbose=FALSE, nlags=1) - -## More than one firm - # Extracting and merging -tmp.resid <- sapply(colnames(StockPriceReturns)[1:3],function(y) - timeseries.lmAMM(firm.returns=StockPriceReturns[,y], - X=regressors, - verbose=FALSE, - nlags=1)) -amm.resid <- do.call("merge",tmp.resid) -@ - -\subsection{Conversion to event-time frame} - -The conversion from physical time into event time combines the two objects we have constructed till now: \textit{SplitDates} and \textit{StockPriceReturns}. These two objects are input matrices for the function \texttt{phys2eventtime}. With the specification of ``width=5'' in the function, we require phys2eventtime to define a successfull unit entry (an event) in the result as one where there is no missing data for 5 days before and after the event. This is marked as ``success'' in the resulting list object. With data missing, the unit is flagged ``wdatamissing''. In case the event falls outside of the range of physical time provided in the input data, the unit entry will be flagged ``wrongspan'' and if the unit in \textit{SplitDates} is missing in \textit{StockPriceReturns}, we identify these entries as ``unitmissing''. This allows the user to identify successful entries in the sample for an analysis based on event time. 
In this example, we make use of successful entries in the data and the output object is stored as \textit{es.w}: - -<<>>= -es <- phys2eventtime(z=StockPriceReturns, events=SplitDates, - width=5) -str(es) -es$outcomes -es.w <- window(es$z.e, start=-5,end=5) -colnames(es.w) <- SplitDates[which(es$outcomes=="success"),1] -SplitDates[1,] -StockPriceReturns[SplitDates[1,2],SplitDates[1,1]] -es.w[,1] -@ - -The identification of impact of such an event on returns is better represented with cumulative returns as the outcome variable. We cumulate returns on this (event) time series object, by applying the function \texttt{remap.cumsum}. - -<<>>= -es.cs <- remap.cumsum(es.w,is.pc=FALSE,base=0) -es.cs[,1] -@ - -The last stage in the analysis of an event-study is that of obtaining statistical confidence with the result by using different statistical inference procedures. - -\subsection{Inference procedures} - -While the package is sufficiently generalised to undertake a wide array of inference procedures, at present it contains only two inference procedures: 1/ The bootstrap and 2/ Wilcoxon Rank test. We look at both in turn below: - -\subsubsection{Bootstrap inference} -We hold an event time object that contains several cross-sectional observations for a single definition of an event: The stock split. At each event time, i.e., $-T,-(T-1),...,0,...,(T-1),T$, we hold observations for 30 stocks. At this point, without any assumption on the distribution of these cross sectional returns, we can generate the sampling distribution for the location estimator (mean in this case) using non-parametric inference procedures. The bootstrap is our primary function in the suite of inference procedures under construction.\footnote{Detailed explanation of the methodology is presented in \citet{PatnaikShahSingh2013}. This specific approach is based on \citet{davison1986efficient}.} - -\textit{inference.bootstrap} performs the bootstrap to generate distribution of $\overline{CR}$. 
The bootstrap generates confidence interval at 2.5 percent and 97.5 percent for the estimate. - -<<>>= -result <- inference.bootstrap(es.w=es.cs, to.plot=FALSE) -print(result) -@ - -\begin{figure}[t] - \begin{center} - \caption{Stock splits event and response of respective stock - returns: Bootstrap CI} - \setkeys{Gin}{width=0.8\linewidth} - \setkeys{Gin}{height=0.8\linewidth} -<>= -es.na.btsp <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, width = 10, to.remap = TRUE, - remap = "cumsum", inference = TRUE, - inference.strategy = "bootstrap", type = "None") -plot(es.na.btsp) -@ - \end{center} - \label{fig:one} +\begin{figure} +\begin{center} +<>= +par(mai=c(.8,.8,.2,.2)) +plot(es.mm, cex.axis=.7, cex.lab=.7) +@ +\end{center} +\caption{Adjustment using the market model}\label{f:esplotmm} \end{figure} -\subsubsection{Wilcoxon signed rank test} -Another non-parametric inference available and is used widely with event study analysis is the Wilcoxon signed rank test. This package provides a wrapper that uses the function \texttt{wilcox.test} in \texttt{stats}. +A comparison of the range of the $y$ axis in Figure \ref{f:esplot1} +versus that seen in Figure \ref{f:esplotmm} shows the substantial +improvement in statistical efficiency that was obtained by market +model adjustment. -<<>>= -result <- inference.wilcox(es.w=es.cs, to.plot=FALSE) -print(result) -@ +We close our treatment of the standard finance event study with one +step forward on further reducing $\textrm{Var}(\epsilon)$ : by doing +an `augmented market model' regression with more than one explanatory +variable. 
The augmented market model uses regressions like: -\begin{figure}[t] - \begin{center} - \caption{Stock splits event and response of respective stock - returns: Wilcoxon CI} - \setkeys{Gin}{width=0.8\linewidth} - \setkeys{Gin}{height=0.8\linewidth} -<>= -es.na.wcx <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, width = 10, to.remap = TRUE, - remap = "cumsum", inference = TRUE, - inference.strategy = "wilcox", type = "None") -plot(es.na.wcx) -@ - \end{center} - \label{fig:two} -\end{figure} +\[ +r_{jt} = \alpha_j + \beta_{1,j} r_{M1,t} + \beta_{2,j} r_{M2,t} + \epsilon_{jt} +\] -\subsection{General eventstudy wrapper} +where in addition to the market index $r_{M1,t}$, there is an +additional explanatory variable $r_{M2,t}$. One natural candidate is +the returns on the exchange rate, but there are many other candidates. -While the general framework to perform an eventstudy analysis has been explained with an example in detail, the package also has a wrapper that makes use of all functions explained above to generate the end result for analysis. While this is a quick mechanism to study events that fit this style of analysis, we encourage users to make use of the core functionalities to extend and use this package in ways the wrapper does not capture. Several examples of this wrapper \texttt{eventstudy}, is provided below for convenience: +An extensive literature has worked out the unique problems of +econometrics that need to be addressed in doing augmented market +models.
The package uses the synthesis of this literature as presented +in \citet{patnaik2010amm}.\footnote{The source code for augmented + market models in the package is derived from the source code written + for \citet{patnaik2010amm}.} -<<>>= -## Event study without adjustment -es.na <- eventstudy(firm.returns = StockPriceReturns, eventList = - SplitDates, width = 10, to.remap = TRUE, - remap = "cumsum", inference = TRUE, - inference.strategy = "wilcoxon", type = "None") - +To repeat the stock splits event study using augmented market models, +we use the incantation: -## Event study using market residual and bootstrap -es.mm <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, - width = 10, to.remap = TRUE, remap = "cumsum", - inference = TRUE, inference.strategy = "bootstrap", - type = "marketResidual", market.returns = NiftyIndex) +<>= +es.amm <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "lmAMM", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap", + market.returns=OtherReturns$NiftyIndex, + others=OtherReturns$USDINR, + market.returns.purge=TRUE + ) +@ -## Event study using excess return and bootstrap -es.er <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, - width = 10, to.remap = TRUE, remap = "cumsum", - inference = TRUE, inference.strategy = "bootstrap", - type = "excessReturn", market.returns = NiftyIndex) +Here the additional regressor on the augmented market model is the +returns on the exchange rate, which is the slot USDINR in +OtherReturns. The full capabilities for doing augmented market models +from \citet{patnaik2010amm} are available. These are documented +elsewhere. For the present moment, we will use the feature +market.returns.purge without explaining it. 
-## Event study using augmented market model (AMM) and bootstrap -es.amm <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, - width = 10, to.remap = TRUE, remap = "cumsum", - inference = TRUE, inference.strategy = "bootstrap", - type = "lmAMM", market.returns = NiftyIndex, - others=inrusd, verbose=FALSE, - switch.to.innov=TRUE, market.returns.purge=TRUE, nlags=1) -print(es.na) -summary(es.na) +Let us look at the gains in statistical efficiency across the three +variants of the event study. We will use the width of the confidence +interval at date 0 as a measure of efficiency. -@ +<>= +tmp <- rbind(es$eventstudy.output[10, ], + es.mm$eventstudy.output[10, ], + es.amm$eventstudy.output[10, ] + )[,c(1,3)] +rownames(tmp) <- c("None", "MM", "AMM") +print(tmp["MM", ] - tmp["None", ]) +print(tmp["AMM", ] - tmp["None", ]) +@ -The analysis of events has a wide array of tools and procedures available in the econometric literature. The objective in this package is to start with a core group of functionalities that deliver the platform for event studies following which we intend to extend and facilitate more inference procedures for use from this package. +This shows a sharp reduction in the width of the bootstrap 95\% +confidence interval from None to MM adjustment. Over and above this, a +small gain is obtained when going from MM adjustment to AMM +adjustment. -\section{Computational details} -The package code is written in R. It has dependencies to -zoo -(\href{http://cran.r-project.org/web/packages/zoo/index.html}{Zeileis - 2012}) and boot -(\href{http://cran.r-project.org/web/packages/boot/index.html}{Ripley - 2013}). 
R itself as well as these packages can be obtained from [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/eventstudies -r 336 From noreply at r-forge.r-project.org Thu May 15 15:34:48 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 15:34:48 +0200 (CEST) Subject: [Eventstudies-commits] r337 - in pkg: R man vignettes Message-ID: <20140515133448.1E1D418740F@r-forge.r-project.org> Author: vikram Date: 2014-05-15 15:34:47 +0200 (Thu, 15 May 2014) New Revision: 337 Added: pkg/vignettes/new.Rnw Modified: pkg/R/eesInference.R pkg/man/eesDates.Rd pkg/man/eesInference.Rd pkg/man/get.clusters.formatted.Rd Log: Minor corrections Modified: pkg/R/eesInference.R =================================================================== --- pkg/R/eesInference.R 2014-05-15 00:17:35 UTC (rev 336) +++ pkg/R/eesInference.R 2014-05-15 13:34:47 UTC (rev 337) @@ -719,8 +719,8 @@ ## Event study plot for EES (extreme event studies) ## Input: Output of GCF ## eventLists: Output of eesDates -eesInference <- function(input, eventLists, to.remap=TRUE, remap="cumsum", - width, inference = TRUE, +eesInference <- function(input, eventLists, width, to.remap=TRUE, + remap="cumsum", inference = TRUE, inference.strategy = "bootstrap"){ inf <- list() Modified: pkg/man/eesDates.Rd =================================================================== --- pkg/man/eesDates.Rd 2014-05-15 00:17:35 UTC (rev 336) +++ pkg/man/eesDates.Rd 2014-05-15 13:34:47 UTC (rev 337) @@ -56,9 +56,9 @@ \examples{ data(OtherReturns) ## Formatting extreme event dates -input <- get.clusters.formatted(event.series = OthersReturns[,"SP500"], - response.series = OtherReturns[,"NiftyIndex"], - prob.value=5) +input <- get.clusters.formatted(event.series = OtherReturns[,"SP500"], + response.series = OtherReturns[,"NiftyIndex"]) + ## Extracting event dates event.lists <- eesDates(input) str(event.dates, max.level = 2) Modified: pkg/man/eesInference.Rd 
=================================================================== --- pkg/man/eesInference.Rd 2014-05-15 00:17:35 UTC (rev 336) +++ pkg/man/eesInference.Rd 2014-05-15 13:34:47 UTC (rev 337) @@ -8,8 +8,8 @@ } \usage{ - eesInference(input, eventLists, to.remap = TRUE, remap = "cumsum", inference = "TRUE", - inference.strategy = "bootstrap") + eesInference(input, eventLists, width, to.remap = TRUE, remap = "cumsum", + inference = "TRUE", inference.strategy = "bootstrap") } \arguments{ @@ -22,6 +22,11 @@ for normal and purged events } + \item{width}{ + an \sQuote{integer} of length 1 that specifies a + symmetric event window around the event date. + } + \item{to.remap}{ \sQuote{logical}, indicating whether or not to remap the data in \sQuote{input}.The default setting is \sQuote{TRUE} @@ -43,7 +48,8 @@ inference strategy to be used for estimating the confidence interval. Presently, two methods are available: \dQuote{bootstrap} and \dQuote{wilcox}. The default setting is \sQuote{bootstrap}. 
- } + } + } \details{ @@ -100,14 +106,13 @@ \examples{ data(OtherReturns) ## Formatting extreme event dates -input <- get.clusters.formatted(event.series = OthersReturns[,"SP500"], - response.series = OtherReturns[,"NiftyIndex"], - prob.value=5) +input <- get.clusters.formatted(event.series = OtherReturns[,"SP500"], + response.series = OtherReturns[,"NiftyIndex"]) ## Extracting event dates event.lists <- eesDates(input) ## Performing event study analysis and computing inference -inf <- eesInference(input = input, eventLists = event.lists) +inf <- eesInference(input = input, eventLists = event.lists, width = 5) str(inf, max.level = 2) } Modified: pkg/man/get.clusters.formatted.Rd =================================================================== --- pkg/man/get.clusters.formatted.Rd 2014-05-15 00:17:35 UTC (rev 336) +++ pkg/man/get.clusters.formatted.Rd 2014-05-15 13:34:47 UTC (rev 337) @@ -76,6 +76,8 @@ \examples{ data(OtherReturns) -gcf <- get.clusters.formatted(OtherReturns$SP500, prob.value = 5) +gcf <- get.clusters.formatted(event.series = OtherReturns$SP500, + response.series = OtherReturns$NiftyIndex) + str(gcf, max.level = 2) } Copied: pkg/vignettes/new.Rnw (from rev 324, pkg/vignettes/new.Rnw) =================================================================== --- pkg/vignettes/new.Rnw (rev 0) +++ pkg/vignettes/new.Rnw 2014-05-15 13:34:47 UTC (rev 337) @@ -0,0 +1,260 @@ +\documentclass[a4paper,11pt]{article} +\usepackage{graphicx} +\usepackage{a4wide} +\usepackage[colorlinks,linkcolor=blue,citecolor=red]{hyperref} +\usepackage{natbib} +\usepackage{float} +\usepackage{tikz} +\usepackage{parskip} +\usepackage{amsmath} +\title{Introduction to the \textbf{eventstudies} package in R} +\author{Ajay Shah} +\begin{document} +\maketitle + +\begin{abstract} +\end{abstract} +\SweaveOpts{engine=R,pdf=TRUE} + +\section{The standard event study in finance} + +In this section, we look at using the eventstudies package for the +purpose of doing the standard event study 
using daily returns data in +financial economics. This is a workhorse application of event +studies. The treatment here assumes knowledge of event studies +\citep{Corrado2011}. + +To conduct an event study, you must have a list of firms with +associated dates, and you must have returns data for these +firms. These dates must be stored as a simple data frame. To +illustrate this, we use the object `SplitDates' in the package which +is used for doing examples. + +<>= +library(eventstudies) +data(SplitDates) # The sample +str(SplitDates) # Just a data frame +head(SplitDates) +@ + +The representation of dates is a data frame with two columns. The +first column is the name of the unit of observation which experienced +the event. The second column is the event date. + +The second thing that is required for doing an event study is data for +stock price returns for all the firms. The sample dataset supplied in +the package is named `StockPriceReturns': + +<>= +data(StockPriceReturns) # The sample +str(StockPriceReturns) # A zoo object +head(StockPriceReturns,3) # Time series of dates and returns. +@ + +The StockPriceReturns object is thus a zoo object which is a time +series of daily returns. These are measured in per cent, i.e. a value +of +4 is returns of +4\%. The zoo object has many columns of returns +data, one for each unit of observation which, in this case, is a +firm. The column name of the zoo object must match the firm name +(i.e. the name of the unit of observation) in the list of events. + +The package gracefully handles the three kinds of problems encountered +with real world data: (a) a firm where returns is observed but there +is no event, (b) a firm with an event where returns data is lacking +and (c) a stream of missing data in the returns data surrounding the +event date. 
+ +With this in hand, we are ready to run our first event study, using +raw returns: + +<>= +es <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "None", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap") +@ + +This runs an event study using events listed in SplitDates, and using +returns data for the firms in StockPriceReturns. An event window of 10 +days is analysed. + +Event studies with returns data typically do some kind of adjustment +of the returns data in order to reduce variance. In order to keep +things simple, in this first event study, we are doing no adjustment, +which is done by setting `type' to ``None''. + +While daily returns data has been supplied, the standard event study +deals with cumulated returns. In order to achieve this, we set +to.remap to TRUE and we ask that this remapping be done using cumsum. + +Finally, we come to inference strategy. We instruct eventstudy to do +inference and ask for bootstrap inference. + +Let us peek and poke at the object `es' that is returned. + +<>= +class(es) +str(es) +@ + +The object returned by eventstudy is of class `es'. It is a list with +five components. Three of these are just a record of the way +eventstudy() was run: the inference procedure adopted (bootstrap +inference in this case), the window width (10 in this case) and the +method used for mapping the data (cumsum). The two new things are +`outcomes' and `eventstudy.output'. + +The vector `outcomes' shows the disposition of each event in the +events table. There are 22 rows in SplitDates, hence there will be 22 +elements in the vector `outcomes'. In this vector, `success' denotes a +successful use of the event. When an event cannot be used properly, +various error codes are supplied. E.g. `unitmissing' is reported when +the events table shows an event for a unit of observation where +returns data is not observed. 
+ +\begin{figure} +\begin{center} +<>= +par(mai=c(.8,.8,.2,.2)) +plot(es, cex.axis=.7, cex.lab=.7) +@ +\end{center} +\caption{Plot method applied to es object}\label{f:esplot1} +\end{figure} + +% TODO: The x label should be "Event time (days)" and should +% automatically handle other situations like weeks or months or microseconds. +% The y label is much too long. + +Plot and print methods for the class `es' are supplied. The standard +plot is illustrated in Figure \ref{f:esplot1}. In this case, we see +the 95\% confidence interval is above 0 and below 0 and in no case can +the null of no-effect, compared with the starting date (10 days before +the stock split date), be rejected. + +In this first example, raw stock market returns was utilised in the +event study. It is important to emphasise that the event study is a +statistically valid tool even under these circumstances. Averaging +across multiple events isolates the event-related +fluctuations. However, there is a loss of statistical efficiency that +comes from fluctuations of stock prices that can have nothing to do +with firm level news. In order to increase efficiency, we resort to +adjustment of the returns data. + +The standard methodology in the literature is to use a market +model. This estimates a time-series regression $r_{jt} = \alpha_j + +\beta_j r_{Mt} + \epsilon_{jt}$ where $r_{jt}$ is returns for firm $j$ +on date $t$, and $r_{Mt}$ is returns on the market index on date +$t$. The market index captures market-wide fluctuations, which have +nothing to do with firm-specific factors. The event study is then +conducted with the cumulated $\epsilon_{jt}$ time series. This yields +improved statistical efficiency as $\textrm{Var}(\epsilon_j) < +\textrm{Var}(r_j)$. 
+ +This is invoked by setting `type' to `marketResidual': + +<>= +data(OtherReturns) +es.mm <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "marketResidual", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap", + market.returns=OtherReturns$NiftyIndex + ) +@ + +In addition to setting `type' to `marketResidual', we are now required +to supply data for the market index, $r_{Mt}$. In the above example, +this is the data object NiftyIndex supplied from the OtherReturns data +object in the package. This is just a zoo vector with daily returns of +the stock market index. + +\begin{figure} +\begin{center} +<>= +par(mai=c(.8,.8,.2,.2)) +plot(es.mm, cex.axis=.7, cex.lab=.7) +@ +\end{center} +\caption{Adjustment using the market model}\label{f:esplotmm} +\end{figure} + +A comparison of the range of the $y$ axis in Figure \ref{f:esplot1} +versus that seen in Figure \ref{f:esplotmm} shows the substantial +improvement in statistical efficiency that was obtained by market +model adjustment. + +We close our treatment of the standard finance event study with one +step forward on further reducing $\textrm{Var}(\epsilon)$ : by doing +an `augmented market model' regression with more than one explanatory +variable. The augmented market model uses regressions like: + +\[ +r_{jt} = \alpha_j + \beta_{1,j} r_{M1,t} + \beta_{2,j} r_{M2,t} + \epsilon_{jt} +\] + +where in addition to the market index $r_{M1,t}$, there is an +additional explanatory variable $r_{M2,t}$. One natural candidate is +the returns on the exchange rate, but there are many other candidates. + +An extensive literature has worked out the unique problems of +econometrics that need to be addressed in doing augmented market +models.
The package uses the synthesis of this literature as presented +in \citet{patnaik2010amm}.\footnote{The source code for augmented + market models in the package is derived from the source code written + for \citet{patnaik2010amm}.} + +To repeat the stock splits event study using augmented market models, +we use the incantation: + +% Check some error +<>= +es.amm <- eventstudy(firm.returns = StockPriceReturns, + eventList = SplitDates, + width = 10, + type = "lmAMM", + to.remap = TRUE, + remap = "cumsum", + inference = TRUE, + inference.strategy = "bootstrap", + market.returns=OtherReturns$NiftyIndex, + others=OtherReturns$USDINR, + market.returns.purge=TRUE + ) +@ + +Here the additional regressor on the augmented market model is the +returns on the exchange rate, which is the slot USDINR in +OtherReturns. The full capabilities for doing augmented market models +from \citet{patnaik2010amm} are available. These are documented +elsewhere. For the present moment, we will use the feature +market.returns.purge without explaining it. + +Let us look at the gains in statistical efficiency across the three +variants of the event study. We will use the width of the confidence +interval at date 0 as a measure of efficiency. + +<>= +tmp <- rbind(es$eventstudy.output[10,], es.mm$eventstudy.output[10,])[,c(1,3)] +rownames(tmp) <- c("None","MM") +tmp[,2]-tmp[,1] +@ + +This shows a sharp reduction in the width of the bootstrap 95\% +confidence interval from None to MM adjustment. Over and above this, a +small gain is obtained when going from MM adjustment to AMM +adjustment. 
+ +\newpage +\bibliographystyle{jss} \bibliography{es} + +\end{document} From noreply at r-forge.r-project.org Thu May 15 17:50:14 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 17:50:14 +0200 (CEST) Subject: [Eventstudies-commits] r338 - pkg/inst/tests Message-ID: <20140515155014.BBAEF186B29@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 17:50:14 +0200 (Thu, 15 May 2014) New Revision: 338 Modified: pkg/inst/tests/test_interfaces.R Log: Fixed formatting of test output. Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-15 13:34:47 UTC (rev 337) +++ pkg/inst/tests/test_interfaces.R 2014-05-15 15:50:14 UTC (rev 338) @@ -7,7 +7,7 @@ load("test_USDINR.rda") ### Basic event study with default args (market residuals) - cat("Checking market residuals interface: ") + cat("\nChecking market residuals interface: ") expected_mean <- c(0, 0.0393985717416213, -0.7458035091065, 0.457817077869512, 0.715714066835461, 2.33986420702835, 2.37333344340029) @@ -28,7 +28,7 @@ expect_is(test_es, "es") ### None - cat("Checking no model output: ") + cat("\nChecking no model output: ") expected_mean <- c(0, -0.197406699931557, -0.804299958306487, 0.0135570496689663, -0.418062964428412, 0.904144365357373, -0.806779427723603) @@ -49,7 +49,7 @@ expect_is(test_es, "es") ### AMM interface - cat("Checking AMM interface: ") + cat("\nChecking AMM interface: ") expected_mean <- c(0, 0.135927645042554, -0.600457594252805, 0.631525565290171, 0.871423869901684, 2.54741102266723, 2.5989730099384) @@ -73,7 +73,7 @@ expect_is(test_es, "es") ### Excess return - cat("Checking excess return interface: ") + cat("\nChecking excess return interface: ") expected_mean <- c(0, 0.138567158395153, -0.631185954448288, 0.701644918222266, 1.15001275036422, 2.88646832315114, 3.32315429568726) expected_outcomes <- c("success", "success") @@ -95,7 
+95,7 @@ expect_is(test_es, "es") ### Remapping - cat("Checking remapping: ") + cat("\nChecking remapping: ") test_events <- data.frame(outcome.unit = "ONGC", event.when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) @@ -137,7 +137,7 @@ expect_false(isTRUE(all.equal(test_es, test_es_remap))) ### Inference - cat("Checking inference interface: ") + cat("\nChecking inference interface: ") ## bootstrap test_es_inference <- eventstudy(firm.returns = test_returns, eventList = test_events, @@ -177,7 +177,7 @@ test_that("test.arguments", { load("test_StockPriceReturns.rda") - cat("Checking single series handling: ") + cat("\nChecking single series handling: ") test_events <- data.frame(outcome.unit = "ONGC", event.when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) From noreply at r-forge.r-project.org Thu May 15 18:02:13 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 18:02:13 +0200 (CEST) Subject: [Eventstudies-commits] r339 - pkg/vignettes Message-ID: <20140515160214.05B29186E53@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 18:02:13 +0200 (Thu, 15 May 2014) New Revision: 339 Modified: pkg/vignettes/eventstudies.Rnw Log: Added vignette index entry. 
Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-15 15:50:14 UTC (rev 338) +++ pkg/vignettes/eventstudies.Rnw 2014-05-15 16:02:13 UTC (rev 339) @@ -10,6 +10,12 @@ \title{Introduction to the \textbf{eventstudies} package in R} \author{Ajay Shah} \begin{document} + +% \VignetteIndexEntry{An R package for conducting event studies and a platform for methodological research on event studies.} +% \VignetteDepends{} +% \VignetteKeywords{eventstudies} +% \VignettePackage{eventstudies} + \maketitle \begin{abstract} From noreply at r-forge.r-project.org Thu May 15 18:02:35 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 18:02:35 +0200 (CEST) Subject: [Eventstudies-commits] r340 - pkg/man Message-ID: <20140515160235.3E96F186B3A@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 18:02:34 +0200 (Thu, 15 May 2014) New Revision: 340 Modified: pkg/man/eesDates.Rd Log: Fixed ees example. Modified: pkg/man/eesDates.Rd =================================================================== --- pkg/man/eesDates.Rd 2014-05-15 16:02:13 UTC (rev 339) +++ pkg/man/eesDates.Rd 2014-05-15 16:02:34 UTC (rev 340) @@ -61,5 +61,5 @@ ## Extracting event dates event.lists <- eesDates(input) -str(event.dates, max.level = 2) +str(event.lists, max.level = 2) } From noreply at r-forge.r-project.org Thu May 15 19:03:20 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 19:03:20 +0200 (CEST) Subject: [Eventstudies-commits] r341 - pkg/vignettes Message-ID: <20140515170321.02F5F186DA4@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 19:03:18 +0200 (Thu, 15 May 2014) New Revision: 341 Modified: pkg/vignettes/eventstudies.Rnw Log: Fixed formatting. 
Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-15 16:02:34 UTC (rev 340) +++ pkg/vignettes/eventstudies.Rnw 2014-05-15 17:03:18 UTC (rev 341) @@ -1,4 +1,5 @@ \documentclass[a4paper,11pt]{article} +\usepackage[utf8]{inputenc} \usepackage{graphicx} \usepackage{a4wide} \usepackage[colorlinks,linkcolor=blue,citecolor=red]{hyperref} @@ -24,7 +25,7 @@ \section{The standard event study in finance} -In this section, we look at using the eventstudies package for the +In this section, we look at using the `eventstudies' package for the purpose of doing the standard event study using daily returns data in financial economics. This is a workhorse application of event studies. The treatment here assumes knowledge of event studies @@ -33,7 +34,7 @@ To conduct an event study, you must have a list of firms with associated dates, and you must have returns data for these firms. These dates must be stored as a simple data frame. To -illustrate this, we use the object `SplitDates' in the package which +illustrate this, we use the object \emph{SplitDates} in the package which is used for doing examples. <>= @@ -49,7 +50,7 @@ The second thing that is required for doing an event study is data for stock price returns for all the firms. The sample dataset supplied in -the package is named `StockPriceReturns': +the package is named \emph{StockPriceReturns}: <>= data(StockPriceReturns) # The sample @@ -57,7 +58,7 @@ head(StockPriceReturns,3) # Time series of dates and returns. @ -The StockPriceReturns object is thus a zoo object which is a time +The \emph{StockPriceReturns} object is thus a \emph{zoo} object which is a time series of daily returns. These are measured in per cent, i.e. a value of +4 is returns of +4\%. 
The zoo object has many columns of returns data, one for each unit of observation which, in this case, is a @@ -84,21 +85,21 @@ inference.strategy = "bootstrap") @ -This runs an event study using events listed in SplitDates, and using -returns data for the firms in StockPriceReturns. An event window of 10 +This runs an event study using events listed in \emph{SplitDates}, and using +returns data for the firms in \emph{StockPriceReturns}. An event window of 10 days is analysed. Event studies with returns data typically do some kind of adjustment of the returns data in order to reduce variance. In order to keep things simple, in this first event study, we are doing no adjustment, -which is done by setting `type' to ``None''. +which is done by setting \texttt{type} to ``\texttt{None}''. While daily returns data has been supplied, the standard event study deals with cumulated returns. In order to achieve this, we set -to.remap to \emph{TRUE} and we ask that this remapping be done using cumsum. - +\texttt{to.remap} to \texttt{TRUE} and we ask that this remapping be done using ``\texttt{cumsum}''. + Finally, we come to inference strategy. We instruct eventstudy to do -inference and ask for bootstrap inference. +inference and ask for ``\texttt{bootstrap}'' inference. Let us peek and poke at the object `es' that is returned. @@ -107,18 +108,18 @@ str(es) @ -The object returned by eventstudy is of class `es'. It is a list with +The object returned by eventstudy is of \texttt{class} `es'. It is a list with five components. Three of these are just a record of the way -\emph{eventstudy()} was run: the inference procedure adopted (bootstrap +\texttt{eventstudy()} was run: the inference procedure adopted (``\texttt{bootstrap}'' inference in this case), the window width (10 in this case) and the -method used for mapping the data (cumsum). The two new things are -`outcomes' and `eventstudy.output'. +method used for mapping the data (``\texttt{cumsum}''). 
The two new things are +`\texttt{outcomes}' and `\texttt{eventstudy.output}'. -The vector `outcomes' shows the disposition of each event in the -events table. There are 22 rows in SplitDates, hence there will be 22 -elements in the vector `outcomes'. In this vector, `success' denotes a +The vector `\texttt{outcomes}' shows the disposition of each event in the +events table. There are 22 rows in \emph{SplitDates}, hence there will be 22 +elements in the vector `\texttt{outcomes}'. In this vector, ``\texttt{success}'' denotes a successful use of the event. When an event cannot be used properly, -various error codes are supplied. E.g. `unitmissing' is reported when +various error codes are supplied. E.g. ``\texttt{unitmissing}'' is reported when the events table shows an event for a unit of observation where returns data is not observed. @@ -161,7 +162,7 @@ improved statistical efficiency as $\textrm{Var}(\epsilon_j) < \textrm{Var}(r_j)$. -This is invoked by setting `type' to `marketResidual': +This is invoked by setting \texttt{type} to ``\texttt{marketResidual}'': <>= data(OtherReturns) @@ -177,9 +178,9 @@ ) @ -In addition to setting `type' to `marketResidual', we are now required +In addition to setting \texttt{type} to ``\texttt{marketResidual}'', we are now required to supply data for the market index, $r_{Mt}$. In the above example, -this is the data object NiftyIndex supplied from the OtherReturns data +this is the data object `\texttt{NiftyIndex}' supplied from the \emph{OtherReturns} data object in the package. This is just a zoo vector with daily returns of the stock market index. @@ -238,11 +239,11 @@ @ Here the additional regressor on the augmented market model is the -returns on the exchange rate, which is the slot USDINR in -OtherReturns. The full capabilities for doing augmented market models +returns on the exchange rate, which is the slot `\texttt{USDINR}' in +\emph{OtherReturns}. 
The full capabilities for doing augmented market models from \citet{patnaik2010amm} are available. These are documented elsewhere. For the present moment, we will use the feature -market.returns.purge without explaining it. +\texttt{market.returns.purge} without explaining it. Let us look at the gains in statistical efficiency across the three variants of the event study. We will use the width of the confidence @@ -254,12 +255,13 @@ es.amm$eventstudy.output[10, ] )[,c(1,3)] rownames(tmp) <- c("None", "MM", "AMM") + print(tmp["MM", ] - tmp["None", ]) print(tmp["AMM", ] - tmp["None", ]) @ This shows a sharp reduction in the width of the bootstrap 95\% -confidence interval from None to MM adjustment. Over and above this, a +confidence interval from ``\texttt{None}'' to MM adjustment. Over and above this, a small gain is obtained when going from MM adjustment to AMM adjustment. From noreply at r-forge.r-project.org Thu May 15 19:04:01 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 19:04:01 +0200 (CEST) Subject: [Eventstudies-commits] r342 - pkg/man Message-ID: <20140515170401.B2A15186DEA@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 19:04:01 +0200 (Thu, 15 May 2014) New Revision: 342 Modified: pkg/man/eesDates.Rd Log: Changed language, modified example. Modified: pkg/man/eesDates.Rd =================================================================== --- pkg/man/eesDates.Rd 2014-05-15 17:03:18 UTC (rev 341) +++ pkg/man/eesDates.Rd 2014-05-15 17:04:01 UTC (rev 342) @@ -12,33 +12,34 @@ } \arguments{ - \item{input}{an output object of \sQuote{get.clusters.formatted}} + \item{input}{object returned by \sQuote{get.clusters.formatted}} } -\details{ - This function creates event list for extreme event study analysis. The event list - consists of extreme right tail and left tail event dates for clustered and - unclustered data. 
The \sQuote{normal} set consists of event dates for only - unclustered events and \sQuote{purged} set consists of event dates for unclustered - and clustered both. Unclustered events consists of clean event window with no event - occurring in the event window and clustered events are fused consecutive events - which lie in the same tail. -} +\details{ The function creates a list of interesting events extracted + from the output of \code{get.clusters.formatted}. The event + list can be directly supplied to the \code{eventstudy} function. + It returns extreme right tail and left tail event dates for clustered + and unclustered data. The \sQuote{normal} set consists of event dates + for only unclustered events and \sQuote{purged} set consists of event + dates for unclustered and clustered both. Unclustered events consist + of clean event window with no event occurring in the event window and + clustered events are fused consecutive events which lie in the same + tail. } + \value{ - A \code{list} object containing: - \item{events.good.normal}{\sQuote{data.frame} containing events list as an input - to \sQuote{eventstudy} function, containing right tail event dates of unclustered - events only} - \item{events.bad.normal}{\sQuote{data.frame} containing events list as an input - to \sQuote{eventstudy} function, containing left tail event dates of unclustered - events only} - \item{events.good.purged}{\sQuote{data.frame} containing events list as an input - to \sQuote{eventstudy} function, containing right tail event dates of unclustered - events and unclustered events} - \item{events.bad.purged}{\sQuote{data.frame} containing events list as an input - to \sQuote{eventstudy} function, containing left tail event dates of unclustered - and clustered events} + A \code{list} object containing: + \item{events.good.normal}{\sQuote{data.frame} containing right tail + event dates of unclustered events.} + + \item{events.bad.normal}{\sQuote{data.frame} containing left tail + event 
dates of unclustered events.} + + \item{events.good.purged}{\sQuote{data.frame} containing right tail + event dates of unclustered events and unclustered events.} + + \item{events.bad.purged}{\sQuote{data.frame} containing left tail + event dates of unclustered and clustered events.} } \references{ @@ -55,11 +56,10 @@ \examples{ data(OtherReturns) -## Formatting extreme event dates -input <- get.clusters.formatted(event.series = OtherReturns[,"SP500"], - response.series = OtherReturns[,"NiftyIndex"]) -## Extracting event dates -event.lists <- eesDates(input) -str(event.lists, max.level = 2) +input <- get.clusters.formatted(event.series = OtherReturns[, "SP500"], + response.series = OtherReturns[, "NiftyIndex"]) + +eventlist <- eesDates(input) +str(eventlist, max.level = 2) } From noreply at r-forge.r-project.org Thu May 15 19:08:18 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 19:08:18 +0200 (CEST) Subject: [Eventstudies-commits] r343 - pkg/man Message-ID: <20140515170818.AC5E9186E80@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 19:08:18 +0200 (Thu, 15 May 2014) New Revision: 343 Removed: pkg/man/eesPlot.Rd Log: Removed older eesPlot man page. Deleted: pkg/man/eesPlot.Rd =================================================================== --- pkg/man/eesPlot.Rd 2014-05-15 17:04:01 UTC (rev 342) +++ pkg/man/eesPlot.Rd 2014-05-15 17:08:18 UTC (rev 343) @@ -1,79 +0,0 @@ -\name{eesPlot} -\alias{eesPlot} - -\title{Plotting clustered and unclustered extreme events} - -\description{ This function plots an event study with extreme events - as identified by the function \sQuote{ees}. 
-} - -\usage{ -eesPlot(z, - response.series.name, - event.series.name, - titlestring, - ylab, - width, - prob.value) -} - -\arguments{ - - \item{z}{a time series object with the response and event series.} - - \item{response.series.name}{Column name of the series in \sQuote{z} - that will be the \sQuote{outcome} or \sQuote{response} series.} - - \item{event.series.name}{Column name of the series in \sQuote{z} that - will be the \sQuote{event} series.} - - \item{titlestring}{Title for event study plot} - - \item{ylab}{Y-axis label of the plot} - - \item{width}{Time window before and after the event to be plotted} - - \item{prob.value}{Cut-off values in the probability distribution (in - percentage terms) to identify extreme events.} -} - -\details{ - This function draws from many different functions in the package: - \itemize{ - \item Identify extreme events based on \sQuote{prob.value} on the - series named \sQuote{event.series.name} in \sQuote{z}, similar to - the function \code{ees}; - - \item Treats clusters in the events identified as one event by - cumulating the series named \sQuote{response.series.name} in - \sQuote{z} for the period in the cluster; - - \item Obtains confidence intervals using \code{inference.bootstrap} - and the presents the anaylsis in a graph. - } - -} - -\value{A plot of the response series with lower and upper tail events - defined on the event series.} - -\references{ - \cite{Ila Patnaik, Nirvikar Singh and Ajay Shah (2013). - Foreign Investors under stress: Evidence from - India. International Finance, 16(2), 213-244. 
- \url{http://onlinelibrary.wiley.com/doi/10.1111/j.1468-2362.2013.12032.x/abstract} - \url{http://macrofinance.nipfp.org.in/releases/PatnaikShahSingh2013_Foreign_Investors.html} - } -} - -\author{Vikram Bahure, Vimal Balasubramaniam} - -\examples{ -data("OtherReturns") - -eesPlot(z = OtherReturns, - response.series.name = OtherReturns$NiftyIndex, - event.series.name = OtherReturns$SP500, - titlestring = "S&P500", - ylab = "(Cum.) change in NIFTY") -} From noreply at r-forge.r-project.org Thu May 15 19:24:26 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 19:24:26 +0200 (CEST) Subject: [Eventstudies-commits] r344 - pkg/vignettes Message-ID: <20140515172426.92E3F187275@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 19:24:26 +0200 (Thu, 15 May 2014) New Revision: 344 Removed: pkg/vignettes/AMM.Rnw pkg/vignettes/AMM.bib pkg/vignettes/ees.Rnw pkg/vignettes/ees.bib pkg/vignettes/new.Rnw Log: Removed old vignettes. Deleted: pkg/vignettes/AMM.Rnw =================================================================== --- pkg/vignettes/AMM.Rnw 2014-05-15 17:08:18 UTC (rev 343) +++ pkg/vignettes/AMM.Rnw 2014-05-15 17:24:26 UTC (rev 344) @@ -1,176 +0,0 @@ -\documentclass[a4paper,11pt]{article} -\usepackage{graphicx} -\usepackage{a4wide} -\usepackage[colorlinks,linkcolor=blue,citecolor=red]{hyperref} -\usepackage{natbib} -\usepackage{float} -\usepackage{tikz} -\usepackage{parskip} -\usepackage{amsmath} -\title{Augmented Market Models} -\author{Ajay Shah \and Vikram Bahure \and Chirag Anand} -\begin{document} -%\VignetteIndexEntry{eventstudies: Extreme events functionality} -% \VignetteDepends{} -% \VignetteKeywords{extreme event analysis} -% \VignettePackage{eventstudies} -\maketitle - -\begin{abstract} -The document demonstrates the application of Augmented market model -(AMM) from the paper \citet{patnaik2010amm} to extract currency -exposure and AMM residuals from the model. 
-\end{abstract} - -\SweaveOpts{engine=R,pdf=TRUE} -\section{Introduction} - -Augmented market models (AMM) extends the classical market model \citep{sharpe1964capm, lintner1965capm} to introduce additional right hand side variables like currency returns or interest rates to understand the effect of macro variations in addition to market movements on stock returns. The package provides functionality to estimate augmented market models as well as produce augmented market model residuals (AMM abnormal returns) stripped of market and macro variations to run event studies. The function set was originally written and applied in \citet{patnaik2010amm}. \citet{adler1984exposure} and \citet{jorion1990exchange} are the first papers to use augmented market models to study currency exposure. The standard currency exposure AMM is as follows - -\begin{equation} - r_j = \alpha_j + \beta_{1j} r_{M1} + \beta_{2j} r_{M2} + \epsilon -\end{equation} - -In the original usage of augmented market models, Currency exposure is -expressed as the regression coefficient on currency returns (M2). The -model uses firm stock price as the information set of firm positions -and it relates firm returns $r_j$ to market index movements $r_{M1}$ -and currency fluctuations $r_{M2}$. The coefficient $\beta_{2j}$ -measures the sensitivity of the valuation of firm $j$ to changes in -the exchange rate. This is a widely used technique with multiple -variations including asymmetric exposures. - -The AMM implementation in the package has some key innovations as compared to the original implementation of currency exposure AMM's by \citet{adler1984exposure} and \citet{jorion1990exchange}. -\begin{equation} - r_{jt} = \alpha + \beta_1 r_{M1,t} - + \sum_{i=0}^{k} a_i e_{t-i} + \epsilon_t -\end{equation} - -\begin{enumerate} -\item Exchange rate series is re-expressed as a series of innovations with an AIC selected AR process. 
Under this specification, an innovation $e_t$ on the currency market has an impact on the stock price at time $t$ and the following $k$ time periods. Under the above model, currency exposure is embedded in the vector of $a_i$ coefficients; it is no longer a simple scalar $\beta_2$ as was the case under the standard model -\item Heteroscedasticity in $r_{M1}$ \& $r_{M2}$ : This is resolved by - using a HAC estimator -\item Decomposition of market exposure from firm exposure: Market exposure issue solved by orthogonalising the market index time-series by first estimating a regression model explaining $r_{M1}$ as a function of past and present currency innovations, and extracting the residual from this regression. These residuals represent uncontaminated market returns -\end{enumerate} - -In the section below, we explain the estimation of currency exposure, -AMM residuals and performing event study analysis. In section \ref{sec:ce}, we -replicate the methodology used in \citet{patnaik2010amm} using the -package. In section \ref{sec:es}, we take the AMM methodology a step ahead to -extract residuals from AMM methodology which we use the to -perform traditional event study analysis. -% Need to talk more about generalisation used for variables other than currency - - -\section{Software approach}\label{sec:ce} -The package has functions which enable the user to compute linear -model AMM output, along with currency exposure, using the AMM -methodology employed in \citet{patnaik2010amm}. In the subsections -below we describe construction of data-set to input in \texttt{lmAMM} -function and further computing AMM output and currency exposure. - -\subsection{Constructing data set} -We need to construct usable data set, before performing AMM analysis -on firm returns using this package. There are two steps to be -followed constructing \texttt{X} (regressors) and firm returns -(regressands), to perform OLS as shown in the \citet{patnaik2010amm}. 
-\subsubsection{Regressors \& Regressands} -Regressors in the AMM equation are market returns and currency -returns, while regressands is firm returns. All the variables should -have balanced panel if not then merge the time series variable to get -one. \textit{AMMData} is an time series object with market returns as -\textit{Nifty} and currency returns as \textit{INR/USD}. If -currency exposure is to be estimated for different periods separately -then argument \textit{dates} will be helpful or else \textit{NULL} -will be provided to perform for full period. - -The function \textit{makeX} considers that -there is impact of currency on market returns and with the argument -\textit{market.returns.purge}, we orthogonalise the market returns to currency -returns before using AMM model. - -<<>>= -# Create RHS before running subperiod.lmAMM() -library(eventstudies) -data("AMMData") -nifty <- AMMData$index.nifty -inrusd <- AMMData$currency.inrusd -regressand <- AMMData[,c("Infosys","TCS")] -regressors <- makeX(nifty, others=inrusd, - switch.to.innov=TRUE, market.returns.purge=TRUE, nlags=1, - dates=as.Date(c("2012-02-01","2013-01-01","2014-01-20")), verbose=FALSE) -@ - -\subsection{Augmented market model} -Augmented market model output with a class of \textit{amm} is -generated using the function \texttt{lmAMM}. This function takes firm -returns (regressand) and regressor as input. Output of \texttt{lmAMM} -function is a list object with linear model output of AMM, -currency exposure, standard deviation and significance of the -exposure. 
-<<>>= -## AMM residual to time series -timeseries.lmAMM <- function(firm.returns,X,verbose=FALSE,nlags=1){ - tmp <- resid(lmAMM(firm.returns,X,nlags)) - tmp.res <- zoo(tmp,as.Date(names(tmp))) -} -## One firm -amm.output.one <- lmAMM(regressand[,1],X=regressors,nlags=1) -amm.resid.one <- timeseries.lmAMM(firm.returns=regressand[,1], - X=regressors, verbose=FALSE, nlags=1) -summary(amm.output.one) - -## More than one firm - # Extracting and merging -tmp.resid <- sapply(colnames(regressand)[1:2],function(y) - timeseries.lmAMM(firm.returns=regressand[,y], - X=regressors, - verbose=FALSE, - nlags=1)) -amm.resid <- zoo(tmp.resid,as.Date(rownames(tmp.resid))) -@ - -All the basic functionality are available for object with class -\textit{amm}. \texttt{print},\texttt{summary} and \texttt{plot} -commands can be used to do preliminary analysis. The plot -\ref{fig:amm} compares the AMM residuals with abnormal firm returns. -\begin{figure}[t] - \begin{center} - \label{fig:amm} - \caption{Augment market model} - \setkeys{Gin}{width=0.8\linewidth} - \setkeys{Gin}{height=0.8\linewidth} -<>= -plot(amm.output.one) -@ - \end{center} - \label{fig:one} -\end{figure} - -\subsection{Getting currency exposure} -The output of \texttt{makeX} function is used in \textit{subperiod.lmAMM} and -\textit{lmAMM} function to get currency exposure of the firms and AMM -residuals respectively. In the example below, we demonstrate the use -of \textit{subperiod.lmAMM} function to estimate currency exposure for -firms. -% MakeX and subperiod.lmAMM -<<>>= -# Run AMM for one firm across different periods - deprintize<-function(f){ - return(function(...) 
{capture.output(w<-f(...));return(w);}); - } -firm.exposure <- deprintize(subperiod.lmAMM)(firm.returns=regressand[,1], - X=regressors, - nlags=1, - verbose=TRUE, - dates= as.Date(c("2012-02-01", - "2013-01-01","2014-01-31"))) -str(firm.exposure) -@ - - We can also perform event study analysis, directly on AMM residuals - using \textit{eventstudy} function. which is presented in - \textit{eventstudies} vignette. - -\bibliographystyle{jss} \bibliography{AMM} -\end{document} Deleted: pkg/vignettes/AMM.bib =================================================================== --- pkg/vignettes/AMM.bib 2014-05-15 17:08:18 UTC (rev 343) +++ pkg/vignettes/AMM.bib 2014-05-15 17:24:26 UTC (rev 344) @@ -1,53 +0,0 @@ - - at article{patnaik2010amm, - title={Does the currency regime shape unhedged currency exposure?}, - author={Patnaik, Ila and Shah, Ajay}, - journal={Journal of International Money and Finance}, - volume={29}, - number={5}, - pages={760-769}, - year={2010}, - publisher={Elsevier} -} - - at article{sharpe1964capm, - title={Capital asset Prices: A Theory of market equilibrium under conditions of risk}, - author={Sharpe, William F}, - journal={The Journal of Finance}, - volume={19}, - number={3}, - pages={425-442}, - year={1964}, - publisher={Wiley Online Library} -} - - at article{lintner1965capm, - title={The valuation of risk assets and the selection of risky investments in stock portfolios and capital budgets}, - author={Lintner, John}, - journal={The Review of Economics and Statistics}, - volume={47}, - number={1}, - pages={13-37}, - year={1965}, - publisher={JSTOR} -} - - at article{adler1984exposure, - title={Exposure to currency risk: definition and measurement}, - author={Adler, Michael and Dumas, Bernard}, - journal={Financial management}, - pages={41-50}, - year={1984}, - publisher={JSTOR} -} - - at article{jorion1990exchange, - title={The exchange-rate exposure of US multinationals}, - author={Jorion, Philippe}, - journal={Journal of Business}, - 
pages={331-345}, - year={1990}, - publisher={JSTOR} -} - - Deleted: pkg/vignettes/ees.Rnw =================================================================== --- pkg/vignettes/ees.Rnw 2014-05-15 17:08:18 UTC (rev 343) +++ pkg/vignettes/ees.Rnw 2014-05-15 17:24:26 UTC (rev 344) @@ -1,245 +0,0 @@ -\documentclass[a4paper,11pt]{article} -\usepackage{graphicx} -\usepackage{a4wide} -\usepackage[colorlinks,linkcolor=blue,citecolor=red]{hyperref} -\usepackage{natbib} -\usepackage{float} -\usepackage{tikz} -\usepackage{parskip} -\usepackage{amsmath} -\title{Introduction to the \textbf{extreme events} functionality} -\author{Vikram Bahure \and Vimal Balasubramaniam \and Ajay Shah} -\begin{document} -% \VignetteIndexEntry{eventstudies: Extreme events functionality} -% \VignetteDepends{} -% \VignetteKeywords{extreme event analysis} -% \VignettePackage{eventstudies} -\maketitle - -\begin{abstract} - One specific application of the eventstudies package is - \citet{PatnaikShahSingh2013}. This vignette reproduces results from - the paper and explains a specific functionality of the pacakge: to - perform analysis of tail events. \texttt{ees} is a wrapper available - in the package for users to undertake similar ``extreme-events'' - analysis. -\end{abstract} - -\SweaveOpts{engine=R,pdf=TRUE} - -\section{Introduction} - -Extreme events functionality is the analysis of an outcome variable -and its behaviour around tail events of another variable, the event -variable. This package includes an extreme events functionality as a -wrapper in \texttt{ees}. - -Non-parametric studies of events on tails poses several research -challenges: - -\begin{enumerate} -\item What constitutes tail events, i.e., the cut-off points on the - distribution of the event variable? -\item What is the event window, i.e., the window of observation before - and after the event? -\item What happens when multiple tail events (``Clustered events'') - occur within the event window? 
-\end{enumerate} - -We facilitate these important technical questions with summary -statistics on the distribution and run length of events, quantile -values to determine the cut-off points on the distribution of the -event variable, and depending on the frequency of analysis, -period-wise distribution of extreme events. An analysis of all these -summary statistics for clustered and unclustered events exist as -well. This wrapper provides results for both cases: only unclustered -events and both types of events. - -In the next few sections, we replicate a sub-section of results from -\citet{PatnaikShahSingh2013} that studies whether extreme events on -the S\&P 500 affects returns on the Indian stock index, the -Nifty. Detailed mathematical overview of the methodology is available -in the paper. - - -\section{Extreme event analysis} - -Since the object of interest is the impact on returns of the outcome -variable, nifty, with tail events on the S\&P 500, we first obtain a -zoo object of returns data (``EESData''). Next, we define tail events -for a given probability value; if \textit{prob.value} is 5, then -returns that fall under $0-5\%$ and $95-100\%$ of the probability -distribution form our set of events. - -<<>>== -library(eventstudies) -data(EESData) - -input <- EESData$sp500 - -deprintize<-function(f){ - return(function(...){ - capture.output(w<-f(...));return(w);}) -} -output <- deprintize(ees)(input, prob.value=5) -@ - -As mentioned earlier, one of the most important aspect of a -non-parametric approach to an event study is if the -parameters for such an exercise is validated by the general summary -statistics of the data set being used. The object \texttt{output} is a -list of various relevant summary statistics for the data set, and with -an extreme event analysis for lower and upper tails. 
For each of the -tails, the following statistics are available: - -\begin{enumerate} -\item Extreme events data set (The input for event study analysis) -\item Distribution of clustered and unclustered tail events -\item Distribution of the run length -\item Quantile values of tail events -\item Yearly distribution of tail events -\end{enumerate} - -\subsection{Summary statistics} - -In \texttt{output\$data.summary}, we present the minimum, maximum, -inter-quartile range (IQR), standard deviation (sd), and the -distribution at 5\%, 25\%, Median, Mean, 75\%, and 95\%. This analysis -for the S\&P 500 is identical to the results presented in Table 1 of -Patnaik, Shah and Singh (2013). - -<<>>== -output$data.summary -@ - -\subsection{Extreme events dataset} - -The output for upper tail and lower tail are in the same format as -mentioned above. The data set is a time series object with 2 columns; -the first column \textit{event.series} contains returns for extreme -events and the second column \textit{cluster.pattern} records the -number of consecutive days in the cluster. Here we show results for -the lower tail of S\&P 500. - -The overall dataset looks as follows: - -<<>>== -head(output$lower.tail$data) -str(output$lower.tail$data) -@ - -\subsection{Distribution of clustered and unclustered events} - -There are several types of clusters in an analysis of extreme -events. Clusters that are purely on either of the tails, or are -mixed. Events that have mixed clusters typically witness sharp -positive returns in the outcome variable, and soon after observing -large negative returns. This ``contamination'' might cause serious -downward bias in the magnitude and direction of impact due to an -extreme event. 
Therefore, it will be useful to ensure that such -occurrences are not included in the analysis.\footnote{While it is - interesting to study such mixed events by themselves, it is not the - subject of the specific question posed in this vignette.} - -Results from Table 2 of Patnaik, Shah and Singh (2013) show that there -are several mixed clusters in the data set. In other words, there are -many events on the S\&P 500 that provide large positive (negative) -returns followed by large negative (positive) returns in the data -set. As we look closely at the lower tail events in this vignette, the -output for the lower tail events looks like this: - -<<>>= -output$lower.tail$extreme.event.distribution -@ - -``\texttt{unclstr}'' refers to unclustered events, -``\texttt{used.clstr}'' refers to the clusters that are pure and -uncontaminated by mixed tail events, ``\texttt{removed.clstr}'' refers -to the mixed clusters. For the analysis in Patnaik, Shah and Singh -(2013) only 62 out of 102 events are used. These results are identical -to those documented in Table 2 of the paper. - -\subsection{Run length distribution of clusters} - -The next concern is the run length distribution of clusters used in -the analysis. Run length shows the total number of clusters with -\textit{n} consecutive days of occurrence. In the example used -here, we have 3 clusters with \textit{two} consecutive events and 0 -clusters with \textit{three} consecutive events. This is also -identical to the one presented in the paper by Patnaik, Shah and Singh -(2013). - -<<>>= -output$lower.tail$runlength -@ - -\subsection{Extreme event quantile values} -Quantile values show 0\%, 25\%, median, 75\%, 100\% and mean values for -the extreme events data. The results shown below match the second row -of Table 4 in the paper.
- -<<>>= -output$lower.tail$quantile.values -@ - -\subsection{Yearly distribution of extreme events} -This table shows the yearly distribution and the median value for -extreme events data. The results shown below are in line with the -third and fourth columns for S\&P 500 in Table 5 of the paper. - -<<>>= -output$lower.tail$yearly.extreme.event -@ - -The yearly distribution for extreme events includes unclustered events -and clustered events which are fused. In the extreme event -distribution of clustered and unclustered events, by contrast, the clustered events -are defined as total events in a cluster. For example, if there is a -clustered event with three consecutive extreme events then we treat -that as a single event for analysis. - -\section{Extreme event study plot} - -The significance of an event study can be summarised well by visual -representations. With the steps outlined in the \texttt{eventstudies} -vignette, the wrapper \texttt{eesPlot} in the package provides a -convenient user interface to replicate Figure 7 from Patnaik, Shah and -Singh (2013). The plot presents events on the upper tail as ``Very -good'' and lower tail as ``Very bad'' on the event variable S\&P -500. The outcome variable studied here is the Nifty, and the y-axis -presents the cumulative returns in Nifty. This is an event graph, -where data is centered on the event date (``0'') and the graph shows 4 -days before and after the event. - -<<>>= -eesPlot(z=EESData, response.series.name="nifty", event.series.name="sp500", titlestring="S&P500", ylab="(Cum.) change in NIFTY", prob.value=5, width=5) -@ - -\begin{figure}[t] - \begin{center} - \caption{Extreme event on S\&P500 and response of NIFTY} - \setkeys{Gin}{width=1\linewidth} - \setkeys{Gin}{height=0.8\linewidth} -<>= -res <- deprintize(eesPlot)(z=EESData, response.series.name="nifty", - event.series.name="sp500", - titlestring="S&P500", - ylab="(Cum.)
change in NIFTY", - prob.value=5, width=5) -@ - \end{center} - \label{fig:one} -\end{figure} - -\section{Computational details} -The package code is written in R. It has dependencies to zoo -(\href{http://cran.r-project.org/web/packages/zoo/index.html}{Zeileis - 2012}) and boot -(\href{http://cran.r-project.org/web/packages/boot/index.html}{Ripley - 2013}). R itself as well as these packages can be obtained from -\href{http://CRAN.R-project.org/}{CRAN}. - -% \section{Acknowledgments} -\bibliographystyle{jss} \bibliography{ees} - -\end{document} Deleted: pkg/vignettes/ees.bib =================================================================== --- pkg/vignettes/ees.bib 2014-05-15 17:08:18 UTC (rev 343) +++ pkg/vignettes/ees.bib 2014-05-15 17:24:26 UTC (rev 344) @@ -1,10 +0,0 @@ - at Article{PatnaikShahSingh2013, - author = {Patnaik, Ila and Shah, Ajay and Singh, Nirvikar}, - title = {Foreign Investors Under Stress: Evidence from India }, - journal = {International Finance}, - year = 2013, -volume = 16, -number= 2, -pages = {213-244} -} - Deleted: pkg/vignettes/new.Rnw =================================================================== --- pkg/vignettes/new.Rnw 2014-05-15 17:08:18 UTC (rev 343) +++ pkg/vignettes/new.Rnw 2014-05-15 17:24:26 UTC (rev 344) @@ -1,260 +0,0 @@ -\documentclass[a4paper,11pt]{article} -\usepackage{graphicx} -\usepackage{a4wide} -\usepackage[colorlinks,linkcolor=blue,citecolor=red]{hyperref} -\usepackage{natbib} -\usepackage{float} -\usepackage{tikz} -\usepackage{parskip} -\usepackage{amsmath} -\title{Introduction to the \textbf{eventstudies} package in R} -\author{Ajay Shah} -\begin{document} -\maketitle - -\begin{abstract} -\end{abstract} -\SweaveOpts{engine=R,pdf=TRUE} - -\section{The standard event study in finance} - -In this section, we look at using the eventstudies package for the -purpose of doing the standard event study using daily returns data in -financial economics. This is a workhorse application of event -studies. 
The treatment here assumes knowledge of event studies -\citep{Corrado2011}. - -To conduct an event study, you must have a list of firms with -associated dates, and you must have returns data for these -firms. These dates must be stored as a simple data frame. To -illustrate this, we use the object `SplitDates' in the package which -is used for doing examples. - -<>= -library(eventstudies) -data(SplitDates) # The sample -str(SplitDates) # Just a data frame -head(SplitDates) -@ - -The representation of dates is a data frame with two columns. The -first column is the name of the unit of observation which experienced -the event. The second column is the event date. - -The second thing that is required for doing an event study is data for -stock price returns for all the firms. The sample dataset supplied in -the package is named `StockPriceReturns': - -<>= -data(StockPriceReturns) # The sample -str(StockPriceReturns) # A zoo object -head(StockPriceReturns,3) # Time series of dates and returns. -@ - -The StockPriceReturns object is thus a zoo object which is a time -series of daily returns. These are measured in per cent, i.e. a value -of +4 is returns of +4\%. The zoo object has many columns of returns -data, one for each unit of observation which, in this case, is a -firm. The column name of the zoo object must match the firm name -(i.e. the name of the unit of observation) in the list of events. - -The package gracefully handles the three kinds of problems encountered -with real world data: (a) a firm where returns is observed but there -is no event, (b) a firm with an event where returns data is lacking -and (c) a stream of missing data in the returns data surrounding the -event date. 
- -With this in hand, we are ready to run our first event study, using -raw returns: - -<>= -es <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, - width = 10, - type = "None", - to.remap = TRUE, - remap = "cumsum", - inference = TRUE, - inference.strategy = "bootstrap") -@ - -This runs an event study using events listed in SplitDates, and using -returns data for the firms in StockPriceReturns. An event window of 10 -days is analysed. - -Event studies with returns data typically do some kind of adjustment -of the returns data in order to reduce variance. In order to keep -things simple, in this first event study, we are doing no adjustment, -which is done by setting `type' to ``None''. - -While daily returns data has been supplied, the standard event study -deals with cumulated returns. In order to achieve this, we set -to.remap to TRUE and we ask that this remapping be done using cumsum. - -Finally, we come to inference strategy. We instruct eventstudy to do -inference and ask for bootstrap inference. - -Let us peek and poke at the object `es' that is returned. - -<>= -class(es) -str(es) -@ - -The object returned by eventstudy is of class `es'. It is a list with -five components. Three of these are just a record of the way -eventstudy() was run: the inference procedure adopted (bootstrap -inference in this case), the window width (10 in this case) and the -method used for mapping the data (cumsum). The two new things are -`outcomes' and `eventstudy.output'. - -The vector `outcomes' shows the disposition of each event in the -events table. There are 22 rows in SplitDates, hence there will be 22 -elements in the vector `outcomes'. In this vector, `success' denotes a -successful use of the event. When an event cannot be used properly, -various error codes are supplied. E.g. `unitmissing' is reported when -the events table shows an event for a unit of observation where -returns data is not observed. 
- -\begin{figure} -\begin{center} -<>= -par(mai=c(.8,.8,.2,.2)) -plot(es, cex.axis=.7, cex.lab=.7) -@ -\end{center} -\caption{Plot method applied to es object}\label{f:esplot1} -\end{figure} - -% TODO: The x label should be "Event time (days)" and should -% automatically handle other situations like weeks or months or microseconds. -% The y label is much too long. - -Plot and print methods for the class `es' are supplied. The standard -plot is illustrated in Figure \ref{f:esplot1}. In this case, we see -the 95\% confidence interval is above 0 and below 0 and in no case can -the null of no-effect, compared with the starting date (10 days before -the stock split date), be rejected. - -In this first example, raw stock market returns was utilised in the -event study. It is important to emphasise that the event study is a -statistically valid tool even under these circumstances. Averaging -across multiple events isolates the event-related -fluctuations. However, there is a loss of statistical efficiency that -comes from fluctuations of stock prices that can have nothing to do -with firm level news. In order to increase efficiency, we resort to -adjustment of the returns data. - -The standard methodology in the literature is to use a market -model. This estimates a time-series regression $r_{jt} = \alpha_j + -\beta_j r_{Mt} + \epsilon_{jt}$ where $r_{jt}$ is returns for firm $j$ -on date $t$, and $r_{Mt}$ is returns on the market index on date -$t$. The market index captures market-wide fluctuations, which have -nothing to do with firm-specific factors. The event study is then -conducted with the cumulated $\epsilon_{jt}$ time series. This yields -improved statistical efficiency as $\textrm{Var}(\epsilon_j) < -\textrm{Var}(r_j)$. 
- -This is invoked by setting `type' to `marketResidual': - -<>= -data(OtherReturns) -es.mm <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, - width = 10, - type = "marketResidual", - to.remap = TRUE, - remap = "cumsum", - inference = TRUE, - inference.strategy = "bootstrap", - market.returns=OtherReturns$NiftyIndex - ) -@ - -In addition to setting `type' to `marketResidual', we are now required -to supply data for the market index, $r_{Mt}$. In the above example, -this is the data object NiftyIndex supplied from the OtherReturns data -object in the package. This is just a zoo vector with daily returns of -the stock market index. - -\begin{figure} -\begin{center} -<>= -par(mai=c(.8,.8,.2,.2)) -plot(es.mm, cex.axis=.7, cex.lab=.7) -@ -\end{center} -\caption{Adjustment using the market model}\label{f:esplotmm} -\end{figure} - -A comparison of the range of the $y$ axis in Figure \ref{f:esplot1} -versus that seen in Figure \ref{f:esplotmm} shows the substantial -improvement in statistical efficiency that was obtained by market -model adjustment. - -We close our treatment of the standard finance event study with one -step forward on further reducing $\textrm{Var}(\epsilon)$: by doing -an `augmented market model' regression with more than one explanatory -variable. The augmented market model uses regressions like: - -\[ -r_{jt} = \alpha_j + \beta_{1,j} r_{M1,t} + \beta_{2,j} r_{M2,t} + \epsilon_{jt} -\] - -where in addition to the market index $r_{M1,t}$, there is an -additional explanatory variable $r_{M2,t}$. One natural candidate is -the returns on the exchange rate, but there are many other candidates. - -An extensive literature has worked out the unique problems of -econometrics that need to be addressed in doing augmented market -models.
The package uses the synthesis of this literature as presented -in \citet{patnaik2010amm}.\footnote{The source code for augmented - market models in the package is derived from the source code written - for \citet{patnaik2010amm}.} - -To repeat the stock splits event study using augmented market models, -we use the incantation: - -% Check some error -<>= -es.amm <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, - width = 10, - type = "lmAMM", - to.remap = TRUE, - remap = "cumsum", - inference = TRUE, - inference.strategy = "bootstrap", - market.returns=OtherReturns$NiftyIndex, - others=OtherReturns$USDINR, - market.returns.purge=TRUE - ) -@ - -Here the additional regressor on the augmented market model is the -returns on the exchange rate, which is the slot USDINR in -OtherReturns. The full capabilities for doing augmented market models -from \citet{patnaik2010amm} are available. These are documented -elsewhere. For the present moment, we will use the feature -market.returns.purge without explaining it. - -Let us look at the gains in statistical efficiency across the three -variants of the event study. We will use the width of the confidence -interval at date 0 as a measure of efficiency. - -<>= -tmp <- rbind(es$eventstudy.output[10,], es.mm$eventstudy.output[10,])[,c(1,3)] -rownames(tmp) <- c("None","MM") -tmp[,2]-tmp[,1] -@ - -This shows a sharp reduction in the width of the bootstrap 95\% -confidence interval from None to MM adjustment. Over and above this, a -small gain is obtained when going from MM adjustment to AMM -adjustment. 
- -\newpage -\bibliographystyle{jss} \bibliography{es} - -\end{document} From noreply at r-forge.r-project.org Thu May 15 20:16:29 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 20:16:29 +0200 (CEST) Subject: [Eventstudies-commits] r345 - pkg/man Message-ID: <20140515181629.DAF06186F21@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 20:16:29 +0200 (Thu, 15 May 2014) New Revision: 345 Modified: pkg/man/eesInference.Rd pkg/man/eesSummary.Rd Log: Modified language and examples. Modified: pkg/man/eesInference.Rd =================================================================== --- pkg/man/eesInference.Rd 2014-05-15 17:24:26 UTC (rev 344) +++ pkg/man/eesInference.Rd 2014-05-15 18:16:29 UTC (rev 345) @@ -4,7 +4,8 @@ \title{Extreme event study inference estimation} \description{This function performs event study analysis on extreme event dates - (\sQuote{eesDates}) and formatted output (\sQuote{get.clusters.formatted}) + (\sQuote{eesDates}) and formatted output using + (\sQuote{get.clusters.formatted}) } \usage{ @@ -13,82 +14,84 @@ } \arguments{ - \item{input}{ - an output object of \sQuote{get.clusters.formatted} - } + \item{input}{ + a formatted cluster object, as returned by + \sQuote{get.clusters.formatted} function. + } - \item{eventLists}{ - an output object of \sQuote{eesDates}, which provides event list - for normal and purged events - } + \item{eventLists}{ + a \sQuote{list} of normal and purged events as returned by + \sQuote{eesDates}. + } - \item{width}{ - an \sQuote{integer} of length 1 that specifies a - symmetric event window around the event date. - } + \item{width}{ + an \sQuote{integer} of length 1 that specifies a + symmetric event window around the event date. 
+ } - \item{to.remap}{ - \sQuote{logical}, indicating whether or not to remap - the data in \sQuote{input}.The default setting is \sQuote{TRUE} - } + \item{to.remap}{ + \sQuote{logical}, indicating whether or not to remap + the data in \sQuote{input}.The default setting is \sQuote{TRUE} + } - \item{remap}{ - \sQuote{character}, indicating the type of remap required, - \dQuote{cumsum}, \dQuote{cumprod}, or \dQuote{reindex}. Used when - \sQuote{to.remap} is \sQuote{TRUE}. - } + \item{remap}{ + \sQuote{character}, indicating the type of remap required, + \dQuote{cumsum}, \dQuote{cumprod}, or \dQuote{reindex}. Used when + \sQuote{to.remap} is \sQuote{TRUE}. + } - \item{inference}{ - \sQuote{logical}, specifying whether to undertake statistical - inference and compute confidence intervals. The default setting is - \sQuote{TRUE}. - } + \item{inference}{ + \sQuote{logical}, specifying whether to undertake statistical + inference and compute confidence intervals. The default setting is + \sQuote{TRUE}. + } - \item{inference.strategy}{a \sQuote{character} scalar specifying the - inference strategy to be used for estimating the confidence - interval. Presently, two methods are available: \dQuote{bootstrap} - and \dQuote{wilcox}. The default setting is \sQuote{bootstrap}. - } + \item{inference.strategy}{a \sQuote{character} scalar specifying the + inference strategy to be used for estimating the confidence + interval. Presently, two methods are available: \dQuote{bootstrap} + and \dQuote{wilcox}. The default setting is \sQuote{bootstrap}. + } } -\details{ - This function performs event study analysis on the extreme event dates of normal - (unclustered events) and purged (clustered and unclustered events) sets. These - dates are obtained from function \sQuote{eesDates}. The function also estimates - confidence interval using different inference strategies - (\sQuote{bootstrap,wilcoxon}). 
The functionalities are similar to - \sQuote{eventstudy} function without market model adjustment and \sQuote{input} - is output of \sQuote{get.clusters.formatted}, not \sQuote{firm.returns}. +\details{ This function performs event study analysis using + \code{eventstudy} function on the extreme event dates of normal + (unclustered events) and purged (clustered and unclustered events) + sets. These interesting dates are obtained from function \sQuote{eesDates}. The + function can estimate confidence interval using different inference + strategies as provided by \code{eventstudy()}. + + The function does not do market model adjustment but takes the + output of \code{get.clusters.formatted} as it's input. } -\value{ - Format of event study output is a \sQuote{matrix} containing mean (bootstrap) - or median (with wilcoxon) estimate with confidence interval; \sQuote{NULL} if there - are no \dQuote{success} \dQuote{outcomes} - - A list with class attribute \dQuote{ees} holding the - following four event study output elements: +\value{ Format of event study output is a \sQuote{matrix} containing + mean or median estimate with confidence interval; \sQuote{NULL} if + there are no \dQuote{success} \dQuote{outcomes}. See + \link{\code{phys2eventtime}} for more details. 
+ + A \sQuote{list} with class attribute \dQuote{ees} holding the + following four event study output elements: - \item{good.normal}{ - an event study inference \sQuote{matrix} for right tail unclustered events, - termed as normal - } - - \item{bad.normal}{ - an event study inference \sQuote{matrix} for left tail unclustered events, - termed as normal - } + \item{good.normal}{ + an event study inference \sQuote{matrix} for right tail unclustered events, + termed as normal + } + + \item{bad.normal}{ + an event study inference \sQuote{matrix} for left tail unclustered events, + termed as normal + } - \item{good.purged}{ - an event study inference \sQuote{matrix} for right tail clustered and unclustered - events, termed as purged - } - - \item{bad.purged}{ - an event study inference \sQuote{matrix} for left tail clustered and unclustered - events, termed as purged - } + \item{good.purged}{ + an event study inference \sQuote{matrix} for right tail clustered and unclustered + events, termed as purged + } + + \item{bad.purged}{ + an event study inference \sQuote{matrix} for left tail clustered and unclustered + events, termed as purged + } } \references{ @@ -105,14 +108,14 @@ \examples{ data(OtherReturns) -## Formatting extreme event dates -input <- get.clusters.formatted(event.series = OtherReturns[,"SP500"], - response.series = OtherReturns[,"NiftyIndex"]) -## Extracting event dates -event.lists <- eesDates(input) +formattedClusters <- get.clusters.formatted(event.series = OtherReturns[, "SP500"], + response.series = OtherReturns[, "NiftyIndex"]) -## Performing event study analysis and computing inference -inf <- eesInference(input = input, eventLists = event.lists, width = 5) -str(inf, max.level = 2) +event.lists <- eesDates(formattedClusters) + +inference <- eesInference(input = formattedClusters, + eventLists = event.lists, + width = 5) +str(inference, max.level = 2) } Modified: pkg/man/eesSummary.Rd 
=================================================================== --- pkg/man/eesSummary.Rd 2014-05-15 17:24:26 UTC (rev 344) +++ pkg/man/eesSummary.Rd 2014-05-15 18:16:29 UTC (rev 345) @@ -13,15 +13,15 @@ } \arguments{ - \item{input}{an output object of \sQuote{get.clusters.formatted}} + \item{input}{object returned by \sQuote{get.clusters.formatted}} } -\details{ - This function generates summary statistics of extreme events, - using the tail events as defined in the function \sQuote{get.clusters.formatted}. +\details{This function generates summary statistics of extreme events, + using the tail events as returned by the function + \sQuote{get.clusters.formatted}. - Following statistics is generated for both lower and upper tail - events: + Following statistics are generated for both lower and upper tail + events: \itemize{ \item \sQuote{extreme.event.distribution} provides summary @@ -47,7 +47,7 @@ \value{ A \code{list} object containing: \item{data.summary}{a \sQuote{data.frame} containing summary of - the data set minimum, maximum, inter-quartile range, mean, median, + the minimum, maximum, inter-quartile range, mean, median, standard deviation and quantile values at 5\%, 25\%, 75\% and 95\%.} \item{lower.tail}{a \sQuote{list} that contains From noreply at r-forge.r-project.org Thu May 15 20:20:59 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 20:20:59 +0200 (CEST) Subject: [Eventstudies-commits] r346 - pkg/man Message-ID: <20140515182059.70A8C187126@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 20:20:59 +0200 (Thu, 15 May 2014) New Revision: 346 Modified: pkg/man/eesInference.Rd Log: Fixed link text. 
Modified: pkg/man/eesInference.Rd =================================================================== --- pkg/man/eesInference.Rd 2014-05-15 18:16:29 UTC (rev 345) +++ pkg/man/eesInference.Rd 2014-05-15 18:20:59 UTC (rev 346) @@ -68,7 +68,7 @@ \value{ Format of event study output is a \sQuote{matrix} containing mean or median estimate with confidence interval; \sQuote{NULL} if there are no \dQuote{success} \dQuote{outcomes}. See - \link{\code{phys2eventtime}} for more details. + \code{\link{phys2eventtime}} for more details. A \sQuote{list} with class attribute \dQuote{ees} holding the following four event study output elements: From noreply at r-forge.r-project.org Thu May 15 20:22:32 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 20:22:32 +0200 (CEST) Subject: [Eventstudies-commits] r347 - pkg/man Message-ID: <20140515182232.2DF3118714B@r-forge.r-project.org> Author: vikram Date: 2014-05-15 20:22:31 +0200 (Thu, 15 May 2014) New Revision: 347 Modified: pkg/man/eesSummary.Rd pkg/man/lmAMM.Rd pkg/man/makeX.Rd pkg/man/manyfirmssubperiod.lmAMM.Rd pkg/man/subperiod.lmAMM.Rd Log: Corrected manual examples Modified: pkg/man/eesSummary.Rd =================================================================== --- pkg/man/eesSummary.Rd 2014-05-15 18:20:59 UTC (rev 346) +++ pkg/man/eesSummary.Rd 2014-05-15 18:22:31 UTC (rev 347) @@ -77,7 +77,10 @@ \examples{ data(OtherReturns) - -ees.summary.tables <- eesSummary(OtherReturns$SP500) +## Formatting extreme event dates +input <- get.clusters.formatted(event.series = OtherReturns[,"SP500"], + response.series = OtherReturns[,"NiftyIndex"]) +## Extreme event summary tables +ees.summary.tables <- eesSummary(input) str(ees.summary.tables, max.level = 2) } Modified: pkg/man/lmAMM.Rd =================================================================== --- pkg/man/lmAMM.Rd 2014-05-15 18:20:59 UTC (rev 346) +++ pkg/man/lmAMM.Rd 2014-05-15 18:22:31 UTC (rev 347) @@ -82,7 +82,7 @@ 
firm.returns <- StockPriceReturns[, "Infosys"] market.returns <- OtherReturns[ ,"NiftyIndex"] -currency.returns <- OtherReturns[, "INRUSD"] +currency.returns <- OtherReturns[, "USDINR"] X <- makeX(market.returns, others = currency.returns, @@ -97,11 +97,11 @@ amm.residual <- zoo(amm.residual, order.by = as.Date(names(amm.residual))) -comparison <- merge(AMMResidual = amm.residual, +Comparison <- merge(AMMResidual = amm.residual, Infosys = StockPriceReturns$Infosys, NiftyIndex = OtherReturns$NiftyIndex, all = FALSE) -plot(comparison) +plot(Comparison, xlab="") } \keyword{lmAMM} Modified: pkg/man/makeX.Rd =================================================================== --- pkg/man/makeX.Rd 2014-05-15 18:20:59 UTC (rev 346) +++ pkg/man/makeX.Rd 2014-05-15 18:22:31 UTC (rev 347) @@ -77,7 +77,7 @@ \examples{ data("OtherReturns") market.returns <- OtherReturns$NiftyIndex -currency.returns <- OtherReturns$INRUSD +currency.returns <- OtherReturns$USDINR X <- makeX(market.returns, others = currency.returns, Modified: pkg/man/manyfirmssubperiod.lmAMM.Rd =================================================================== --- pkg/man/manyfirmssubperiod.lmAMM.Rd 2014-05-15 18:20:59 UTC (rev 346) +++ pkg/man/manyfirmssubperiod.lmAMM.Rd 2014-05-15 18:22:31 UTC (rev 347) @@ -59,11 +59,12 @@ } \examples{ +data("StockPriceReturns") data("OtherReturns") firm.returns <- StockPriceReturns[, c("Infosys","TCS")] market.returns <- OtherReturns$NiftyIndex -currency.returns <- OtherReturns$INRUSD +currency.returns <- OtherReturns$USDINR X <- makeX(market.returns, others = currency.returns, @@ -71,12 +72,12 @@ switch.to.innov = FALSE, market.returns.purge = FALSE, verbose = FALSE, - dates = as.Date(c("2012-02-01", "2013-01-01", "2014-01-20"))) + dates = as.Date(c("2010-07-01", "2011-11-17", "2013-03-29"))) res <- manyfirmssubperiod.lmAMM(firm.returns = firm.returns, X = X, lags = 1, - dates = as.Date(c("2012-02-01", "2013-01-01", "2014-01-20")), + dates = as.Date(c("2010-07-01", 
"2011-11-17", "2013-03-29")), periodnames = c("P1", "P2"), verbose = FALSE) print(res) Modified: pkg/man/subperiod.lmAMM.Rd =================================================================== --- pkg/man/subperiod.lmAMM.Rd 2014-05-15 18:20:59 UTC (rev 346) +++ pkg/man/subperiod.lmAMM.Rd 2014-05-15 18:22:31 UTC (rev 347) @@ -77,16 +77,14 @@ switch.to.innov = TRUE, market.returns.purge = TRUE, nlags = 1, - dates = as.Date(c("2012-02-01","2013-01-01","2014-01-20")), + dates = as.Date(c("2010-07-01", "2011-11-17", "2013-03-29")), verbose = FALSE) res <- subperiod.lmAMM(firm.returns, X = regressors, nlags = 1, verbose = FALSE, - dates = as.Date(c("2012-02-01", - "2013-01-01", - "2014-01-20"))) + dates = as.Date(c("2010-07-01", "2011-11-17", "2013-03-29"))) str(res) } \keyword{subperiod.lmAMM} From noreply at r-forge.r-project.org Thu May 15 20:27:01 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 20:27:01 +0200 (CEST) Subject: [Eventstudies-commits] r348 - pkg/man Message-ID: <20140515182701.B0E3D187240@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 20:27:01 +0200 (Thu, 15 May 2014) New Revision: 348 Modified: pkg/man/eesSummary.Rd Log: Modified variable name in the example. 
Modified: pkg/man/eesSummary.Rd =================================================================== --- pkg/man/eesSummary.Rd 2014-05-15 18:22:31 UTC (rev 347) +++ pkg/man/eesSummary.Rd 2014-05-15 18:27:01 UTC (rev 348) @@ -77,10 +77,10 @@ \examples{ data(OtherReturns) -## Formatting extreme event dates -input <- get.clusters.formatted(event.series = OtherReturns[,"SP500"], - response.series = OtherReturns[,"NiftyIndex"]) -## Extreme event summary tables -ees.summary.tables <- eesSummary(input) + +formattedClusters <- get.clusters.formatted(event.series = OtherReturns[, "SP500"], + response.series = OtherReturns[, "NiftyIndex"]) + +ees.summary.tables <- eesSummary(formattedClusters) str(ees.summary.tables, max.level = 2) } From noreply at r-forge.r-project.org Thu May 15 20:44:20 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 20:44:20 +0200 (CEST) Subject: [Eventstudies-commits] r349 - in pkg: R data inst/tests man Message-ID: <20140515184420.C9726185EF9@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 20:44:20 +0200 (Thu, 15 May 2014) New Revision: 349 Modified: pkg/R/eesInference.R pkg/R/phys2eventtime.R pkg/data/SplitDates.rda pkg/inst/tests/test_SplitDates.rda pkg/inst/tests/test_eventstudy.R pkg/inst/tests/test_inr_inference.R pkg/inst/tests/test_interfaces.R pkg/man/eventstudy.Rd pkg/man/phys2eventtime.Rd Log: Changed colnames of event list. 
Modified: pkg/R/eesInference.R =================================================================== --- pkg/R/eesInference.R 2014-05-15 18:27:01 UTC (rev 348) +++ pkg/R/eesInference.R 2014-05-15 18:44:20 UTC (rev 349) @@ -693,18 +693,18 @@ days.bad.purged <- index(data.no.cluster[which(data.no.cluster[,"left.tail"]==1)]) days.good.purged <- index(data.no.cluster[which(data.no.cluster[,"right.tail"]==1)]) ## Event list - events.good.normal <- data.frame(outcome.unit=rep("response.series", + events.good.normal <- data.frame(when=rep("response.series", length(days.good.normal)), - event.when=days.good.normal) - events.bad.normal <- data.frame(outcome.unit=rep("response.series", + when=days.good.normal) + events.bad.normal <- data.frame(name=rep("response.series", length(days.bad.normal)), - event.when=days.bad.normal) - events.good.purged <- data.frame(outcome.unit=rep("response.series", + when=days.bad.normal) + events.good.purged <- data.frame(name=rep("response.series", length(days.good.purged)), - event.when=days.good.purged) - events.bad.purged <- data.frame(outcome.unit=rep("response.series", + when=days.good.purged) + events.bad.purged <- data.frame(name=rep("response.series", length(days.bad.purged)), - event.when=days.bad.purged) + when=days.bad.purged) dates <- list(events.good.normal=events.good.normal, events.bad.normal=events.bad.normal, events.good.purged=events.good.purged, Modified: pkg/R/phys2eventtime.R =================================================================== --- pkg/R/phys2eventtime.R 2014-05-15 18:27:01 UTC (rev 348) +++ pkg/R/phys2eventtime.R 2014-05-15 18:44:20 UTC (rev 349) @@ -2,8 +2,8 @@ # z is a zoo object containing input data. E.g. this could be all the # prices of a bunch of stocks. The column name is the unit name. # events is a data.frame containing 2 columns. The first column -# ("outcome.unit") is the name of the unit. The second column is the date/time -# ("event.when") when the event happened. 
+# ("name") is the name of the unit. The second column is the date/time +# ("when") when the event happened. # For each event, the outcome can be: # unitmissing : a unit named in events isn't in z # wrongspan : the event date isn't placed within the span of data for the unit @@ -13,9 +13,9 @@ phys2eventtime <- function(z, events, width=10) { ## Ensuring class of event matrix - events$outcome.unit <- as.character(events$outcome.unit) - if(is.factor(events$event.when)) { - stop("The column 'event.when' cannot be a factor. Cannot proceed with data manipulation.") + events$name <- as.character(events$name) + if(is.factor(events$when)) { + stop("The column 'when' cannot be a factor. Cannot proceed with data manipulation.") } ## z: physical time matrix. Check dimensions of "z" Modified: pkg/data/SplitDates.rda =================================================================== (Binary files differ) Modified: pkg/inst/tests/test_SplitDates.rda =================================================================== (Binary files differ) Modified: pkg/inst/tests/test_eventstudy.R =================================================================== --- pkg/inst/tests/test_eventstudy.R 2014-05-15 18:27:01 UTC (rev 348) +++ pkg/inst/tests/test_eventstudy.R 2014-05-15 18:44:20 UTC (rev 349) @@ -14,12 +14,12 @@ 12426, 12429, 12430, 12431, 12432), class = "Date"), class = "zoo") # An example events list -eventslist <- data.frame(outcome.unit=c("ITC","Reliance","Infosys", +eventslist <- data.frame(name=c("ITC","Reliance","Infosys", "ITC","Reliance","Junk"), - event.when=as.Date(c( + when=as.Date(c( "2004-01-02", "2004-01-08", "2004-01-14", "2005-01-15", "2004-01-01", "2005-01-01"))) -eventslist$outcome.unit <- as.character(eventslist$outcome.unit) +eventslist$name <- as.character(eventslist$name) # What we expect if we don't worry about width -- rawres <- structure(list(z.e = structure(c(NA, NA, NA, NA, NA, NA, @@ -62,7 +62,7 @@ cat("\nTesting handling of missing data on event date: ") 
eventdate <- "2004-01-10" eventdate_output <- "2004-01-09" -eventslist <- data.frame(outcome.unit = "ITC", event.when = eventdate, +eventslist <- data.frame(name = "ITC", when = eventdate, stringsAsFactors = FALSE) a <- phys2eventtime(p, eventslist, width = 2) expect_that(as.numeric(a$z.e["0",]), Modified: pkg/inst/tests/test_inr_inference.R =================================================================== --- pkg/inst/tests/test_inr_inference.R 2014-05-15 18:27:01 UTC (rev 348) +++ pkg/inst/tests/test_inr_inference.R 2014-05-15 18:44:20 UTC (rev 349) @@ -7,8 +7,8 @@ inr_returns <- diff(log(INR))[-1] -eventslist <- data.frame(outcome.unit=rep("inr",10), - event.when=as.Date(c( +eventslist <- data.frame(name=rep("inr",10), + when=as.Date(c( "2010-04-20","2010-07-02","2010-07-27", "2010-09-16","2010-11-02","2011-01-25", "2011-03-17","2011-05-03","2011-06-16", Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-15 18:27:01 UTC (rev 348) +++ pkg/inst/tests/test_interfaces.R 2014-05-15 18:44:20 UTC (rev 349) @@ -13,8 +13,8 @@ 2.37333344340029) expected_outcomes <- c("success", "success") - test_events <- data.frame(outcome.unit = "ONGC", - event.when = c("2011-08-01", "2010-05-14"), + test_events <- data.frame(name = "ONGC", + when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", drop = FALSE] @@ -34,8 +34,8 @@ 0.904144365357373, -0.806779427723603) expected_outcomes <- c("success", "success") - test_events <- data.frame(outcome.unit = "ONGC", - event.when = c("2011-08-01", "2010-05-14"), + test_events <- data.frame(name = "ONGC", + when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", drop = FALSE] @@ -78,7 +78,7 @@ 1.15001275036422, 2.88646832315114, 3.32315429568726) expected_outcomes 
<- c("success", "success") - test_events <- data.frame(outcome.unit = "ONGC", + test_events <- data.frame(name = "ONGC", event.when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", @@ -96,7 +96,7 @@ ### Remapping cat("\nChecking remapping: ") - test_events <- data.frame(outcome.unit = "ONGC", + test_events <- data.frame(name = "ONGC", event.when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) test_returns <- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", @@ -178,8 +178,8 @@ load("test_StockPriceReturns.rda") cat("\nChecking single series handling: ") - test_events <- data.frame(outcome.unit = "ONGC", - event.when = c("2011-08-01", "2010-05-14"), + test_events <- data.frame(name = "ONGC", + when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) test_returns<- StockPriceReturns$ONGC expect_error(eventstudy(firm.returns = test_returns, Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-15 18:27:01 UTC (rev 348) +++ pkg/man/eventstudy.Rd 2014-05-15 18:44:20 UTC (rev 349) @@ -31,8 +31,8 @@ \item{eventList}{ a \code{data.frame} of two columns with event dates (colname: - \dQuote{event.when}) and column names of the \sQuote{response} - series from \sQuote{firm.returns} (colname \dQuote{outcome.unit}). + \dQuote{when}) and column names of the \sQuote{response} + series from \sQuote{firm.returns} (colname \dQuote{name}). 
} \item{width}{an \sQuote{integer} of length 1 that specifies a @@ -245,8 +245,8 @@ plot(es) # Event study using Augmented Market Model -events <- data.frame(outcome.unit = c("Infosys", "TCS"), - event.when = c("2012-04-01", "2012-06-01"), +events <- data.frame(name = c("Infosys", "TCS"), + when = c("2012-04-01", "2012-06-01"), stringsAsFactors = FALSE) es <- eventstudy(firm.returns = StockPriceReturns, Modified: pkg/man/phys2eventtime.Rd =================================================================== --- pkg/man/phys2eventtime.Rd 2014-05-15 18:27:01 UTC (rev 348) +++ pkg/man/phys2eventtime.Rd 2014-05-15 18:44:20 UTC (rev 349) @@ -29,8 +29,8 @@ \details{ - \dQuote{events} object contains two columns: \dQuote{outcome.unit} - consists of names of the event, and \dQuote{event.when} is the + \dQuote{events} object contains two columns: \dQuote{name} + consists of names of the event, and \dQuote{when} is the respective event identifier. For instance, if \sQuote{z} is a matrix of class \pkg{xts} with 10 stocks over 300 days, the names of stocks in \sQuote{z} is the superset of names for the event and the time From noreply at r-forge.r-project.org Thu May 15 21:16:22 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 21:16:22 +0200 (CEST) Subject: [Eventstudies-commits] r350 - pkg/R Message-ID: <20140515191622.3EF3B186F1F@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 21:16:21 +0200 (Thu, 15 May 2014) New Revision: 350 Modified: pkg/R/phys2eventtime.R Log: Fixed bug related to order of when and name columns. 
Modified: pkg/R/phys2eventtime.R =================================================================== --- pkg/R/phys2eventtime.R 2014-05-15 18:44:20 UTC (rev 349) +++ pkg/R/phys2eventtime.R 2014-05-15 19:16:21 UTC (rev 350) @@ -24,16 +24,16 @@ } timeshift <- function(x) { - firm.present <- match(x[1], colnames(z), nomatch = -1) != -1 + firm.present <- match(x["name"], colnames(z), nomatch = -1) != -1 if (!firm.present) { return(list(result=NULL, outcome="unitmissing")) } ## Take previous date if exact data is not found. - location <- findInterval(as.Date(x[2]), index(z[, x[1]])) + location <- findInterval(as.Date(x["when"]), index(z[, x["name"]])) if ((location <= 1) | (location >= length(index(z)))) { return(list(result=NULL, outcome="wrongspan")) } - remapped <- zoo(as.numeric(z[,x[1]]), order.by=(-location+1):(length(z[,x[1]])-location)) + remapped <- zoo(as.numeric(z[,x["name"]]), order.by=(-location+1):(length(z[,x["name"]])-location)) return(list(result=remapped, outcome="success")) } From noreply at r-forge.r-project.org Thu May 15 21:16:43 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 21:16:43 +0200 (CEST) Subject: [Eventstudies-commits] r351 - pkg/inst/tests Message-ID: <20140515191643.34277186F3F@r-forge.r-project.org> Author: chiraganand Date: 2014-05-15 21:16:42 +0200 (Thu, 15 May 2014) New Revision: 351 Modified: pkg/inst/tests/test_interfaces.R Log: Fixed tests based on the new colnames of event list. 
Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-15 19:16:21 UTC (rev 350) +++ pkg/inst/tests/test_interfaces.R 2014-05-15 19:16:42 UTC (rev 351) @@ -55,8 +55,8 @@ expected_outcomes <- c("success", "success") - test_events <- data.frame(outcome.unit = "ONGC", - event.when = c("2011-08-01", "2010-05-14"), + test_events <- data.frame(name = "ONGC", + when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", drop = FALSE] @@ -79,7 +79,7 @@ expected_outcomes <- c("success", "success") test_events <- data.frame(name = "ONGC", - event.when = c("2011-08-01", "2010-05-14"), + when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", drop = FALSE] @@ -97,7 +97,7 @@ ### Remapping cat("\nChecking remapping: ") test_events <- data.frame(name = "ONGC", - event.when = c("2011-08-01", "2010-05-14"), + when = c("2011-08-01", "2010-05-14"), stringsAsFactors = FALSE) test_returns <- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", drop = FALSE] From noreply at r-forge.r-project.org Thu May 15 21:33:49 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 15 May 2014 21:33:49 +0200 (CEST) Subject: [Eventstudies-commits] r352 - pkg/R Message-ID: <20140515193349.EAA8E1873A4@r-forge.r-project.org> Author: vikram Date: 2014-05-15 21:33:49 +0200 (Thu, 15 May 2014) New Revision: 352 Modified: pkg/R/eesInference.R Log: Corrected eesDate code by changing outcome.unit to name and event.when to when Modified: pkg/R/eesInference.R =================================================================== --- pkg/R/eesInference.R 2014-05-15 19:16:42 UTC (rev 351) +++ pkg/R/eesInference.R 2014-05-15 19:33:49 UTC (rev 352) @@ -693,7 +693,7 @@ days.bad.purged <- 
index(data.no.cluster[which(data.no.cluster[,"left.tail"]==1)]) days.good.purged <- index(data.no.cluster[which(data.no.cluster[,"right.tail"]==1)]) ## Event list - events.good.normal <- data.frame(when=rep("response.series", + events.good.normal <- data.frame(name=rep("response.series", length(days.good.normal)), when=days.good.normal) events.bad.normal <- data.frame(name=rep("response.series", From noreply at r-forge.r-project.org Fri May 16 03:32:09 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 03:32:09 +0200 (CEST) Subject: [Eventstudies-commits] r353 - pkg/man Message-ID: <20140516013209.764FC18749E@r-forge.r-project.org> Author: chiraganand Date: 2014-05-16 03:32:08 +0200 (Fri, 16 May 2014) New Revision: 353 Modified: pkg/man/OtherReturns.Rd pkg/man/eesDates.Rd pkg/man/get.clusters.formatted.Rd Log: Changed formatting and language. Modified: pkg/man/OtherReturns.Rd =================================================================== --- pkg/man/OtherReturns.Rd 2014-05-15 19:33:49 UTC (rev 352) +++ pkg/man/OtherReturns.Rd 2014-05-16 01:32:08 UTC (rev 353) @@ -3,7 +3,7 @@ \docType{data} \title{Data set containing daily returns of Nifty index, USD INR, call momey - rate, and S&P 500 index.} + rate, and S&P 500 index} \description{This data set consists of daily time series of market returns (Nifty index and S&P 500 index), currency returns (USD/INR), Modified: pkg/man/eesDates.Rd =================================================================== --- pkg/man/eesDates.Rd 2014-05-15 19:33:49 UTC (rev 352) +++ pkg/man/eesDates.Rd 2014-05-16 01:32:08 UTC (rev 353) @@ -1,7 +1,7 @@ \name{eesDates} \alias{eesDates} -\title{Event list for extreme event study analysis } +\title{Get event list for extreme event study analysis} \description{ This function creates event list (clustered and unclustered events) for extreme event study analysis. 
Modified: pkg/man/get.clusters.formatted.Rd =================================================================== --- pkg/man/get.clusters.formatted.Rd 2014-05-15 19:33:49 UTC (rev 352) +++ pkg/man/get.clusters.formatted.Rd 2014-05-16 01:32:08 UTC (rev 353) @@ -1,12 +1,16 @@ \name{get.clusters.formatted} \alias{get.clusters.formatted} -\title{Extreme event analysis (ees)} -\description{Formats extreme event dates, dealing with clusters in the event frame} +\title{Get formatted clusters to perform extreme event analysis (ees)} +\description{The functions formats extreme event dates, dealing with + clusters in the event frame.} \usage{ - get.clusters.formatted(event.series, response.series, probvalue = 5, - event.value = "nonreturns", response.value = "nonreturns") +get.clusters.formatted(event.series, + response.series, + probvalue = 5, + event.value = "nonreturns", + response.value = "nonreturns") } \arguments{ @@ -31,34 +35,35 @@ } \details{ - Tail (Rare) events are often the object of interest in finance. These - events are defined as those that have a low probability of - occurrence. This function identifies such events based on - \sQuote{probvalue} mentioned by the user and generates summary - statistics about the events. If \sQuote{probvalue} is 2.5\%, events - below 2.5\% (lower tail) and above 97.5\% (upper tail) of the - distribution are identified as extreme events. - - Once the extreme events are defined, this function further formats the events. The - extreme event functionality is muddled if we have another event occurring in the - event time frame. Following the methodology of Patnaik. Shah and Singh (2013), we - handle clustered events. Clustered events are handled in following ways: + Tail (Rare) events are often the object of interest in finance. These + events are defined as those that have a low probability of + occurrence. 
This function identifies such events based on + \sQuote{probvalue} mentioned by the user and generates summary + statistics about the events. If \sQuote{probvalue} is 2.5\%, events + below 2.5\% (lower tail) and above 97.5\% (upper tail) of the + distribution are identified as extreme events. - \itemize{ - \item Clustered events which are defined as consecutive events, are fused - into a single event and respective returns of response series are also fused. - \item Mixed clusters are the left and right tail events occurring on - consecutive days. These are identified and discarded from the analysis. - } - + Once the extreme events are defined, this function further formats the + events. The extreme event functionality is muddled if we have another + event occurring in the event time frame. Following the methodology of + Patnaik. Shah and Singh (2013), we handle clustered events. Clustered + events are handled in following ways: + + \itemize{ + \item Clustered events which are defined as consecutive events, are fused + into a single event and respective returns of response series are also fused. + \item Mixed clusters are the left and right tail events occurring on + consecutive days. These are identified and discarded from the analysis. + } } \value{ - A \pkg{zoo} object is returned with formatted \sQuote{event.series} and - \sQuote{response.series}. It also has separate columns to identify tail events, - named \sQuote{left.tail} and \sQuote{right.tail}, with binary outcome (1 equals - tail event). Finally, the object has column named \sQuote{cluster.pattern} which - identifies the length of the cluster in the event series. + A \pkg{zoo} object is returned with formatted \sQuote{event.series} + and \sQuote{response.series}. It also has separate columns to identify + tail events, named \sQuote{left.tail} and \sQuote{right.tail}, with + binary outcome (1 equals tail event). 
Finally, the object has column + named \sQuote{cluster.pattern} which identifies the length of the + cluster in the event series. } \references{ @@ -78,6 +83,6 @@ gcf <- get.clusters.formatted(event.series = OtherReturns$SP500, response.series = OtherReturns$NiftyIndex) - + str(gcf, max.level = 2) } From noreply at r-forge.r-project.org Fri May 16 08:05:35 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 08:05:35 +0200 (CEST) Subject: [Eventstudies-commits] r354 - pkg/man Message-ID: <20140516060535.A79D2187162@r-forge.r-project.org> Author: chiraganand Date: 2014-05-16 08:05:35 +0200 (Fri, 16 May 2014) New Revision: 354 Modified: pkg/man/eventstudy.Rd Log: Fixed input information in attributes and not part of the list. Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-16 01:32:08 UTC (rev 353) +++ pkg/man/eventstudy.Rd 2014-05-16 06:05:35 UTC (rev 354) @@ -160,13 +160,14 @@ elements: \itemize{ - \item{eventstudy.output}{ + \item{eventstudy.output:}{ a \sQuote{matrix} containing mean (bootstrap) or median (with wilcoxon) estimate with confidence interval; \sQuote{NULL} if there are no \dQuote{success} \dQuote{outcomes}. } - \item{outcomes}{a character vector that is the output from + \item{outcomes:}{ + a character vector that is the output from \code{\link{phys2eventtime}} containing details of the successful use of an event: @@ -179,7 +180,10 @@ \item{unitmissing: when the unit (firm name) is missing in the event list.} } } + } + The returned object contains input information in other attributes: + \itemize{ \item{inference}{ a \sQuote{character} providing information about which inference strategy was utilised to estimate the confidence intervals. 
From noreply at r-forge.r-project.org Fri May 16 08:11:45 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 08:11:45 +0200 (CEST) Subject: [Eventstudies-commits] r355 - pkg/man Message-ID: <20140516061145.B9D90185DEB@r-forge.r-project.org> Author: chiraganand Date: 2014-05-16 08:11:45 +0200 (Fri, 16 May 2014) New Revision: 355 Modified: pkg/man/eventstudy.Rd Log: Fixed formatting of output variables. Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-16 06:05:35 UTC (rev 354) +++ pkg/man/eventstudy.Rd 2014-05-16 06:11:45 UTC (rev 355) @@ -160,13 +160,13 @@ elements: \itemize{ - \item{eventstudy.output:}{ + \item{\dQuote{eventstudy.output}:}{ a \sQuote{matrix} containing mean (bootstrap) or median (with wilcoxon) estimate with confidence interval; \sQuote{NULL} if there are no \dQuote{success} \dQuote{outcomes}. } - \item{outcomes:}{ + \item{\dQuote{outcomes}:}{ a character vector that is the output from \code{\link{phys2eventtime}} containing details of the successful use of an event: @@ -184,16 +184,16 @@ The returned object contains input information in other attributes: \itemize{ - \item{inference}{ + \item{\dQuote{inference}:}{ a \sQuote{character} providing information about which inference strategy was utilised to estimate the confidence intervals. } - \item{width}{ + \item{\dQuote{width}:}{ a \sQuote{numeric} specifying the window width for event study output. } - \item{remap}{ + \item{\dQuote{remap}:}{ a \sQuote{character} specifying the remapping technique used. Options are mentioned in \dQuote{remap} argument description. 
} From noreply at r-forge.r-project.org Fri May 16 08:15:18 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 08:15:18 +0200 (CEST) Subject: [Eventstudies-commits] r356 - pkg/vignettes Message-ID: <20140516061518.4CAD6185C47@r-forge.r-project.org> Author: chiraganand Date: 2014-05-16 08:15:17 +0200 (Fri, 16 May 2014) New Revision: 356 Modified: pkg/vignettes/eventstudies.Rnw Log: Modified language. Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-16 06:11:45 UTC (rev 355) +++ pkg/vignettes/eventstudies.Rnw 2014-05-16 06:15:17 UTC (rev 356) @@ -32,11 +32,15 @@ \citep{Corrado2011}. To conduct an event study, you must have a list of firms with -associated dates, and you must have returns data for these -firms. These dates must be stored as a simple data frame. To -illustrate this, we use the object \emph{SplitDates} in the package which -is used for doing examples. +associated dates, and you must have returns data for these firms. In +order to use the package, you have to place your data into two +objects, using certain conventions for the dates and certain +conventions for the returns data. +The dates must be stored as a simple \texttt{data.frame}. To +illustrate this, we use the object \emph{SplitDates} in the package +which is used for doing examples. + <>= library(eventstudies) data(SplitDates) # The sample From noreply at r-forge.r-project.org Fri May 16 08:34:03 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 08:34:03 +0200 (CEST) Subject: [Eventstudies-commits] r357 - pkg/vignettes Message-ID: <20140516063403.4E2451874FD@r-forge.r-project.org> Author: chiraganand Date: 2014-05-16 08:34:02 +0200 (Fri, 16 May 2014) New Revision: 357 Modified: pkg/vignettes/eventstudies.Rnw Log: Changed five components to two. 
Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-16 06:15:17 UTC (rev 356) +++ pkg/vignettes/eventstudies.Rnw 2014-05-16 06:34:02 UTC (rev 357) @@ -113,7 +113,7 @@ @ The object returned by eventstudy is of \texttt{class} `es'. It is a list with -five components. Three of these are just a record of the way +two components. Three of these are just a record of the way \texttt{eventstudy()} was run: the inference procedure adopted (``\texttt{bootstrap}'' inference in this case), the window width (10 in this case) and the method used for mapping the data (``\texttt{cumsum}''). The two new things are From noreply at r-forge.r-project.org Fri May 16 08:36:20 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 08:36:20 +0200 (CEST) Subject: [Eventstudies-commits] r358 - pkg/man Message-ID: <20140516063621.01582187512@r-forge.r-project.org> Author: vikram Date: 2014-05-16 08:36:20 +0200 (Fri, 16 May 2014) New Revision: 358 Modified: pkg/man/eesInference.Rd pkg/man/get.clusters.formatted.Rd Log: minor edit in manual Modified: pkg/man/eesInference.Rd =================================================================== --- pkg/man/eesInference.Rd 2014-05-16 06:34:02 UTC (rev 357) +++ pkg/man/eesInference.Rd 2014-05-16 06:36:20 UTC (rev 358) @@ -4,7 +4,7 @@ \title{Extreme event study inference estimation} \description{This function performs event study analysis on extreme event dates - (\sQuote{eesDates}) and formatted output using + (\sQuote{eesDates}) and using formatted output (\sQuote{get.clusters.formatted}) } Modified: pkg/man/get.clusters.formatted.Rd =================================================================== --- pkg/man/get.clusters.formatted.Rd 2014-05-16 06:34:02 UTC (rev 357) +++ pkg/man/get.clusters.formatted.Rd 2014-05-16 06:36:20 UTC (rev 358) @@ -1,7 +1,7 @@ \name{get.clusters.formatted} 
\alias{get.clusters.formatted} -\title{Get formatted clusters to perform extreme event analysis (ees)} +\title{Get formatted clusters to perform extreme event study analysis (ees)} \description{The functions formats extreme event dates, dealing with clusters in the event frame.} From noreply at r-forge.r-project.org Fri May 16 12:08:31 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 12:08:31 +0200 (CEST) Subject: [Eventstudies-commits] r359 - in pkg: R man vignettes Message-ID: <20140516100831.9B62D1874EB@r-forge.r-project.org> Author: vikram Date: 2014-05-16 12:08:22 +0200 (Fri, 16 May 2014) New Revision: 359 Modified: pkg/R/eventstudy.R pkg/man/eventstudy.Rd pkg/vignettes/eventstudies.Rnw Log: Fixed the nlags issue Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-16 06:36:20 UTC (rev 358) +++ pkg/R/eventstudy.R 2014-05-16 10:08:22 UTC (rev 359) @@ -26,16 +26,18 @@ stop("firm.returns should be a zoo series with at least one column. Use '[' with 'drop = FALSE'.") } firmNames <- colnames(firm.returns) + ## Extracting elipsis values + extra.var <- list(...) 
+ cat("I am here:", extra.var$nlags, "\n") ### Run models ## AMM if (type == "lmAMM") { ## AMM residual to time series - timeseriesAMM <- function(firm.returns, X, verbose = FALSE, nlags = 1) { + timeseriesAMM <- function(firm.returns, X, verbose = FALSE) { tmp <- resid(lmAMM(firm.returns = firm.returns, X = X, - nlags = nlags, - verbose = FALSE)) + verbose = FALSE, nlags = extra.var$nlags)) tmp.res <- zoo(x = tmp, order.by = as.Date(names(tmp))) } ## Estimating AMM regressors @@ -44,8 +46,8 @@ ## One firm outputModel <- timeseriesAMM(firm.returns = firm.returns, X = regressors, - verbose = FALSE, - nlags = 1) + verbose = FALSE) + } else { ## More than one firm # Extracting and merging @@ -53,8 +55,8 @@ { timeseriesAMM(firm.returns = firm.returns[,y], X = regressors, - verbose = FALSE, - nlags = 1) + verbose = FALSE) + }) names(tmp.resid) <- colnames(firm.returns) outputModel <- do.call(merge.zoo, tmp.resid) Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-16 06:36:20 UTC (rev 358) +++ pkg/man/eventstudy.Rd 2014-05-16 10:08:22 UTC (rev 359) @@ -264,7 +264,7 @@ # model arguments market.returns = OtherReturns[, "NiftyIndex"], others = OtherReturns[, c("USDINR", "CallMoneyRate")], - market.returns.purge = TRUE + market.returns.purge = TRUE, nlags = 1 ) str(es) plot(es) Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-16 06:36:20 UTC (rev 358) +++ pkg/vignettes/eventstudies.Rnw 2014-05-16 10:08:22 UTC (rev 359) @@ -19,8 +19,8 @@ \maketitle -\begin{abstract} -\end{abstract} +% \begin{abstract} +% \end{abstract} \SweaveOpts{engine=R,pdf=TRUE} \section{The standard event study in finance} From noreply at r-forge.r-project.org Fri May 16 14:37:58 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 14:37:58 +0200 (CEST) Subject: 
[Eventstudies-commits] r360 - in pkg: inst/tests vignettes Message-ID: <20140516123758.C23071874F2@r-forge.r-project.org> Author: vikram Date: 2014-05-16 14:37:58 +0200 (Fri, 16 May 2014) New Revision: 360 Modified: pkg/inst/tests/test_interfaces.R pkg/inst/tests/test_lmAMM.R pkg/vignettes/eventstudies.Rnw Log: Corrected vignette amm commands Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-16 10:08:22 UTC (rev 359) +++ pkg/inst/tests/test_interfaces.R 2014-05-16 12:37:58 UTC (rev 360) @@ -66,7 +66,7 @@ width = 3, type = "lmAMM", market.returns = NiftyIndex[index(USDINR)], - others = test_others) + others = test_others, nlags = 1) expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) expect_that(expected_outcomes, equals(test_es$outcomes)) Modified: pkg/inst/tests/test_lmAMM.R =================================================================== --- pkg/inst/tests/test_lmAMM.R 2014-05-16 10:08:22 UTC (rev 359) +++ pkg/inst/tests/test_lmAMM.R 2014-05-16 12:37:58 UTC (rev 360) @@ -70,7 +70,7 @@ ################################################################################ - cat("\nDoing Testcases P8") + cat("\nDoing Testcases P8\n") load("test_y3c3.rda") NIFTY_INDEX <- y3c3$NIFTY_INDEX Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-16 10:08:22 UTC (rev 359) +++ pkg/vignettes/eventstudies.Rnw 2014-05-16 12:37:58 UTC (rev 360) @@ -238,7 +238,8 @@ inference.strategy = "bootstrap", market.returns=OtherReturns$NiftyIndex, others=OtherReturns$USDINR, - market.returns.purge=TRUE + market.returns.purge=TRUE, + nlags = 1 ) @ From noreply at r-forge.r-project.org Fri May 16 22:45:56 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 22:45:56 +0200 (CEST) Subject: [Eventstudies-commits] r361 - in pkg: 
R inst/tests man vignettes Message-ID: <20140516204556.D6622186E06@r-forge.r-project.org> Author: chiraganand Date: 2014-05-16 22:45:56 +0200 (Fri, 16 May 2014) New Revision: 361 Modified: pkg/R/eventstudy.R pkg/inst/tests/test_inr_inference.R pkg/inst/tests/test_interfaces.R pkg/man/eventstudy.Rd pkg/vignettes/eventstudies.Rnw Log: Modified the eventstudy interface, changed the man pages and test cases. Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-16 12:37:58 UTC (rev 360) +++ pkg/R/eventstudy.R 2014-05-16 20:45:56 UTC (rev 361) @@ -7,16 +7,20 @@ remap = "cumsum", inference = TRUE, inference.strategy = "bootstrap", - ...) { + model.args = NULL) { if (type == "None" && !is.null(firm.returns)) { outputModel <- firm.returns - if (length(list(...)) != 0) { + if (length(model.args) != 0) { warning(deparse("type"), " = ", deparse("None"), " does not take extra arguments, ignoring them.") } } + if (type != "None" && is.null(model.args)) { + stop("modelArgs cannot be NULL when type is not None.") + } + if (is.levels == TRUE) { firm.returns <- diff(log(firm.returns)) * 100 } @@ -26,38 +30,48 @@ stop("firm.returns should be a zoo series with at least one column. Use '[' with 'drop = FALSE'.") } firmNames <- colnames(firm.returns) - ## Extracting elipsis values - extra.var <- list(...) - cat("I am here:", extra.var$nlags, "\n") ### Run models ## AMM if (type == "lmAMM") { - ## AMM residual to time series - timeseriesAMM <- function(firm.returns, X, verbose = FALSE) { - tmp <- resid(lmAMM(firm.returns = firm.returns, - X = X, - verbose = FALSE, nlags = extra.var$nlags)) - tmp.res <- zoo(x = tmp, order.by = as.Date(names(tmp))) + + ## Estimating AMM regressors + args.makeX <- model.args[names(model.args) %in% formalArgs(makeX)] + if (!is.null(model.args$nlag.makeX)) { + args.makeX$nlags <- model.args$nlag.makeX } - ## Estimating AMM regressors - regressors <- makeX(...) 
+ regressors <- do.call(makeX, args.makeX) + + args.lmAMM <- model.args[names(model.args) %in% formalArgs(lmAMM)] + args.lmAMM$X <- regressors + + if (!is.null(model.args$nlag.lmAMM)) { + args.lmAMM$nlags <- model.args$nlag.lmAMM + } + if(NCOL(firm.returns)==1){ ## One firm - outputModel <- timeseriesAMM(firm.returns = firm.returns, - X = regressors, - verbose = FALSE) - + args.lmAMM$firm.returns <- firm.returns + tmp <- resid(do.call(lmAMM, args.lmAMM)) + if (is.null(tmp)) { + cat("lmAMM() returned NULL\n") + return(NULL) + } + outputModel <- zoo(x = tmp, order.by = as.Date(names(tmp))) + } else { ## More than one firm # Extracting and merging tmp.resid <- lapply(colnames(firm.returns), function(y) { - timeseriesAMM(firm.returns = firm.returns[,y], - X = regressors, - verbose = FALSE) - - }) + args.lmAMM$firm.returns <- firm.returns[, y] + tmp <- resid(do.call(lmAMM, args.lmAMM)) + if (is.null(tmp)) { + cat("lmAMM() returned NULL\n") + return(NULL) + } + return(zoo(x = tmp, order.by = as.Date(names(tmp)))) + }) names(tmp.resid) <- colnames(firm.returns) outputModel <- do.call(merge.zoo, tmp.resid) } @@ -65,12 +79,12 @@ ## marketResidual if (type == "marketResidual") { - outputModel <- marketResidual(firm.returns, ...) + outputModel <- marketResidual(firm.returns, model.args$market.returns) } ## excessReturn if (type == "excessReturn") { - outputModel <- excessReturn(firm.returns, ...) 
+ outputModel <- excessReturn(firm.returns, model.args$market.returns) } ### Converting index outputModel to Date Modified: pkg/inst/tests/test_inr_inference.R =================================================================== --- pkg/inst/tests/test_inr_inference.R 2014-05-16 12:37:58 UTC (rev 360) +++ pkg/inst/tests/test_inr_inference.R 2014-05-16 20:45:56 UTC (rev 361) @@ -7,13 +7,13 @@ inr_returns <- diff(log(INR))[-1] -eventslist <- data.frame(name=rep("inr",10), - when=as.Date(c( +eventslist <- data.frame(when=as.Date(c( "2010-04-20","2010-07-02","2010-07-27", "2010-09-16","2010-11-02","2011-01-25", "2011-03-17","2011-05-03","2011-06-16", "2011-07-26") - ) + ), + name=rep("inr",10) ) event_time_data <- phys2eventtime(inr_returns[, , drop = FALSE] , eventslist,width=10) Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-16 12:37:58 UTC (rev 360) +++ pkg/inst/tests/test_interfaces.R 2014-05-16 20:45:56 UTC (rev 361) @@ -21,7 +21,7 @@ test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, width = 3, - market.returns = NiftyIndex) + model.args = list(market.returns = NiftyIndex)) expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) expect_that(expected_outcomes, equals(test_es$outcomes)) @@ -65,8 +65,8 @@ eventList = test_events, width = 3, type = "lmAMM", - market.returns = NiftyIndex[index(USDINR)], - others = test_others, nlags = 1) + model.args = list(market.returns = NiftyIndex[index(USDINR)], + others = test_others)) expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) expect_that(expected_outcomes, equals(test_es$outcomes)) @@ -88,7 +88,7 @@ eventList = test_events, width = 3, type = "excessReturn", - market.returns = NiftyIndex) + model.args = list(market.returns = NiftyIndex)) expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) expect_that(expected_outcomes, 
equals(test_es$outcomes)) Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-16 12:37:58 UTC (rev 360) +++ pkg/man/eventstudy.Rd 2014-05-16 20:45:56 UTC (rev 361) @@ -21,7 +21,7 @@ remap = "cumsum", inference = TRUE, inference.strategy = "bootstrap", - ...) + model.args = NULL) } \arguments{ @@ -74,7 +74,7 @@ and \dQuote{wilcox}. } - \item{...}{ + \item{model.args}{ All other arguments to be passed depends on whether \sQuote{type} is \dQuote{marketResidual}, \dQuote{excessReturn}, or \dQuote{lmAMM}. When \dQuote{None}, no additional arguments will be @@ -104,7 +104,7 @@ \item{\dQuote{None}: does not use any model.} } - Arguments to a model type can be sent inside \sQuote{...}. See + Arguments to a model type can be sent inside \sQuote{model.args}. See \sQuote{Model arguments} section for details on accepted fields. @@ -122,7 +122,7 @@ \code{\link{inference.bootstrap}} and \code{\link{inference.wilcox}} for more details. - \sQuote{...} is directly supplied to the model mentioned in the + \sQuote{model.args} is directly supplied to the model mentioned in the \dQuote{type} argument. See section on \sQuote{Model arguments} for more details. @@ -131,7 +131,7 @@ } \section{\bold{Model arguments}}{ - Each model can take extra arguments (supplied as \sQuote{...}) apart + Each model can take extra arguments (supplied as \sQuote{model.args}) apart from mandatory ones for finer control over the analysis. Check the respective function documentation for definitions. The arguments from the relevant functions are listed here: @@ -146,11 +146,12 @@ - others \cr - switch.to.innov \cr - market.returns.purge \cr - - nlags \cr + - nlag.makeX \cr + - nlag.lmAMM \cr - dates \cr - verbose \cr - Note: arguments are directly passed to \sQuote{makeX}, see + Note: arguments (except nlag.lmAMM) are directly passed to \sQuote{makeX}, see \code{\link{lmAMM}} for more details. 
\cr } } @@ -243,7 +244,9 @@ remap = "cumsum", inference = TRUE, inference.strategy = "bootstrap", - market.returns = OtherReturns[, "NiftyIndex"], + model.args = list( + market.returns = OtherReturns[, "NiftyIndex"] + ) ) str(es) plot(es) @@ -262,9 +265,13 @@ inference = TRUE, inference.strategy = "bootstrap", # model arguments - market.returns = OtherReturns[, "NiftyIndex"], - others = OtherReturns[, c("USDINR", "CallMoneyRate")], - market.returns.purge = TRUE, nlags = 1 + model.args = list( + market.returns = OtherReturns[, "NiftyIndex"], + others = OtherReturns[, c("USDINR", "CallMoneyRate")], + market.returns.purge = TRUE, + nlag.makeX = 5, + nlag.lmAMM = NULL + ) ) str(es) plot(es) Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-16 12:37:58 UTC (rev 360) +++ pkg/vignettes/eventstudies.Rnw 2014-05-16 20:45:56 UTC (rev 361) @@ -178,7 +178,7 @@ remap = "cumsum", inference = TRUE, inference.strategy = "bootstrap", - market.returns=OtherReturns$NiftyIndex + model.args = list(market.returns=OtherReturns$NiftyIndex) ) @ @@ -236,12 +236,13 @@ remap = "cumsum", inference = TRUE, inference.strategy = "bootstrap", - market.returns=OtherReturns$NiftyIndex, - others=OtherReturns$USDINR, - market.returns.purge=TRUE, - nlags = 1 + model.args = list( + market.returns=OtherReturns$NiftyIndex, + others=OtherReturns$USDINR, + market.returns.purge=TRUE + ) ) -@ +@ Here the additional regressor on the augmented market model is the returns on the exchange rate, which is the slot `\texttt{USDINR}' in From noreply at r-forge.r-project.org Fri May 16 22:49:54 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 16 May 2014 22:49:54 +0200 (CEST) Subject: [Eventstudies-commits] r362 - pkg/R Message-ID: <20140516204954.9A58D186EA6@r-forge.r-project.org> Author: chiraganand Date: 2014-05-16 22:49:54 +0200 (Fri, 16 May 2014) New Revision: 362 
Modified: pkg/R/lmAMM.R Log: Continue to next iteration if a NULL is found, don't return NULL. Return NULL only if the best lag could not be found. Modified: pkg/R/lmAMM.R =================================================================== --- pkg/R/lmAMM.R 2014-05-16 20:45:56 UTC (rev 361) +++ pkg/R/lmAMM.R 2014-05-16 20:49:54 UTC (rev 362) @@ -155,7 +155,7 @@ bestAIC <- Inf for (trylag in 0:min(10,log10(length(firm.returns)))) { thism <- do.ols(trylag) - if (is.null(m)) {return(NULL)} + if (is.null(thism)) {next} thisAIC <- AIC(thism, k=log(length(thism$fitted.values))) if (verbose) {cat(trylag, " lags, SBC = ", thisAIC, "\n")} if (thisAIC < bestAIC) { @@ -164,6 +164,9 @@ bestm <- thism } } + if (is.null(bestm)) { + return(NULL) + } nlags <- bestlag m <- bestm } else { From noreply at r-forge.r-project.org Fri May 23 19:19:46 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 23 May 2014 19:19:46 +0200 (CEST) Subject: [Eventstudies-commits] r363 - in pkg: R inst/tests man vignettes Message-ID: <20140523171946.8E9C6186DF0@r-forge.r-project.org> Author: chiraganand Date: 2014-05-23 19:19:46 +0200 (Fri, 23 May 2014) New Revision: 363 Modified: pkg/R/eesInference.R pkg/R/eventstudy.R pkg/inst/tests/test_interfaces.R pkg/man/eventstudy.Rd pkg/vignettes/eventstudies.Rnw Log: Changed the width argument to event.window, its more intuitive. 
Modified: pkg/R/eesInference.R =================================================================== --- pkg/R/eesInference.R 2014-05-16 20:49:54 UTC (rev 362) +++ pkg/R/eesInference.R 2014-05-23 17:19:46 UTC (rev 363) @@ -729,23 +729,23 @@ # Good days inf$good.normal <- eventstudy(input, eventList=eventLists$events.good.normal, type="None", to.remap=to.remap, - remap=remap, width=width, inference=inference, + remap=remap, event.window=width, inference=inference, inference.strategy=inference.strategy) # Bad days inf$bad.normal <- eventstudy(input, eventList=eventLists$events.bad.normal, type="None", to.remap=to.remap, - remap=remap, width=width, inference=inference, + remap=remap, event.window=width, inference=inference, inference.strategy=inference.strategy) ## Purged # Good days inf$good.purged <- eventstudy(input, eventList=eventLists$events.good.purged, type="None", to.remap=to.remap, - remap=remap, width=width, inference=inference, + remap=remap, event.window=width, inference=inference, inference.strategy=inference.strategy) # Bad days inf$bad.purged <- eventstudy(input, eventList=eventLists$events.bad.purged, type="None", to.remap=to.remap, - remap=remap, width=width, inference=inference, + remap=remap, event.window=width, inference=inference, inference.strategy=inference.strategy) class(inf) <- "ees" Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-16 20:49:54 UTC (rev 362) +++ pkg/R/eventstudy.R 2014-05-23 17:19:46 UTC (rev 363) @@ -1,6 +1,6 @@ eventstudy <- function(firm.returns, eventList, - width = 10, + event.window = 10, is.levels = FALSE, type = "marketResidual", to.remap = TRUE, @@ -104,7 +104,7 @@ es.w <- NULL cn.names <- character(length = 0) } else { - es.w <- window(es$z.e, start = -width, end = width) + es.w <- window(es$z.e, start = -event.window, end = event.window) # Adding column names to event output cn.names <- eventList[which(es$outcomes=="success"),1] } @@ 
-156,7 +156,7 @@ outcomes = as.character(es$outcomes)) attr(final.result, which = "inference") <- inference.strategy - attr(final.result, which = "width") <- width + attr(final.result, which = "event.window") <- event.window attr(final.result, which = "remap") <- remapping class(final.result) <- "es" Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-16 20:49:54 UTC (rev 362) +++ pkg/inst/tests/test_interfaces.R 2014-05-23 17:19:46 UTC (rev 363) @@ -20,7 +20,7 @@ drop = FALSE] test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, model.args = list(market.returns = NiftyIndex)) expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) @@ -41,7 +41,7 @@ drop = FALSE] test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None") expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) @@ -63,7 +63,7 @@ test_others <- USDINR test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "lmAMM", model.args = list(market.returns = NiftyIndex[index(USDINR)], others = test_others)) @@ -86,7 +86,7 @@ test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "excessReturn", model.args = list(market.returns = NiftyIndex)) @@ -105,14 +105,14 @@ ## cumsum test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None", to.remap = FALSE, remap = "cumsum") test_es_remap <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None", to.remap = TRUE, remap = "cumsum") @@ -122,14 +122,14 @@ ## cumprod test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = 
"None", to.remap = FALSE, remap = "cumprod") test_es_remap <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None", to.remap = TRUE, remap = "cumprod") @@ -141,14 +141,14 @@ ## bootstrap test_es_inference <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None", inference = TRUE, inference.strategy = "bootstrap") test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None", inference = FALSE, inference.strategy = "bootstrap") @@ -158,14 +158,14 @@ ## wilcoxon test_es_inference <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None", inference = TRUE, inference.strategy = "wilcoxon") test_es <- eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None", inference = FALSE, inference.strategy = "wilcoxon") @@ -184,6 +184,6 @@ test_returns<- StockPriceReturns$ONGC expect_error(eventstudy(firm.returns = test_returns, eventList = test_events, - width = 3, + event.window = 3, type = "None")) }) Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-16 20:49:54 UTC (rev 362) +++ pkg/man/eventstudy.Rd 2014-05-23 17:19:46 UTC (rev 363) @@ -14,7 +14,7 @@ \usage{ eventstudy(firm.returns, eventList, - width = 10, + event.window = 10, is.levels = FALSE, type = "marketResidual", to.remap = TRUE, @@ -35,7 +35,7 @@ series from \sQuote{firm.returns} (colname \dQuote{name}). } - \item{width}{an \sQuote{integer} of length 1 that specifies a + \item{event.window}{an \sQuote{integer} of length 1 that specifies a symmetric event window around the event date. } @@ -190,7 +190,7 @@ strategy was utilised to estimate the confidence intervals. 
} - \item{\dQuote{width}:}{ + \item{\dQuote{event.window}:}{ a \sQuote{numeric} specifying the window width for event study output. } @@ -226,7 +226,7 @@ # Event study without adjustment es <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, - width = 10, + event.window = 10, type = "None", to.remap = TRUE, remap = "cumsum", @@ -238,7 +238,7 @@ # Event study using Market Model es <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, - width = 10, + event.window = 10, type = "marketResidual", to.remap = TRUE, remap = "cumsum", @@ -258,7 +258,7 @@ es <- eventstudy(firm.returns = StockPriceReturns, eventList = events, - width = 10, + event.window = 10, type = "lmAMM", to.remap = TRUE, remap = "cumsum", Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-16 20:49:54 UTC (rev 362) +++ pkg/vignettes/eventstudies.Rnw 2014-05-23 17:19:46 UTC (rev 363) @@ -81,7 +81,7 @@ <>= es <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, - width = 10, + event.window = 10, type = "None", to.remap = TRUE, remap = "cumsum", @@ -172,7 +172,7 @@ data(OtherReturns) es.mm <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, - width = 10, + event.window = 10, type = "marketResidual", to.remap = TRUE, remap = "cumsum", @@ -230,7 +230,7 @@ <>= es.amm <- eventstudy(firm.returns = StockPriceReturns, eventList = SplitDates, - width = 10, + event.window = 10, type = "lmAMM", to.remap = TRUE, remap = "cumsum", From noreply at r-forge.r-project.org Fri May 23 19:26:01 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 23 May 2014 19:26:01 +0200 (CEST) Subject: [Eventstudies-commits] r364 - in pkg: R inst/tests man vignettes Message-ID: <20140523172601.17F79187368@r-forge.r-project.org> Author: chiraganand Date: 2014-05-23 19:26:00 +0200 (Fri, 23 May 2014) New Revision: 364 Modified: 
pkg/R/eesInference.R pkg/R/eventstudy.R pkg/inst/tests/test_interfaces.R pkg/man/eventstudy.Rd pkg/vignettes/eventstudies.Rnw Log: Changed eventList argument to event.list, made it consistent with other args. Modified: pkg/R/eesInference.R =================================================================== --- pkg/R/eesInference.R 2014-05-23 17:19:46 UTC (rev 363) +++ pkg/R/eesInference.R 2014-05-23 17:26:00 UTC (rev 364) @@ -727,23 +727,23 @@ ## Computing inference ## Normal # Good days - inf$good.normal <- eventstudy(input, eventList=eventLists$events.good.normal, + inf$good.normal <- eventstudy(input, event.list=eventLists$events.good.normal, type="None", to.remap=to.remap, remap=remap, event.window=width, inference=inference, inference.strategy=inference.strategy) # Bad days - inf$bad.normal <- eventstudy(input, eventList=eventLists$events.bad.normal, + inf$bad.normal <- eventstudy(input, event.list=eventLists$events.bad.normal, type="None", to.remap=to.remap, remap=remap, event.window=width, inference=inference, inference.strategy=inference.strategy) ## Purged # Good days - inf$good.purged <- eventstudy(input, eventList=eventLists$events.good.purged, + inf$good.purged <- eventstudy(input, event.list=eventLists$events.good.purged, type="None", to.remap=to.remap, remap=remap, event.window=width, inference=inference, inference.strategy=inference.strategy) # Bad days - inf$bad.purged <- eventstudy(input, eventList=eventLists$events.bad.purged, + inf$bad.purged <- eventstudy(input, event.list=eventLists$events.bad.purged, type="None", to.remap=to.remap, remap=remap, event.window=width, inference=inference, inference.strategy=inference.strategy) Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-23 17:19:46 UTC (rev 363) +++ pkg/R/eventstudy.R 2014-05-23 17:26:00 UTC (rev 364) @@ -1,5 +1,5 @@ eventstudy <- function(firm.returns, - eventList, + event.list, event.window = 10, is.levels = 
FALSE, type = "marketResidual", @@ -98,7 +98,7 @@ colnames(outputModel) <- firmNames } - es <- phys2eventtime(z = outputModel, events=eventList, width=0) + es <- phys2eventtime(z = outputModel, events=event.list, width=0) if (is.null(es$z.e) || length(es$z.e) == 0) { es.w <- NULL @@ -106,7 +106,7 @@ } else { es.w <- window(es$z.e, start = -event.window, end = event.window) # Adding column names to event output - cn.names <- eventList[which(es$outcomes=="success"),1] + cn.names <- event.list[which(es$outcomes=="success"),1] } ## replace NAs with 0 as it's returns now Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-23 17:19:46 UTC (rev 363) +++ pkg/inst/tests/test_interfaces.R 2014-05-23 17:26:00 UTC (rev 364) @@ -19,7 +19,7 @@ test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", drop = FALSE] test_es <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, model.args = list(market.returns = NiftyIndex)) @@ -40,7 +40,7 @@ test_returns<- StockPriceReturns[complete.cases(StockPriceReturns$ONGC), "ONGC", drop = FALSE] test_es <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None") @@ -62,7 +62,7 @@ drop = FALSE] test_others <- USDINR test_es <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "lmAMM", model.args = list(market.returns = NiftyIndex[index(USDINR)], @@ -85,7 +85,7 @@ drop = FALSE] test_es <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "excessReturn", model.args = list(market.returns = NiftyIndex)) @@ -104,14 +104,14 @@ ## cumsum test_es <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 
3, type = "None", to.remap = FALSE, remap = "cumsum") test_es_remap <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None", to.remap = TRUE, @@ -121,14 +121,14 @@ ## cumprod test_es <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None", to.remap = FALSE, remap = "cumprod") test_es_remap <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None", to.remap = TRUE, @@ -140,14 +140,14 @@ cat("\nChecking inference interface: ") ## bootstrap test_es_inference <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None", inference = TRUE, inference.strategy = "bootstrap") test_es <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None", inference = FALSE, @@ -157,14 +157,14 @@ ## wilcoxon test_es_inference <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None", inference = TRUE, inference.strategy = "wilcoxon") test_es <- eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None", inference = FALSE, @@ -183,7 +183,7 @@ stringsAsFactors = FALSE) test_returns<- StockPriceReturns$ONGC expect_error(eventstudy(firm.returns = test_returns, - eventList = test_events, + event.list = test_events, event.window = 3, type = "None")) }) Modified: pkg/man/eventstudy.Rd =================================================================== --- pkg/man/eventstudy.Rd 2014-05-23 17:19:46 UTC (rev 363) +++ pkg/man/eventstudy.Rd 2014-05-23 17:26:00 UTC (rev 364) @@ -13,7 +13,7 @@ \usage{ eventstudy(firm.returns, - eventList, + event.list, event.window = 10, is.levels = FALSE, type = 
"marketResidual", @@ -29,7 +29,7 @@ a \pkg{zoo} matrix of \sQuote{outcome} or \sQuote{response} series. } - \item{eventList}{ + \item{event.list}{ a \code{data.frame} of two columns with event dates (colname: \dQuote{when}) and column names of the \sQuote{response} series from \sQuote{firm.returns} (colname \dQuote{name}). @@ -225,7 +225,7 @@ # Event study without adjustment es <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, + event.list = SplitDates, event.window = 10, type = "None", to.remap = TRUE, @@ -237,7 +237,7 @@ # Event study using Market Model es <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, + event.list = SplitDates, event.window = 10, type = "marketResidual", to.remap = TRUE, @@ -257,7 +257,7 @@ stringsAsFactors = FALSE) es <- eventstudy(firm.returns = StockPriceReturns, - eventList = events, + event.list = events, event.window = 10, type = "lmAMM", to.remap = TRUE, Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-23 17:19:46 UTC (rev 363) +++ pkg/vignettes/eventstudies.Rnw 2014-05-23 17:26:00 UTC (rev 364) @@ -80,7 +80,7 @@ <>= es <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, + event.list = SplitDates, event.window = 10, type = "None", to.remap = TRUE, @@ -171,7 +171,7 @@ <>= data(OtherReturns) es.mm <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, + event.list = SplitDates, event.window = 10, type = "marketResidual", to.remap = TRUE, @@ -229,7 +229,7 @@ <>= es.amm <- eventstudy(firm.returns = StockPriceReturns, - eventList = SplitDates, + event.list = SplitDates, event.window = 10, type = "lmAMM", to.remap = TRUE, From noreply at r-forge.r-project.org Fri May 23 19:31:42 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 23 May 2014 19:31:42 +0200 (CEST) Subject: [Eventstudies-commits] r365 - pkg/R 
Message-ID: <20140523173142.83D0B187454@r-forge.r-project.org> Author: chiraganand Date: 2014-05-23 19:31:42 +0200 (Fri, 23 May 2014) New Revision: 365 Modified: pkg/R/eventstudy.R Log: Fixed error message, added comment. Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-23 17:26:00 UTC (rev 364) +++ pkg/R/eventstudy.R 2014-05-23 17:31:42 UTC (rev 365) @@ -18,7 +18,7 @@ } if (type != "None" && is.null(model.args)) { - stop("modelArgs cannot be NULL when type is not None.") + stop("model.args cannot be NULL when type is not None.") } if (is.levels == TRUE) { @@ -29,6 +29,7 @@ if (is.null(ncol(firm.returns))) { stop("firm.returns should be a zoo series with at least one column. Use '[' with 'drop = FALSE'.") } + # store firm names for later use firmNames <- colnames(firm.returns) ### Run models From noreply at r-forge.r-project.org Fri May 23 19:36:26 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 23 May 2014 19:36:26 +0200 (CEST) Subject: [Eventstudies-commits] r366 - in pkg: R man Message-ID: <20140523173626.CC1C8187497@r-forge.r-project.org> Author: chiraganand Date: 2014-05-23 19:36:26 +0200 (Fri, 23 May 2014) New Revision: 366 Modified: pkg/R/eesInference.R pkg/man/eesInference.Rd Log: Changed ees arguments to event.window and event.lists. 
Modified: pkg/R/eesInference.R =================================================================== --- pkg/R/eesInference.R 2014-05-23 17:31:42 UTC (rev 365) +++ pkg/R/eesInference.R 2014-05-23 17:36:26 UTC (rev 366) @@ -718,8 +718,8 @@ ##---------------------- ## Event study plot for EES (extreme event studies) ## Input: Output of GCF -## eventLists: Output of eesDates -eesInference <- function(input, eventLists, width, to.remap=TRUE, +## event.lists: Output of eesDates +eesInference <- function(input, event.lists, event.window, to.remap=TRUE, remap="cumsum", inference = TRUE, inference.strategy = "bootstrap"){ @@ -727,25 +727,25 @@ ## Computing inference ## Normal # Good days - inf$good.normal <- eventstudy(input, event.list=eventLists$events.good.normal, + inf$good.normal <- eventstudy(input, event.list=event.lists$events.good.normal, type="None", to.remap=to.remap, - remap=remap, event.window=width, inference=inference, + remap=remap, event.window=event.window, inference=inference, inference.strategy=inference.strategy) # Bad days - inf$bad.normal <- eventstudy(input, event.list=eventLists$events.bad.normal, + inf$bad.normal <- eventstudy(input, event.list=event.lists$events.bad.normal, type="None", to.remap=to.remap, - remap=remap, event.window=width, inference=inference, + remap=remap, event.window=event.window, inference=inference, inference.strategy=inference.strategy) ## Purged # Good days - inf$good.purged <- eventstudy(input, event.list=eventLists$events.good.purged, + inf$good.purged <- eventstudy(input, event.list=event.lists$events.good.purged, type="None", to.remap=to.remap, - remap=remap, event.window=width, inference=inference, + remap=remap, event.window=event.window, inference=inference, inference.strategy=inference.strategy) # Bad days - inf$bad.purged <- eventstudy(input, event.list=eventLists$events.bad.purged, + inf$bad.purged <- eventstudy(input, event.list=event.lists$events.bad.purged, type="None", to.remap=to.remap, - remap=remap, 
event.window=width, inference=inference, + remap=remap, event.window=event.window, inference=inference, inference.strategy=inference.strategy) class(inf) <- "ees" Modified: pkg/man/eesInference.Rd =================================================================== --- pkg/man/eesInference.Rd 2014-05-23 17:31:42 UTC (rev 365) +++ pkg/man/eesInference.Rd 2014-05-23 17:36:26 UTC (rev 366) @@ -9,7 +9,7 @@ } \usage{ - eesInference(input, eventLists, width, to.remap = TRUE, remap = "cumsum", + eesInference(input, event.lists, event.window, to.remap = TRUE, remap = "cumsum", inference = "TRUE", inference.strategy = "bootstrap") } @@ -19,12 +19,12 @@ \sQuote{get.clusters.formatted} function. } - \item{eventLists}{ + \item{event.lists}{ a \sQuote{list} of normal and purged events as returned by \sQuote{eesDates}. } - \item{width}{ + \item{event.window}{ an \sQuote{integer} of length 1 that specifies a symmetric event window around the event date. } @@ -115,7 +115,7 @@ event.lists <- eesDates(formattedClusters) inference <- eesInference(input = formattedClusters, - eventLists = event.lists, - width = 5) + event.lists = event.lists, + event.window = 5) str(inference, max.level = 2) } From noreply at r-forge.r-project.org Fri May 23 19:43:44 2014 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 23 May 2014 19:43:44 +0200 (CEST) Subject: [Eventstudies-commits] r367 - in pkg: R inst/tests vignettes Message-ID: <20140523174344.4FF97184DE7@r-forge.r-project.org> Author: chiraganand Date: 2014-05-23 19:43:44 +0200 (Fri, 23 May 2014) New Revision: 367 Modified: pkg/R/eventstudy.R pkg/inst/tests/test_interfaces.R pkg/vignettes/eventstudies.Rnw Log: Changed eventstudy.output with result. 
Modified: pkg/R/eventstudy.R =================================================================== --- pkg/R/eventstudy.R 2014-05-23 17:36:26 UTC (rev 366) +++ pkg/R/eventstudy.R 2014-05-23 17:43:44 UTC (rev 367) @@ -153,7 +153,7 @@ } if(to.remap==TRUE){remapping <- remap} else {remapping <- "none"} - final.result <- list(eventstudy.output = result, + final.result <- list(result = result, outcomes = as.character(es$outcomes)) attr(final.result, which = "inference") <- inference.strategy @@ -169,9 +169,9 @@ ######################### print.es <- function(x, ...){ - cat("Event study", colnames(x$eventstudy.output)[2], "response with", + cat("Event study", colnames(x$result)[2], "response with", attr(x, "inference"), "inference for CI:\n") - print(x$eventstudy.output) + print(x$result) cat("\n","Event outcome has",length(which(x$outcomes=="success")), "successful outcomes out of", length(x$outcomes),"events:","\n") print(x$outcomes) @@ -182,13 +182,13 @@ } plot.es <- function(x, xlab = NULL, ylab = NULL, ...){ - if (NCOL(x$eventstudy.output) < 3) { + if (NCOL(x$result) < 3) { cat("Error: No confidence bands available to plot.\n") return(invisible(NULL)) } - big <- max(abs(x$eventstudy.output)) + big <- max(abs(x$result)) hilo <- c(-big,big) - width <- (nrow(x$eventstudy.output)-1)/2 + width <- (nrow(x$result)-1)/2 ## assign own labels if they're missing if (is.null(ylab)) { @@ -208,13 +208,13 @@ xlab <- "Event time" } - plot(-width:width, x$eventstudy.output[,2], type="l", lwd=2, ylim=hilo, + plot(-width:width, x$result[,2], type="l", lwd=2, ylim=hilo, xlab = xlab, ylab = ylab, ...) - points(-width:width, x$eventstudy.output[,2]) - lines(-width:width, x$eventstudy.output[,"2.5%"], + points(-width:width, x$result[,2]) + lines(-width:width, x$result[,"2.5%"], lwd=1, lty=2, ...) - lines(-width:width, x$eventstudy.output[,"97.5%"], + lines(-width:width, x$result[,"97.5%"], lwd=1, lty=2, ...) 
abline(h=0,v=0) } Modified: pkg/inst/tests/test_interfaces.R =================================================================== --- pkg/inst/tests/test_interfaces.R 2014-05-23 17:36:26 UTC (rev 366) +++ pkg/inst/tests/test_interfaces.R 2014-05-23 17:43:44 UTC (rev 367) @@ -23,7 +23,7 @@ event.window = 3, model.args = list(market.returns = NiftyIndex)) - expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) + expect_that(expected_mean, equals(test_es$result[, "Mean"])) expect_that(expected_outcomes, equals(test_es$outcomes)) expect_is(test_es, "es") @@ -44,7 +44,7 @@ event.window = 3, type = "None") - expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) + expect_that(expected_mean, equals(test_es$result[, "Mean"])) expect_that(expected_outcomes, equals(test_es$outcomes)) expect_is(test_es, "es") @@ -68,7 +68,7 @@ model.args = list(market.returns = NiftyIndex[index(USDINR)], others = test_others)) - expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) + expect_that(expected_mean, equals(test_es$result[, "Mean"])) expect_that(expected_outcomes, equals(test_es$outcomes)) expect_is(test_es, "es") @@ -90,7 +90,7 @@ type = "excessReturn", model.args = list(market.returns = NiftyIndex)) - expect_that(expected_mean, equals(test_es$eventstudy.output[, "Mean"])) + expect_that(expected_mean, equals(test_es$result[, "Mean"])) expect_that(expected_outcomes, equals(test_es$outcomes)) expect_is(test_es, "es") Modified: pkg/vignettes/eventstudies.Rnw =================================================================== --- pkg/vignettes/eventstudies.Rnw 2014-05-23 17:36:26 UTC (rev 366) +++ pkg/vignettes/eventstudies.Rnw 2014-05-23 17:43:44 UTC (rev 367) @@ -117,7 +117,7 @@ \texttt{eventstudy()} was run: the inference procedure adopted (``\texttt{bootstrap}'' inference in this case), the window width (10 in this case) and the method used for mapping the data (``\texttt{cumsum}''). 
The two new things are -`\texttt{outcomes}' and `\texttt{eventstudy.output}'. +`\texttt{outcomes}' and `\texttt{result}'. The vector `\texttt{outcomes}' shows the disposition of each event in the events table. There are 22 rows in \emph{SplitDates}, hence there will be 22 @@ -256,9 +256,9 @@ interval at date 0 as a measure of efficiency. <>= -tmp <- rbind(es$eventstudy.output[10, ], - es.mm$eventstudy.output[10, ], - es.amm$eventstudy.output[10, ] +tmp <- rbind(es$result[10, ], + es.mm$result[10, ], + es.amm$result[10, ] )[,c(1,3)] rownames(tmp) <- c("None", "MM", "AMM")