From noreply at r-forge.r-project.org Sun Feb 1 22:46:09 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Feb 2015 22:46:09 +0100 (CET) Subject: [Blotter-commits] r1677 - pkg/FinancialInstrument/R Message-ID: <20150201214609.E9E82185DD8@r-forge.r-project.org> Author: braverock Date: 2015-02-01 22:46:09 +0100 (Sun, 01 Feb 2015) New Revision: 1677 Modified: pkg/FinancialInstrument/R/load.instruments.R Log: - clean up merge conflict with r1656 Modified: pkg/FinancialInstrument/R/load.instruments.R =================================================================== --- pkg/FinancialInstrument/R/load.instruments.R 2015-01-30 22:33:50 UTC (rev 1676) +++ pkg/FinancialInstrument/R/load.instruments.R 2015-02-01 21:46:09 UTC (rev 1677) @@ -344,9 +344,7 @@ if (hasArg.days_to_omit <- hasArg(days_to_omit)) .days_to_omit <- days_to_omit if (hasArg.indexTZ <- hasArg(indexTZ)) .indexTZ <- indexTZ - - #importDefaults("getSymbols.FI") - + # Now get the values for each formal that we'll use if not provided # by the user and not found in the SymbolLookup table default.from <- from @@ -403,7 +401,7 @@ if (!use_identifier[1]=='primary_id') { instr_str <- make.names(tmp_instr$identifiers[use_identifier]) instr_str <- instr_str[!is.null(instr_str)] - } else instr_str <- make.names(tmp_instr[[use_identifier]]) + } else instr_str <- make.names(tmp_instr[use_identifier]) if (length(instr_str) == 0L) stop("Could not find instrument. Try with use_identifier=NA") } @@ -424,26 +422,14 @@ .days_to_omit <- days_to_omit if (hasArg.indexTZ <- hasArg(indexTZ)) .indexTZ <- indexTZ - importDefaults("getSymbols.FI") - - # Now get the values for each formal that we'll use if not provided - # by the user and not found in the SymbolLookup table - default.from <- from - default.to <- to - default.dir <- dir - default.return.class <- return.class - default.extension <- extension - default.split_method <- split_method[1] - default.use_identifier <- use_identifier - default.date_format <- date_format - default.verbose <- verbose - default.days_to_omit <- days_to_omit - default.indexTZ <- indexTZ + Symbol <- ifelse(is.na(instr_str), make.names(Symbols[[i]]), instr_str) + ndc<-nchar(dir) + if(substr(dir,ndc,ndc)=='/') dir <- substr(dir,1,ndc-1) #remove trailing forward slash + dirs <- paste(dir, Symbol, sep="/") - tmpr<-list() tmp <- list() dirstr<-paste(dirs, collapse=' ') - if(!length(dirs)==1) warning(paste0('multiple directories ',dirstr,' referenced, merge may be messy.')) + if(!length(dirs)==1) warning(paste0('multiple directories ',dirstr,' referenced, merge may interleave dissimilar data.')) for(dir in dirs) { if(!dir=="" && !file.exists(dir)) { if (verbose) cat("\ndirectory ",dir," does not exist, skipping\n") @@ -492,10 +478,9 @@ } # end 'common'/default method (same as getSymbols.rda) ) # end split_method switch fr <- convert.time.series(fr=fr,return.class=return.class) - Symbols[[i]] <-make.names(Symbols[[i]]) - #tmp <- list() - tmp[[Symbols[[i]]]] <- fr - if(!dir==dirs[1]) tmp[[Symbols[[i]]]] <- rbind(tmp[[Symbols[[i]]]],fr) + Symbols[[i]] <-make.names(Symbols[[i]]) + if(dir==dirs[1]) tmp[[Symbols[[i]]]] <- fr + else tmp[[Symbols[[i]]]] <- rbind(tmp[[Symbols[[i]]]],fr) } # end Symbols else } if(verbose) cat("done.\n") From noreply at r-forge.r-project.org Sun Feb 1 22:53:08 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Feb 2015 22:53:08 +0100 (CET) Subject: [Blotter-commits] r1678 - pkg/quantstrat/R Message-ID: 
<20150201215308.8D87A1860C6@r-forge.r-project.org> Author: bodanker Date: 2015-02-01 22:53:08 +0100 (Sun, 01 Feb 2015) New Revision: 1678 Modified: pkg/quantstrat/R/applyStrategy.rebalancing.R Log: - Fix bug #5990; and fix unreported bug that caused first rebalance signal to not be evaluated. Modified: pkg/quantstrat/R/applyStrategy.rebalancing.R =================================================================== --- pkg/quantstrat/R/applyStrategy.rebalancing.R 2015-02-01 21:46:09 UTC (rev 1677) +++ pkg/quantstrat/R/applyStrategy.rebalancing.R 2015-02-01 21:53:08 UTC (rev 1678) @@ -127,6 +127,8 @@ # combine plist into one sorted index pindex <- unique(sort(do.call(c, c(plist, use.names=FALSE)))) + # prepend minimum value, so first rebalance period will be evaluated + pindex <- c(.POSIXct(-8520336000, tz=attr(pindex,'tzone')), pindex) st$rebalance_index<-pindex #now we need to do the endpoints loop. @@ -138,7 +140,12 @@ #sret<-ret[[portfolio]][[symbol]] mktdata<-get(symbol,pos=st) #now subset - md_subset<-mktdata[as.POSIXct(index(mktdata))>pindex[i-1]&as.POSIXct(index(mktdata))<=pindex[i]] + posix_ix <- as.POSIXct(index(mktdata)) + # include prior row, in case there is a signal on the last + # observation of the rebalance period (#5990) + mdi <- seq.int(.firstCross(posix_ix, pindex[i-1], "gt")-1, + .firstCross(posix_ix, pindex[i], "gte")) + md_subset <- mktdata[mdi,] if(nrow(md_subset)<1) { next() } else { From noreply at r-forge.r-project.org Mon Feb 2 01:19:23 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 2 Feb 2015 01:19:23 +0100 (CET) Subject: [Blotter-commits] r1679 - pkg/FinancialInstrument/R Message-ID: <20150202001923.935B1187963@r-forge.r-project.org> Author: braverock Date: 2015-02-02 01:19:23 +0100 (Mon, 02 Feb 2015) New Revision: 1679 Modified: pkg/FinancialInstrument/R/load.instruments.R Log: - add back importDefaults, adjust hasArg - fix dir/symbol name for vector of use_identifiers Modified: pkg/FinancialInstrument/R/load.instruments.R =================================================================== --- pkg/FinancialInstrument/R/load.instruments.R 2015-02-01 21:53:08 UTC (rev 1678) +++ pkg/FinancialInstrument/R/load.instruments.R 2015-02-02 00:19:23 UTC (rev 1679) @@ -344,7 +344,9 @@ if (hasArg.days_to_omit <- hasArg(days_to_omit)) .days_to_omit <- days_to_omit if (hasArg.indexTZ <- hasArg(indexTZ)) .indexTZ <- indexTZ - + + importDefaults("getSymbols.FI") + # Now get the values for each formal that we'll use if not provided # by the user and not found in the SymbolLookup table default.from <- from @@ -399,30 +401,12 @@ if (inherits(tmp_instr,'try-error') || !is.instrument(tmp_instr)) stop("must define instrument first to call with 'use_identifier'") if (!use_identifier[1]=='primary_id') { - instr_str <- make.names(tmp_instr$identifiers[use_identifier]) - instr_str <- instr_str[!is.null(instr_str)] + instr_str <- make.names(unlist(tmp_instr$identifiers[use_identifier])) } else instr_str <- make.names(tmp_instr[use_identifier]) if (length(instr_str) == 0L) stop("Could not find instrument. 
Try with use_identifier=NA") } - # Find out if user provided a value for each formal - if (hasArg.from <- hasArg(from)) .from <- from - if (hasArg.to <- hasArg(to)) .to <- to - if (hasArg.dir <- hasArg(dir)) .dir <- dir - if (hasArg.return.class <- hasArg(return.class)) - .return.class <- return.class - if (hasArg.extension <- hasArg(extension)) .extension <- extension - if (hasArg.split_method <- hasArg(split_method)) - .split_method <- split_method - if (hasArg.use_identifier <- hasArg(use_identifier)) - .use_identifier <- use_identifier - if (hasArg.date_format <- hasArg(date_format)) .date_format <- date_format - if (hasArg.verbose <- hasArg(verbose)) .verbose <- verbose - if (hasArg.days_to_omit <- hasArg(days_to_omit)) - .days_to_omit <- days_to_omit - if (hasArg.indexTZ <- hasArg(indexTZ)) .indexTZ <- indexTZ - - Symbol <- ifelse(is.na(instr_str), make.names(Symbols[[i]]), instr_str) + Symbol <- ifelse(is.na(instr_str), make.names(Symbols[[i]]), instr_str) ndc<-nchar(dir) if(substr(dir,ndc,ndc)=='/') dir <- substr(dir,1,ndc-1) #remove trailing forward slash dirs <- paste(dir, Symbol, sep="/") @@ -442,7 +426,7 @@ date.vec <- as.Date(StartDate:EndDate) date.vec <- date.vec[!weekdays(date.vec) %in% days_to_omit] date.vec <- format(date.vec, format=date_format) - sym.files <- paste(date.vec, Symbol, extension, sep=".") + sym.files <- paste(date.vec, basename(dir), extension, sep=".") if (dir != "") sym.files <- file.path(dir, sym.files) dl <- lapply(sym.files, function(fp) { sf <- strsplit(fp, "/")[[1]] From noreply at r-forge.r-project.org Tue Feb 10 20:40:01 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Feb 2015 20:40:01 +0100 (CET) Subject: [Blotter-commits] r1680 - pkg/quantstrat/sandbox/backtest_musings Message-ID: <20150210194001.B62FC183BF0@r-forge.r-project.org> Author: braverock Date: 2015-02-10 20:40:01 +0100 (Tue, 10 Feb 2015) New Revision: 1680 Modified: pkg/quantstrat/sandbox/backtest_musings/research_replication.Rmd pkg/quantstrat/sandbox/backtest_musings/research_replication.pdf pkg/quantstrat/sandbox/backtest_musings/strat_dev_process.Rmd pkg/quantstrat/sandbox/backtest_musings/strat_dev_process.pdf Log: - more on cross validation, MAE, White's Reality Check - include comments and fix typos Modified: pkg/quantstrat/sandbox/backtest_musings/research_replication.Rmd =================================================================== --- pkg/quantstrat/sandbox/backtest_musings/research_replication.Rmd 2015-02-02 00:19:23 UTC (rev 1679) +++ pkg/quantstrat/sandbox/backtest_musings/research_replication.Rmd 2015-02-10 19:40:01 UTC (rev 1680) @@ -108,13 +108,13 @@ source paper need extraction, enumeration, and expansion. Expected tests for the hypotheses also need to be considered and specified in this stage. 
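For instance, a hypothesis that a signal predicts positive next-day returns could pre-specify its test before any backtest is run. A minimal R sketch (ours, purely illustrative; `signal_lagged` and `next_day_ret` are hypothetical, already-aligned vectors):

  # Pre-specified test: conditional on yesterday's signal firing,
  # is the mean next-day return greater than zero?
  # signal_lagged and next_day_ret are assumed aligned numeric vectors.
  t.test(next_day_ret[signal_lagged == 1], alternative = "greater")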
-A hypothesis statement includes:
+The hypothesis descriptions should include:
- what is being analyzed (the subject),
- the dependent variable(s) (the output/result/prediction)
- the independent variables (inputs into the model)
- the anticipated possible outcomes, including direction or comparison
-- address *how you will validate or refute each hypothesis*
+- addresses *how you will validate or refute each hypothesis*
The précis form of structured paragraphs (containing the points above) may be useful in stating the hypotheses, or a less regimented hypothesis/test pairing

Modified: pkg/quantstrat/sandbox/backtest_musings/research_replication.pdf
===================================================================
(Binary files differ)

Modified: pkg/quantstrat/sandbox/backtest_musings/strat_dev_process.Rmd
===================================================================
--- pkg/quantstrat/sandbox/backtest_musings/strat_dev_process.Rmd	2015-02-02 00:19:23 UTC (rev 1679)
+++ pkg/quantstrat/sandbox/backtest_musings/strat_dev_process.Rmd	2015-02-10 19:40:01 UTC (rev 1680)
@@ -89,15 +89,18 @@
3. custom tracking portfolios
- In some ways an extension of (1.) above, you can create your
- own benchmark by creating a custom tracking portfolio.
- As the most common example, a cap-weighted index is
- really a strategy archetype. The tracking portfolio for such
- an index invests in a basket of securities using a capitalization
- as the weights for the portfolio,and rebalances this portfolio on
- the schedule defined in the index specification.
- Other custom tracking portfolios or synthetic strategies
- may also be appropriate for measuring your strategy against.
+ In some ways an extension of (1.) above, you can create your own benchmark
+ by creating a custom tracking portfolio. As the most common example, a
+ cap-weighted index is really a strategy archetype. The tracking portfolio
+ for a capitalization-weighted index invests in a basket of securities using
+ market capitalization as the weights for the portfolio. This portfolio is
+ then rebalanced on the schedule defined in the index specification
+ (typically quarterly or annually). Components may be added or removed
+ following some rule at these rebalancing periods, or on some other abnormal
+ event such as a bankruptcy. Other custom tracking portfolios or synthetic
+ strategies may also be appropriate for measuring your strategy against,
+ depending on what edge(s) the strategy hopes to capture, and from what
+ universe of investible products.
4. market observables
@@ -159,16 +162,18 @@
or are willing to use, and your drawdown constraints (which are closely related to the leverage you intend to employ).
-Some of these may be dictated by the constraints your business structure
-has (see above).
-For example, leverage constraints generally have a hard limit imposed
+Some of the constraints on your business objective may be dictated by the
+constraints your business structure has (see above).
+
+For example:
+- Leverage constraints generally have a hard limit imposed
by the entity you are using to access the market, whether that is a broker/dealer, a 106.J membership, a leased seat, a clearing member,
-or a customer relationship. Drawdown constraints have hard limits dictated
+or a customer relationship.
+- Drawdown constraints have hard limits dictated
by the leverage you intend to employ: 10:1 leverage imposes a 10% hard drawdown constraint, 4:1 imposes a 25% drawdown constraint, and so on.
-
-Often, there will also be certain return objectives below which a strategy
+- Often, there will also be certain return objectives below which a strategy
is not worth doing.

Ideally, the business objectives for the strategy will be specified as
@@ -277,6 +282,14 @@
\includegraphics{hypothesis_process}
\end{marginfigure}
+A good/complete hypothesis statement includes:
+
+- what is being analyzed (the subject),
+- the dependent variable(s) (the output/result/prediction)
+- the independent variables (inputs into the model)
+- the anticipated possible outcomes, including direction or comparison
+- addresses *how you will validate or refute each hypothesis*
+
\newthought{Most strategy ideas will be rejected} during hypothesis creation and testing.
@@ -320,14 +333,16 @@
*filters*, *indicators*, *signals*, and *rules*.
\newthought{Filters} help to select the instruments to trade.
-They may be part of the formulated hypothesis, or they may be
-market characteristics that allow the rest of the strategy to
-trade better. In fundamental equity investing, some strategies
-consist only of filters. For example, the StarMine package
-that was bought by Thomson Reuters defines quantitative stock screens.
-Lo's Variance Ratio is another measure often used as a filter
-to turn the strategy on or off for particular instruments
-(but can also be used as an indicator, since it is time-varying).
+They may be part of the formulated hypothesis, or they may be market
+characteristics that allow the rest of the strategy to trade better.
+In fundamental equity investing, some strategies consist only of filters.
+For example, the StarMine package that was bought by Thomson Reuters defines
+quantitative stock screens based on technicals or fundamentals.^[a modern, free
+alternative may be found at http://finviz.com/screener.ashx]
+Many analysts will expand or shrink their investible universe based on screens.
+Lo's Variance Ratio is another measure often used as a filter to turn the
+strategy on or off for particular instruments (but can also be used as an
+indicator, since it is time-varying).
\newthought{Indicators} are quantitative values derived from market data.
@@ -492,12 +507,47 @@
and evaluating against those goals on an ongoing basis will guard against many of the error types described above by discarding results that are not in line with the stated hypotheses.
-
___
# Evaluating Each Component of the Strategy ^[*Maintain alertness in each particular instance of particular ways in which our knowledge is incomplete*. - John @Tukey1962 p. 14]
-___
+It is important to evaluate each component of the strategy separately. If we
+wish to evaluate whether our hypotheses about the market are correct, it does
+not make sense to first build a strategy with many moving parts and meticulously
+fit it to the data until after all the components have been evaluated for their
+own "goodness of fit".
+
+The different components of the strategy, from filters, through indicators,
+signals, and different types of rules, are all trying to express different parts
+of the strategy's hypothesis and business objectives. Our goal, at every stage,
+should be to confirm that each individual component of the strategy is working:
+adding value, improving the prediction, validating the hypothesis, etc. before
+moving on to the next component.
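As a purely illustrative sketch of testing one component in isolation, assuming the xts and TTR packages and a hypothetical xts series `prices` of closing prices, one might first ask whether an indicator alone carries any information about next-period returns, before attaching any signals or rules to it:

  library(xts)
  library(TTR)
  # 'prices' is assumed: an xts series of daily closes.
  rets      <- diff(log(prices))                 # one-period log returns
  indicator <- prices / SMA(prices, n = 50) - 1  # distance from a 50-period SMA
  # align yesterday's indicator with today's return, then test the relationship
  aligned <- na.omit(merge(lag.xts(indicator), rets, join = "inner"))
  cor.test(as.numeric(aligned[, 1]), as.numeric(aligned[, 2]))

Rejecting the indicator at this stage, before signals and rules are layered on, is far cheaper than rejecting a finished strategy.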
+
+There are several reasons why it is important to test components separately:
+
+\newthought{Testing individually guards against overfitting.}
+As described in the prior section, one of the largest risks of overfitting comes
+from data snooping. Rejecting an indicator, signal process, or other strategy
+component as quickly in the process as possible guards against doing too much
+work fitting a poorly conceived strategy to the data.
+
+\newthought{Tests can be specific to the technique.}
+In many cases, specific indicators, statistical models, or signal processes will
+have test methods that are tuned to that technique. These tests will generally
+have better *power* to detect a specific effect in the data. General tests,
+such as *p-values* or *t-tests*, may also be valuable, but their interpretation may
+vary from technique to technique, or they may be inappropriate for certain techniques.
+
+\newthought{It is more efficient.} The most expensive thing an analyst has is
+time. Building strategies is a long, intensive process. By testing individual
+components you can reject a badly-formed specification. Re-using components
+with known positive properties increases chances of success on a new strategy.
+In all cases, this is a more efficient use of time than going all the way through
+the strategy creation process only to reject it at the end.
+
+______
+
# Evaluating Indicators
In many ways, evaluating indicators in a vacuum is harder than evaluating
@@ -563,6 +613,8 @@
particularly stable analytical unit, and are often sensitive to exact starting or ending time of the bar, or to the methodologies used to calculate the components (open, high, low, close, volume, etc.) of the bar.
+Further, you don't know the ordering of events, whether the high came before
+the low.
To mitigate these dangers, it is important to test the robustness of the bar generating process itself, e.g. by varying the start time of the first bar. We will almost never run complete strategy tests on bar data, preferring
@@ -603,10 +655,10 @@
parameters. When comparing input parameter expectations, you should see 'clusters' of similar positive and/or negative return expectations in similar or contiguous parameter combinations. Existence of these clusters indicates
-what @Tomasini2009 refer to as a 'stable region' for the parameters
-(see parameter optimization below).
-A random assortment of positive expectations is a bad sign, and should
-lead to reviewing whether your hypotheses and earlier steps are robust.
+what @Tomasini2009 refer to as a 'stable region' for the parameters (see
+parameter optimization below). A random assortment of positive expectations is a
+bad sign, and should lead to reviewing whether your hypotheses and earlier steps
+are robust.
\begin{marginfigure}
\includegraphics{gamlss}
\end{marginfigure}
@@ -648,11 +700,12 @@
can begin to fully apply the literature on model specification and testing of predictions. From the simplest available methods such as mean squared model error or kernel distance from an ideal process, through extensive evaluation
-as suggested for BIC, effective number of parameters, and cross validation of
+as suggested for Akaike's Information Criterion (AIC), Bayesian Information
+Criterion (BIC), effective number of parameters, cross validation of
@Hastie2009, and including time series specific models such as the data driven
-approach "revealed performance" approach of @Racine2009, all available tools
-from the forecasting literature should be considered for evaluating proposed
-signal processes.
+"revealed performance" approach of @Racine2009: all available tools from the +forecasting literature should be considered for evaluating proposed signal +processes. It should be clear that evaluating the signal generating process offers multiple opportunities to re-evaluate assumptions about the method of @@ -726,7 +779,8 @@ types of exits, or after parameter optimization (see below). They include classic risk stops (see below) and profit targets, as well as trailing take profits or pullback stops. Empirical profit rules are usually identified -using the outputs of things like MAE/MFE, for example: +using the outputs of things like MEan Adverse Excursion(MAE)/Mean Favorable +Excurison(MFE), for example: - MFE shows that trades that have advanced *x* % or ticks are unlikely to advance further, so the trade should be taken off @@ -734,6 +788,8 @@ indicates to be on the lookout for an exit opportunity, so a trailing take profit may be in order +See more on MAE/MFE below. + ## risk rules There are several types of risk rules that may be tested in the backtest, @@ -1140,7 +1196,7 @@ we believe this is an invitation to overfitting, and prefer to only perform that kind of speculative analysis inside the structure of a defined experimental design such as parameter optimization, walk forward analysis, -or *k*-fold cross validation on strategy implementations, and leave the +or *k*-fold cross validation on strategy implementations. Leave the simulated data much earlier in the process when confirming the power of the strategy components. @@ -1194,14 +1250,20 @@ paths, and properties of these quantiles will frequently provide insight into methods of action in the strategy, and can lead to further strategy development. +It is important when evaluating MAE/MFE to do this type of analysis in your test +set. One thing that you want to test out of smaple is whether the MAE threshold +is stable over time. You want to avoid, as with other parts of the strategy, +going over and "snooping" the data for the entire test period, or all your +target instruments. + ___ # Post Trade Analysis Post trade analysis offers an opportunity to calibrate the things you learned in the backtest, and generate more hypotheses for improving the strategy. Analyzing fills may proceed using all the tools described earlier in this -document. Additionally, you now have enough data to model slippage from the -model prices, as well as any slippage (positive or negative) from the +document. Additionally, you now have enough data with which to model slippage +from the model prices, as well as any slippage (positive or negative) from the other backtest statistics. One immediate benefit for post trade analysis is that you already have all @@ -1543,9 +1605,15 @@ the autocorrelation structure of the original data to the degree possible. ## White's Reality Check -- White's Reality Check from @White2000 and @Hansen2005 +White's Data Mining Reality Check from @White2000 (usually referred to as DRMC or +just "White's Reality Check" WRC) is a bootstrap based test which compares the +strategy returns to a benchmark. The ideas were expanded in @Hansen2005. It +creates a set of bootstrap returns and then checks via abolute or mean squared +error what the chances that the model could have been the result of random +selection. It applies a *p-value* test between the bootstrap distribution and +the backtest results to determine whether the results of the backtest appear to +be statistically significant. 
- ## cross validation Cross validation is a widely used statistical technique for model evaluation. @@ -1595,6 +1663,17 @@ always important to understand the statistical error bounds of your calculations, it is not a fatal flaw. +Some question exists whether *k*-fold cross validation is appropriate for time +series in the same way that it is for categorical or panel data. Rob Hyndman +addresses this directly here^[http://robjhyndman.com/hyndsight/tscvexample/] +and here^[https://www.otexts.org/fpp/2/5/]. What he describes as "forecast +evaluation with a rolling origin" is essentially Walk Forward Analysis. One +important takeaway from Prof. Hyndman's treatment of the subject is that it is +important to define the expected result and tests to measure forecast accuracy +before performing the (back)test. Then, all the tools of forecast evaluation +may be applied to evaluate how well your forecast is doing out of sample, and +whether you are likely to have overfit your model. + ## linear models such as @Bailey2014pm and @Bailey2014deSharpe - modifying existing expectations @@ -1620,7 +1699,7 @@ # Acknowledgements -I would like to thank my team for thoughtful comments and questions, +I would like to thank my team for thoughtful comments and questions, John Bollinger, and Stephen Rush at the University of Connecticut for his insightful comments on an early draft of this paper. All remaining errors or omissions should be attributed to the author. All views expressed in this paper are to be viewed Modified: pkg/quantstrat/sandbox/backtest_musings/strat_dev_process.pdf =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Thu Feb 12 13:34:19 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Feb 2015 13:34:19 +0100 (CET) Subject: [Blotter-commits] r1681 - pkg/blotter/R Message-ID: <20150212123419.8F7F0187792@r-forge.r-project.org> Author: bodanker Date: 2015-02-12 13:34:19 +0100 (Thu, 12 Feb 2015) New Revision: 1681 Modified: pkg/blotter/R/addTxn.R Log: - Call match.fun(TxnFees) before splitting transactions that cross through zero. Fixes #5808. Modified: pkg/blotter/R/addTxn.R =================================================================== --- pkg/blotter/R/addTxn.R 2015-02-10 19:40:01 UTC (rev 1680) +++ pkg/blotter/R/addTxn.R 2015-02-12 12:34:19 UTC (rev 1681) @@ -74,10 +74,27 @@ if(!is.timeBased(TxnDate) ){ TxnDate<-as.POSIXct(TxnDate) } + + # Coerce the transaction fees to a function if a string was supplied + if(is.character(TxnFees)) { + TF <- try(match.fun(TxnFees), silent=TRUE) + if (!inherits(TF,"try-error")) TxnFees<-TF + } + # Compute transaction fees if a function was supplied + if (is.function(TxnFees)) { + txnfees <- TxnFees(TxnQty, TxnPrice, Symbol) + } else { + txnfees<- as.numeric(TxnFees) + } + if(is.null(txnfees) || is.na(txnfees)) + txnfees <- 0 + if(txnfees>0 && !isTRUE(allowRebates)) + stop('Positive Transaction Fees should only be used in the case of broker/exchange rebates for TxnFees ',TxnFees,'. 
See Documentation.') + # split transactions that would cross through zero if(PrevPosQty!=0 && sign(PrevPosQty+TxnQty)!=sign(PrevPosQty) && PrevPosQty!=-TxnQty){ - txnFeeQty=TxnFees/abs(TxnQty) # calculate fees pro-rata by quantity + txnFeeQty=txnfees/abs(TxnQty) # calculate fees pro-rata by quantity addTxn(Portfolio=pname, Symbol=Symbol, TxnDate=TxnDate, TxnQty=-PrevPosQty, TxnPrice=TxnPrice, ..., TxnFees = txnFeeQty*abs(PrevPosQty), ConMult = ConMult, verbose = verbose, eps=eps) TxnDate=TxnDate+2*eps #transactions need unique timestamps, so increment a bit @@ -96,23 +113,6 @@ } } - # FUNCTION - # Coerce the transaction fees to a function if a string was supplied - - if(is.character(TxnFees)) { - TF <- try(match.fun(TxnFees), silent=TRUE) - if (!inherits(TF,"try-error")) TxnFees<-TF - } - # Compute transaction fees if a function was supplied - if (is.function(TxnFees)) { - txnfees <- TxnFees(TxnQty, TxnPrice, Symbol) - } else { - txnfees<- as.numeric(TxnFees) - } - - if(is.null(txnfees) | is.na(txnfees)) txnfees = 0 - if(txnfees>0 && !isTRUE(allowRebates)) stop('Positive Transaction Fees should only be used in the case of broker/exchange rebates for TxnFees ',TxnFees,'. See Documentation.') - # Calculate the value and average cost of the transaction TxnValue = .calcTxnValue(TxnQty, TxnPrice, 0, ConMult) # Gross of Fees TxnAvgCost = .calcTxnAvgCost(TxnValue, TxnQty, ConMult)
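To make the fee semantics of the r1681 change concrete: a TxnFees function (or the name of one) is now resolved and evaluated once, before the zero-crossing split, so both legs of the split share the computed fee pro-rata by quantity. A hypothetical usage sketch (the fee schedule and the transaction values are ours, not from the commit):

  # Hypothetical per-share fee schedule; negative values reduce cash.
  penny_per_share <- function(TxnQty, TxnPrice, Symbol) -0.01 * abs(TxnQty)

  # Flipping a +100 position to -50 with a single 150-share sale:
  # addTxn(Portfolio = "p", Symbol = "XYZ", TxnDate = "2015-02-12",
  #        TxnQty = -150, TxnPrice = 100, TxnFees = penny_per_share)
  # Total fee is -1.50, so txnFeeQty = -1.50/150 = -0.01 per share; the
  # closing leg (100 shares) bears -1.00 and the new short leg (50 shares)
  # bears -0.50, matching the pro-rata split in the code above.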