From noreply at r-forge.r-project.org Sun Aug 2 19:21:04 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 2 Aug 2015 19:21:04 +0200 (CEST) Subject: [Returnanalytics-commits] r3895 - in pkg/Meucci: . R data demo man Message-ID: <20150802172104.622BE187A00@r-forge.r-project.org> Author: xavierv Date: 2015-08-02 19:21:04 +0200 (Sun, 02 Aug 2015) New Revision: 3895 Added: pkg/Meucci/R/DynamicPortfolioManagement.R pkg/Meucci/R/MVOUPosterior.R pkg/Meucci/R/MVOUPrior.R pkg/Meucci/R/QuadraticMatVC.R pkg/Meucci/data/dynamicManagement.rda pkg/Meucci/demo/S_DynamicManagementCase1.R pkg/Meucci/demo/S_DynamicManagementCase2.R pkg/Meucci/man/BellmanEq_CS1.Rd pkg/Meucci/man/BellmanEq_CS2.Rd pkg/Meucci/man/MVOU_Posterior.Rd pkg/Meucci/man/MVOU_Prior.Rd pkg/Meucci/man/QuadraticMat_Vc.Rd pkg/Meucci/man/dynamicManagement.Rd Modified: pkg/Meucci/DESCRIPTION pkg/Meucci/NAMESPACE pkg/Meucci/R/FullyFlexibleBayesNets.R pkg/Meucci/R/data.R pkg/Meucci/demo/FullyFlexibleBayesNets.R pkg/Meucci/man/Equities.Rd pkg/Meucci/man/JGB.Rd pkg/Meucci/man/StockSeries.Rd pkg/Meucci/man/TimeSeries.Rd pkg/Meucci/man/UsSwapRates.Rd pkg/Meucci/man/bondAttribution.Rd pkg/Meucci/man/butterfliesAnalytics.Rd pkg/Meucci/man/covNRets.Rd pkg/Meucci/man/db.Rd pkg/Meucci/man/dbFFP.Rd pkg/Meucci/man/db_FX.Rd pkg/Meucci/man/derivatives.Rd pkg/Meucci/man/fILMR.Rd pkg/Meucci/man/factorsDistribution.Rd pkg/Meucci/man/fixedIncome.Rd pkg/Meucci/man/freaqEst.Rd pkg/Meucci/man/highYieldIndices.Rd pkg/Meucci/man/implVol.Rd pkg/Meucci/man/linRet.Rd pkg/Meucci/man/linearModel.Rd pkg/Meucci/man/returnsDistribution.Rd pkg/Meucci/man/sectorsSnP500.Rd pkg/Meucci/man/sectorsTS.Rd pkg/Meucci/man/securitiesIndustryClassification.Rd pkg/Meucci/man/securitiesTS.Rd pkg/Meucci/man/swap2y4y.Rd pkg/Meucci/man/swapParRates.Rd pkg/Meucci/man/swaps.Rd Log: Added Dynamic Portfolio Management paper scripts and case studies, reformatted and modified FullyFlexibleBayesNets scripts Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2015-07-31 09:27:25 UTC (rev 3894) +++ pkg/Meucci/DESCRIPTION 2015-08-02 17:21:04 UTC (rev 3895) @@ -29,7 +29,6 @@ R (>= 2.14.0), zoo, xts (>= 0.8), - matlab, pracma, R.utils, mvtnorm, @@ -38,6 +37,7 @@ kernlab, nloptr, limSolve, + linprog, Suggests: Matrix, MASS, Modified: pkg/Meucci/NAMESPACE =================================================================== --- pkg/Meucci/NAMESPACE 2015-07-31 09:27:25 UTC (rev 3894) +++ pkg/Meucci/NAMESPACE 2015-08-02 17:21:04 UTC (rev 3895) @@ -1,5 +1,7 @@ # Generated by roxygen2 (4.1.1): do not edit by hand +export(BellmanEq_CS1) +export(BellmanEq_CS2) export(BlackLittermanFormula) export(BlackScholesCallPrice) export(BlackScholesCallPutPrice) @@ -37,6 +39,8 @@ export(LognormalMoments2Parameters) export(LognormalParam2Statistics) export(LongShortMeanCVaRFrontier) +export(MVOU_Posterior) +export(MVOU_Prior) export(MaxRsqCS) export(MaxRsqTS) export(MleRecursionForStudentT) @@ -55,6 +59,7 @@ export(PlotVolVsCompositionEfficientFrontier) export(Prior2Posterior) export(ProjectionStudentT) +export(QuadraticMat_Vc) export(QuantileMixture) export(RIEfficientFrontier) export(RandNormalInverseWishart) Added: pkg/Meucci/R/DynamicPortfolioManagement.R =================================================================== --- pkg/Meucci/R/DynamicPortfolioManagement.R (rev 0) +++ pkg/Meucci/R/DynamicPortfolioManagement.R 2015-08-02 17:21:04 UTC (rev 3895) @@ -0,0 +1,384 @@ +#' Solves the Bellman Equation 
for case study 1.
+#'
+#' @details In Case Study 1 there is only one risk driver (the 10-year rate)
+#' and only one view. The view is that the expected value of the 10-year rate
+#' will be the actual value minus 50 basis points at t_view = 1 year from the
+#' current time.
+#' The solution is analytical in the prior case.
+#' The solution is found recursively in the posterior case, starting from
+#' t_view and going back to time 0 with a time step = tau.
+#'
+#' In case study 1: n_ = 1, N_MeanViews = 1
+#'
+#' @param eta      [scalar] overall weight of the market impact of transactions
+#' @param gamma    [scalar] risk aversion parameter
+#' @param lambda   [scalar] discounting parameter
+#' @param tau      [scalar] trading interval
+#' @param theta    [n_ x n_] transition matrix of the MVOU process
+#' @param mu       [n_ x 1] drift vector of the MVOU process
+#' @param sig2     [n_ x n_] covariance parameters of the MVOU process
+#' @param c2       [n_ x n_] matrix of the market impact
+#' @param b_legacy [n_ x 1] legacy portfolio exposure at time 0
+#' @param x        [t_ x n_] path of the risk drivers (with time step = tau)
+#' @param t_view   [1 x N_MeanViews] times of the views
+#' @param view     [1 x N_MeanViews] views on the risk drivers
+#'
+#' @return prior [t_ x n_ matrix] optimal prior exposure
+#' @return post  [t_ x n_ matrix] optimal posterior exposure
+#'
+#' @references
+#' A. Meucci - "Dynamic Portfolio Management with Views at Multiple Horizons"
+#' \url{http://symmys.com/node/831}. See Meucci script for "BellmanEq_CS1.m"
+#'
+#' @author Xavier Valls \email{xavievallspla@@gmail.com}
+#' @export
+
+BellmanEq_CS1 <- function(eta, gamma, lambda, tau, theta, mu, sig2, c2,
+                          b_legacy, x, t_view, view) {
+  t_ <- nrow(x)
+  n_ <- length(theta)
+
+  ##############################################################################
+  #compute the prior at time 0
+  Prior0 <- MVOU_Prior(c(0, tau), x[1], theta, sig2, mu)
+  # first period covariance matrix
+  sig2_1 <- Prior0$cov[(n_ + 1):(2 * n_), (n_ + 1):(2 * n_)]
+
+  ##############################################################################
+  #Coefficients of the Bellman equation according to the prior
+
+  alpha_prior <- Prior0$mean_cost[(n_ + 1):(2 * n_)]
+  beta_prior <- Prior0$mean_lin[(n_ + 1):(2 * n_), 1:n_] - diag(1, n_)
+  HATsig2 <- exp(lambda) * (eta * c2) ^ (-1 / 2) * gamma * sig2_1 * (eta * c2) ^
+             (-1 / 2)
+  HATpsi_bb <- (0.25 * (HATsig2 + diag(1, n_) * (exp(lambda) - 1)) ^ 2 +
+               HATsig2) ^ (1 / 2) - 0.5 * (HATsig2 + diag(1, n_) *
+               (exp(lambda) - 1))
+  psi_bb_prior <- (eta * c2) ^ (1 / 2) * HATpsi_bb * (eta * c2) ^ (1 / 2)
+  q_prior <- gamma * sig2_1 + eta * c2 + exp(-lambda) * psi_bb_prior
+  tmp <- (eta * c2 * (solve(q_prior) %*% beta_prior))
+  psi_bx_prior <- solve(diag(1, n_ ^ 2) - exp(-lambda) * kron(t(beta_prior) +
+                  diag(1, n_), eta * (c2 / q_prior))) %*% array(tmp)
+  psi_bx_prior <- matrix(psi_bx_prior, nrow = n_)
+  psi_b_prior <- solve(q_prior / (eta * c2) - exp(-lambda) * diag(1, n_)) %*%
+                 (diag(1, n_) + exp(-lambda) * psi_bx_prior) * alpha_prior
+
+  # #Alternatively, if and only if c2 <- sig2_1
+  # a_ <- (sqrt(4*gamma*eta*exp(-lambda)+(gamma+(1-exp(-lambda))*eta)^2) -
+  #       ((1-exp(-lambda))*eta+gamma))/(2*exp(-lambda))
+  # psi_bb_prior <- a_*sig2_1
+  # psi_bx_prior <- eta*(beta_prior)*inv((gamma+eta+exp(-lambda)*a_)*
+  #                 diag(1, n_)-eta*exp(-lambda)*(beta_prior+diag(1, n_)))
+  # psi_b_prior <- eta*(alpha_prior + exp(-lambda)*psi_bx_prior*alpha_prior)/
+  #                (gamma+eta+exp(-lambda)*a_-exp(-lambda)*eta)
+
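+  # Under the views no closed-form solution is available: the conditional
+  # moments become time-dependent, so the coefficients below are obtained by
+  # backward recursion, from the last view back to time 0 in steps of tau
+  # (see @details above).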
+  ##############################################################################
+  #Coefficients of the Bellman equation according to the posterior distribution
+  ##############################################################################
+
+  ##############################################################################
+  #Initialize the variables
+  Hor <- ceil(max(t_view) / tau)
+  psi_t_bb <- array(0, dim = c(n_, n_, Hor))
+  q_t <- array(0, dim = c(n_, n_, Hor))
+  psi_t_bx <- array(0, dim = c(n_, n_, Hor))
+  psi_t_b <- matrix(0, n_, Hor)
+  alpha_t <- matrix(0, n_, Hor)
+  beta_t <- array(0, dim = c(n_, n_, Hor))
+
+  ##############################################################################
+  #Set the boundary conditions asymptotically. After the last view, the
+  #solution is equal to the prior
+  psi_t_bb[1:n_, 1:n_, Hor] <- psi_bb_prior
+  q_t[1:n_, 1:n_, Hor] <- q_prior
+  psi_t_bx[1:n_, 1:n_, Hor] <- psi_bx_prior
+  psi_t_b[1:n_, Hor] <- psi_b_prior
+  alpha_t[1:n_, Hor] <- alpha_prior
+  beta_t[1:n_, 1:n_, Hor] <- beta_prior
+
+  for (k in seq(Hor - 1, 1, -1)) {
+    q <- drop(q_t[1:n_, 1:n_, k + 1])
+    alpha <- drop(alpha_t[1:n_, k + 1])
+    beta <- drop(beta_t[1:n_, 1:n_, k + 1])
+    psi_bx <- drop(psi_t_bx[1:n_, 1:n_, k + 1])
+    psi_b <- drop(psi_t_b[1:n_, k + 1])
+
+    ############################################################################
+    #update of the coefficients of the Bellman equation
+    psi_t_b[1:n_, k] <- eta * c2 * (solve(q) %*% (alpha + exp(-lambda) *
+                        psi_bx * alpha + exp(-lambda) * psi_b))
+    psi_t_bx[1:n_, 1:n_, k] <- eta * c2 * (solve(q) %*% (beta + exp(-lambda) *
+                               psi_bx * (beta + diag(1, n_))))
+    psi_t_bb[1:n_, 1:n_, k] <- -eta ^ 2 * c2 * (solve(q) %*% c2) + eta * c2
+
+    ############################################################################
+    #set the monitoring times of interest
+    t <- c(0, tau, t_view - k * tau)
+    if ((t[length(t)] - t[length(t) - 1]) < t[length(t)] * 10 ^ (-10))
+      t <- t[-length(t)]
+    T_ <- length(t)
+
+    #compute the prior
+    Prior <- MVOU_Prior(t, 0, theta, sig2, mu)
+
+    #set the views
+    grid <- meshgrid(t, 1:n_)
+    T <- grid$X
+    N <- grid$Y
+    N_Meanviews <- 1 #Number of views on expectations
+    v_mu_tmp <- array(0, dim = c(N_Meanviews, n_, T_))
+    v_mu_tmp[1, 1, T_] <- 1
+    mu_view <- view[1]
+    v_mu <- matrix(v_mu_tmp, nrow = nrow(v_mu_tmp))
+    views <- list()
+    views$N_Meanviews <- N_Meanviews
+    views$N_Covviews <- c()
+    views$dimension <- array(N)
+    views$monitoring_time <- array(T)
+    views$v_mu <- v_mu
+    views$v_sig <- NaN
+    views$mu_view <- mu_view
+    views$sig2_view <- c()
+
+    ############################################################################
+    #update the Posterior moments of the process
+    Posterior <- MVOU_Posterior(Prior, views)
+    alpha_t[1:n_, k] <- Posterior$mean_cost[(n_ + 1):(2 * n_)]
+    beta_t[1:n_, 1:n_, k] <- Posterior$mean_lin[(n_ + 1):(2 * n_), 1:n_] -
+                             diag(1, n_)
+    sig2_1 <- Posterior$cov[(n_ + 1):(2 * n_), (n_ + 1):(2 * n_)]
+
+    ############################################################################
+    #update matrix q_t
+    psi_bb <- drop(psi_t_bb[1:n_, 1:n_, k])
+    q_t[1:n_, 1:n_, k] <- gamma * sig2_1 + eta * c2 + exp(-lambda) * psi_bb
+  }
+
+  b_prior <- matrix(NaN, t_, 1)
+  b_post <- matrix(NaN, t_, 1)
+
+  #Reconstructing the optimal exposure on the simulated path x
+  for (t in 1:t_) {
+    if (t == 1) {
+      b_legacy_prior <- b_legacy
+      b_legacy_post <- b_legacy
+    } else {
+      b_legacy_prior <- b_prior[t - 1]
+      b_legacy_post <- b_post[t - 1]
+    }
+    # prior exposure
+    b_prior[t] <- solve(q_prior) %*% (alpha_prior + (beta_prior) * x[t] + eta *
+                  c2 * b_legacy_prior + exp(-lambda) * psi_bx_prior * (alpha_prior +
+                  (beta_prior + diag(1, n_)) * x[t]) + exp(-lambda) * psi_b_prior)
+    #posterior exposure
+    q <- drop(q_t[1:n_, 1:n_, t])
+    alpha <- drop(alpha_t[1:n_, t])
+    beta <- drop(beta_t[1:n_, 1:n_, t])
+    psi_bx <- drop(psi_t_bx[1:n_, 1:n_, t])
+    psi_b <- drop(psi_t_b[1:n_, t])
+    l <- alpha + beta * x[t] + eta * c2 * b_legacy_post + exp(-lambda) *
+         psi_bx * (alpha + (beta + diag(1, n_)) * x[t]) + exp(-lambda) * psi_b
+    b_post[t] <- solve(q) %*% l
+  }
+  return(list(prior = b_prior, post = b_post))
+}
+
+#' Solves the Bellman Equation for case study 2.
+#'
+#' @details In Case Study 2 we consider two risk drivers, the 10-year rate and
+#' the TIP spread, and two non-synchronous views on them. The view on the rate
+#' is that its expected value will be the actual value minus 50 basis points
+#' at t_viewX = 1 year from the current time (as in Case Study 1).
+#' The view on the TIP spread is that its expected value will be the actual
+#' value plus 50 basis points at t_viewTIP = 0.75 years.
+#'
+#' In case study 2: n_ = 2, k_ = 1, N_MeanViews = 2
+#'
+#' @param eta      [scalar] overall weight of the market impact of transactions
+#' @param gamma    [scalar] risk aversion parameter
+#' @param lambda   [scalar] discounting parameter
+#' @param tau      [scalar] trading interval
+#' @param theta    [n_ x n_] transition matrix of the MVOU process
+#' @param mu       [n_ x 1] drift vector of the MVOU process
+#' @param sig2     [n_ x n_] covariance parameters of the MVOU process
+#' @param c2       [n_ x n_] matrix of the market impact
+#' @param b_legacy [n_ x 1] legacy portfolio exposure at time 0
+#' @param x        [t_ x n_] path of the risk drivers (with time step = tau)
+#' @param t_view   [1 x N_MeanViews] times of the views
+#' @param view     [1 x N_MeanViews] views on the risk drivers
+#' @param i_view   [1 x N_MeanViews] vector of the labels of the risk drivers
+#'                 to which views refer
+#' @param omega    [k_ x n_] matrix to select the investible risk drivers
+#'
+#' @return prior [t_ x n_ matrix] optimal prior exposure
+#' @return post  [t_ x n_ matrix] optimal posterior exposure
+#'
+#' @references
+#' A. Meucci - "Dynamic Portfolio Management with Views at Multiple Horizons"
+#' \url{http://symmys.com/node/831}. See Meucci script for "BellmanEq_CS2.m"
+#'
+#' @author Xavier Valls \email{xavievallspla@@gmail.com}
+#' @export
+
+BellmanEq_CS2 <- function(eta, gamma, lambda, tau, theta, mu, sig2, c2,
+                          b_legacy, x, t_view, view, i_view, omega) {
+
+  t_ <- nrow(x)
+  n_ <- nrow(theta)
+  k_ <- length(c2)
+
+  ##############################################################################
+  #compute the prior at time 0
+  Prior0 <- MVOU_Prior(c(0, tau), matrix(x[1:n_]), theta, sig2, mu)
+  # first period covariance matrix, restricted to the investible drivers
+  sig2_1 <- Prior0$cov[(n_ + 1):(2 * n_), (n_ + 1):(2 * n_)]
+  sig2_1 <- omega %*% sig2_1 %*% t(omega)
+
+  ##############################################################################
+  #Coefficients of the Bellman equation according to the prior
+
+  alpha_prior <- Prior0$mean_cost[(n_ + 1):(2 * n_)]
+  beta_prior <- Prior0$mean_lin[(n_ + 1):(2 * n_), 1:n_] - diag(1, n_)
+  HATsig2 <- exp(lambda) * (eta * c2) ^ (-1 / 2) * gamma * sig2_1 * (eta * c2) ^
+             (-1 / 2)
+  HATpsi_bb <- (0.25 * (HATsig2 + diag(1, k_) * (exp(lambda) - 1)) ^ 2 +
+               HATsig2) ^ (1 / 2) - 0.5 * (HATsig2 + diag(1, k_) *
+               (exp(lambda) - 1))
+  psi_bb_prior <- (eta * c2) ^ (1 / 2) * HATpsi_bb * (eta * c2) ^ (1 / 2)
+  q_prior <- gamma * sig2_1 + eta * c2 + exp(-lambda) * psi_bb_prior
+  tmp <- (eta * c2 * (solve(q_prior) %*% (omega %*% beta_prior)))
+  psi_bx_prior <- solve(diag(1, k_ * n_) - exp(-lambda) * kron(t(beta_prior) +
+                  diag(1, n_), eta * (c2 / q_prior))) %*% array(tmp)
+  psi_bx_prior <- matrix(psi_bx_prior, nrow = k_, ncol = n_)
+  psi_b_prior <- solve(q_prior / (eta * c2) - exp(-lambda) * diag(1, k_)) %*%
+                 (omega + exp(-lambda) * psi_bx_prior) %*% alpha_prior
+
+  ##############################################################################
+  #Coefficients of the Bellman equation according to the posterior distribution
+  ##############################################################################
+
+  ##############################################################################
+  #Initialize the variables
+  Hor <- ceil(max(t_view) / tau)
+  psi_t_bb <- array(0, dim = c(k_, k_, Hor))
+  q_t <- array(0, dim = c(k_, k_, Hor))
+  psi_t_bx <- array(0, dim = c(k_, n_, Hor))
+  psi_t_b <- matrix(0, k_, Hor)
+  alpha_t <- matrix(0, n_, Hor)
+  beta_t <- array(0, dim = c(n_, n_, Hor))
+
+  ##############################################################################
+  #Set the boundary conditions asymptotically. 
After the last view, the + #solution is equal to the prior + psi_t_bb[1:k_, 1:k_, Hor] <- psi_bb_prior + q_t[1:k_, 1:k_, Hor] <- q_prior + psi_t_bx[1:k_, 1:n_, Hor] <- psi_bx_prior + psi_t_b[1:k_, Hor] <- psi_b_prior + alpha_t[1:n_, Hor] <- alpha_prior + beta_t[1:n_, 1:n_, Hor] <- beta_prior + + for (k in seq(Hor - 1, 1, -1)) { + q <- drop(q_t[1:k_, 1:k_, k + 1]) + alpha <- drop(alpha_t[1:n_, k + 1]) + beta <- drop(beta_t[1:n_, 1:n_, k + 1]) + psi_bx <- drop(psi_t_bx[1:k_, 1:n_, k + 1]) + psi_b <- drop(psi_t_b[1:k_, k + 1]) + + ############################################################################ + #update of the coefficients of the Bellman equation + psi_t_b[1:k_, k] <- eta * c2 * (solve(q) %*% (omega %*% alpha + + exp(-lambda) * psi_bx %*% alpha + exp(-lambda) * psi_b)) + psi_t_bx[1:k_, 1:n_, k] <- eta * c2 * (solve(q) %*% (omega %*% beta + + exp(-lambda) * psi_bx %*% (beta + diag(1, n_)))) + psi_t_bb[1:k_, 1:k_, k] <- -eta ^ 2 * c2 * (solve(q) %*% c2) + eta * c2 + + ############################################################################ + #set the monitoring times of interest + t <- c(0, tau, t_view - k * tau) + t <- sort(t) + t <- t[t>=0] + idx <- which(diff(t) < tau * 10 ^ -10) + t <- t(setdiff(1:length(t), idx)) + T_ <- length(t) + + #compute the prior + Prior <- MVOU_Prior(t, matrix(c(0,0)), theta, sig2, mu) + + #set the views + grid <- meshgrid(t, 1:n_) + T <- grid$X + N <- grid$Y + views <- list() + + if ((k * tau) >= t_view[2]) { + N_Meanviews <- 1 #Number of views on expectations + v_mu_tmp <- array(0, dim = c(N_Meanviews, n_, T_)) + v_mu_tmp[1, i_view[1], T_] <- 1 + mu_view <- view[1] + } else{ + N_Meanviews <- 2 #Number of views on expectations + v_mu_tmp <- array(0, dim = c(N_Meanviews, n_, T_)) + v_mu_tmp[1, i_view[1], T_] <- 1 + v_mu_tmp[2, i_view[2], T_ - 1] <- 1 + mu_view <- view[1] + mu_view <- c( mu_view, view[2]) + } + + v_mu <- matrix(v_mu_tmp, nrow = nrow(v_mu_tmp)) + views$N_Meanviews <- N_Meanviews + views$N_Covviews <- c() + views$dimension <- array(N) + views$monitoring_time <- array(T) + views$v_mu <- v_mu + views$v_sig <- NaN + views$mu_view <- mu_view + views$sig2_view <- c() + + ############################################################################ + #update the Posterior moments of the process + Posterior <- MVOU_Posterior(Prior, views) + alpha_t[1:n_, k] <- Posterior$mean_cost[(n_ + 1):(2 * n_)] + beta_t[1:n_, 1:n_, k] <- Posterior$mean_lin[(n_ + 1):(2 * n_), 1:n_] - + diag(1, n_) + sig2_1 <- Posterior$cov[(n_ + 1):(2 * n_), (n_ + 1):(2 * n_)] + sig2_1 <- omega %*% sig2_1 %*% t(omega) + + ############################################################################ + #update matrix q_t + psi_bb <- drop(psi_t_bb[1:k_, 1:k_, k]) + q_t[1:k_, 1:k_, k] <- gamma * sig2_1 + eta * c2 + exp(-lambda) * psi_bb + } + + b_prior <- matrix(NaN, t_, 1 ) + b_post <- matrix(NaN, t_, 1 ) + + #Reconstructing the optimal exposure on the simulated path x + for (t in 1:t_) { + if (t == 1) { + b_legacy_prior <- b_legacy + b_legacy_post <- b_legacy + } else { + b_legacy_prior <- b_prior[t - 1] + b_legacy_post <- b_post[t - 1] + } + # prior exposure + b_prior[t] <- solve(q_prior) %*% (omega %*% alpha_prior + omega %*% + beta_prior %*% x[t,] + eta * c2 %*% b_legacy_prior + + exp(-lambda) %*% psi_bx_prior %*% (alpha_prior + (beta_prior + + diag(1, n_)) %*% x[t,]) + exp(-lambda) * psi_b_prior) + + #posterior exposure + q <- drop(q_t[1:k_, 1:k_, t]) + alpha <- drop(alpha_t[1:n_, t]) + beta <- drop(beta_t[1:n_, 1:n_, t]) + psi_bx <- drop(psi_t_bx[1:k_, 1:n_, t]) + psi_b 
<- drop(psi_t_b[1:k_, t]) + l <- omega %*% alpha + omega %*% beta %*% x[t,] + eta * c2 * + b_legacy_post + exp(-lambda) %*% psi_bx %*% (alpha + (beta + + diag(1, n_)) %*% x[t,]) + exp(-lambda) * psi_b + b_post[t] <- solve(q) %*% l + } + return(list(prior = b_prior, post = b_post)) +} Modified: pkg/Meucci/R/FullyFlexibleBayesNets.R =================================================================== --- pkg/Meucci/R/FullyFlexibleBayesNets.R 2015-07-31 09:27:25 UTC (rev 3894) +++ pkg/Meucci/R/FullyFlexibleBayesNets.R 2015-08-02 17:21:04 UTC (rev 3895) @@ -21,7 +21,7 @@ # initialize parameters A <- matrix(, nrow = 0, ncol = nrow(X)) b <- g <- matrix(, nrow = 0, ncol = 1) - + c = c() # for each view... for (k in 1:length(View)) { I_mrg <- (X[, 1] < Inf) @@ -64,8 +64,9 @@ A <- rbind(A, New_A) # constraint for the conditional expectations... b <- rbind(b, New_b) g <- rbind(g, -log(1 - View[[k]]$c)) + c <- rbind(c,I_mrg) } - return(list(A = A, b = b, g = g)) + return(list(A = A, b = b, g = g, c=c)) } #' tweak a matrix @@ -78,70 +79,29 @@ #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} Tweak <- function(A, b, g) { - library(matlab) - library(limSolve) - K <- nrow(A) J <- ncol(A) - g_ <- rbind(g, zeros(J, 1)) + g_ <- rbind(g, matrix(0, J, 1)) - Aeq_ <- cbind(zeros(1, K), ones(1, J)) + Aeq_ <- cbind(matrix(0, 1, K), matrix(1, 1, J)) beq_ <- 1 - lb_ <- rbind(zeros(K, 1), zeros(J, 1)) - ub_ <- rbind(Inf * ones(K, 1), ones(J, 1)) + lb_ <- rbind(matrix(0, K, 1), matrix(0, J, 1)) + ub_ <- rbind(Inf * matrix(1, K, 1), matrix(1, J, 1)) - A_ <- cbind(-eye(K), A) + A_ <- cbind(-diag(1, K), A) b_ <- b # add lower-bound and upper-bound constraints - A_ <- rbind(A_, -eye(ncol(A_))) - b_ <- rbind(b_, zeros(ncol(A_), 1)) + A_ <- rbind(A_, -diag(1, ncol(A_))) + b_ <- rbind(b_, matrix(0, ncol(A_), 1)) - x0 <- rep(1/ncol(Aeq_), ncol(Aeq_)) - - # db_ = linprog(g_, A_, b_, Aeq_,beq_, lb_, ub_) # MATLAB version - # matrix containing coefficients of equality constraints Ex=F - # optimResult = linp(E = Aeq_, - # vector containing the right-hand side of equality constraints - # F = beq_, - # matrix containint coefficients of the inequality constraints GX >= H - # G = -1*A_, - # vector containing the right-hand side of the inequality constraints - # H = -1*b_, - # vector containing the coefficients of the cost function - # Cost = -1*g_, - # ispos = FALSE) - - costFunction <- function(x) { - matrix(x, nrow = 1) %*% matrix(-1*g_, ncol = 1) - } - gradient <- function(x) { - -1*g_ - } - optimResult <- optim(par = x0, - fn = costFunction, # CHECK - gr = gradient, - method = "L-BFGS-B", - lower = lb_, - upper = ub_, - hessian = FALSE) - - # library(linprog) - # optimResult2 = solveLP(E = Aeq_, - # vector containing the right-hand side of equality constraints - # F = beq_, - # matrix containint coefficients of the inequality constraints GX >= H - # G = -1*A_, - # vector containing the right-hand side of the inequality constraints - # H = -1*b_, - # vector containing the coefficients of the cost function - # Cost = -1*g_, - # ispos = FALSE) - - db_ <- optimResult$X - + x0 <- rep(1 / ncol(Aeq_), ncol(Aeq_)) + dim(A_) + dim(b_) + dim(g_) + db_ <- solveLP(g_,A_,b_,Aeq_,beq_,lb_,ub_) db <- db_[1:K] return(db) @@ -173,7 +133,6 @@ #' @export ComputeMoments <- function(X, p) { - library(matlab) N <- ncol(X) m <- t(X) %*% p Sm <- t(X) %*% (X * repmat(p, 1, N)) # repmat : repeats/tiles a matrix Added: pkg/Meucci/R/MVOUPosterior.R =================================================================== --- pkg/Meucci/R/MVOUPosterior.R 
(rev 0)
+++ pkg/Meucci/R/MVOUPosterior.R	2015-08-02 17:21:04 UTC (rev 3895)
@@ -0,0 +1,106 @@
+#' @title Computes the posterior conditional expectation and covariance
+#' matrix of an MVOU process
+#'
+#' @description This function computes the posterior conditional expectation
+#' and covariance matrix at different monitoring dates t1,t2,...,t_ of the
+#' process
+#' X_{t1,t2...,t_} = (X_t1,
+#'                    X_t2,
+#'                    .
+#'                    .
+#'                    X_t_)
+#' X_t follows a MVOU process: dX_t <- (-theta*X_t+mu)dt + sig*dB_t
+#' The views are: E{v_mu*X} = mu_view and Cov{v_sig*X} = sig2_view
+#'
+#' @param Mom   list of the prior moments of the risk drivers
+#' @param Views list of views
+#'
+#' @return Posterior list of posterior distribution information
+#'
+#' @details The list Mom consists of:
+#' \itemize{
+#' \item{Mom$monitoring_time }{ monitoring times [t_*n_ x 1]}
+#' \item{Mom$dimension }{ labels of the risk drivers [t_*n_ x 1]}
+#' \item{Mom$cov }{ prior covariance matrix of X_\{t1,t2...t_\} [t_*n_ x t_*n_]}
+#' \item{Mom$mean }{ prior vector of the means of X_\{t1,t2...t_\} [t_*n_ x 1]}
+#' \item{Mom$mean_cost}{[t_*n_ x 1]}
+#' \item{Mom$mean_lin}{[t_*n_ x 1]}
+#' \item{}{Mom$mean_cost and Mom$mean_lin are such that Mom$mean_cost +
+#' Mom$mean_lin*x0 = Mom$mean}}
+#'
+#' while the list of Views has the elements:
+#' \itemize{
+#' \item{Views$N_MeanViews }{ Number of views on the expectations [scalar]}
+#' \item{Views$N_CovViews }{ Number of views on the covariance matrix [scalar]}
+#' \item{Views$dimension }{ labels of the risk drivers [t_*n_ x 1]}
+#' \item{Views$monitoring_time }{ monitoring times [t_*n_ x 1]}
+#' \item{Views$v_mu }{ matrix that qualifies the views on expectations
+#' [N_MeanViews x t_*n_]}
+#' \item{Views$v_sig }{ matrix that qualifies the views on covariance
+#' [N_CovViews x t_*n_]}
+#' \item{Views$mu_view }{ extent of the views on expectation [N_MeanViews x 1]}
+#' \item{Views$sig2_view }{ extent of the views on the covariance
+#' [N_CovViews x 1]}}
+#'
+#' And the returned Posterior distribution list includes the elements:
+#' \itemize{
+#' \item{Posterior$monitoring_time }{ monitoring times [t_*n_ x 1]}
+#' \item{Posterior$dimension }{ labels of the risk drivers [t_*n_ x 1]}
+#' \item{Posterior$cov }{ posterior covariance matrix of X_{t1,t2...t_}
+#' [t_*n_ x t_*n_]}
+#' \item{Posterior$mean }{ posterior vector of the means of X_{t1,t2...t_}
+#' [t_*n_ x 1]}
+#' \item{Posterior$mean_cost }{ [t_*n_ x 1]}
+#' \item{Posterior$mean_lin }{ [t_*n_ x 1]}}
+#' Posterior$mean_cost and Posterior$mean_lin are such that
+#' Posterior$mean_cost + Posterior$mean_lin*x0 = Posterior$mean
+#'
+#' @references
+#' A. Meucci - "Dynamic Portfolio Management with Views at Multiple Horizons"
+#' \url{http://symmys.com/node/831}. See Meucci script for "MVOU_Prior.m"
+#'
+#' @author Xavier Valls \email{xavievallspla@@gmail.com}
+#' @export

+MVOU_Posterior <- function(Mom, Views) {
+  n <- unique(Mom$dimension)
+  t <- unique(Mom$monitoring_time)
+  n_ <- length(n)
+  t_ <- length(t)
+  grid <- meshgrid(t, 1:n_)
+  T <- grid$X
+  N <- grid$Y
+  Posterior <- list()
+  Posterior$monitoring_time <- array(T)
+  Posterior$dimension <- array(N)
+  Posterior$mean <- matrix(NaN, t_ * n_, 1)
+  Posterior$cov <- matrix(NaN, n_ * t_, n_ * t_)
+  Posterior$mean_cost <- matrix(NaN, t_ * n_, 1)
+  Posterior$mean_lin <- matrix(NaN, t_ * n_, n_)
+
+  S2 <- Mom$cov
+  mu <- Mom$mean
+  v_mu <- Views$v_mu
+  v_sig <- Views$v_sig
+  mu_view <- Views$mu_view
+  sig2_view <- Views$sig2_view
+
+  if (all(is.nan(Views$v_mu))) {
+    Posterior$mean <- mu
+  } else {
+    v_mu_dag <- (S2 %*% t(v_mu)) / (v_mu %*% S2 %*% t(v_mu))[1]
+    P_orth <- v_mu_dag %*% v_mu
+    P <- diag(1, dim(P_orth)[1], dim(P_orth)[2]) - P_orth
+    Posterior$mean <- P %*% mu + P_orth %*% v_mu_dag %*% mu_view
+    Posterior$mean_lin <- P %*% Mom$mean_lin
+    Posterior$mean_cost <- P %*% Mom$mean_cost + P_orth %*% v_mu_dag %*% mu_view
+  }
+  if (all(is.nan(Views$v_sig))) {
+    Posterior$cov <- S2
+  } else {
+    v_sig_dag <- (S2 %*% t(v_sig)) / (v_sig %*% S2 %*% t(v_sig))[1]
+    P_orth <- v_sig_dag %*% v_sig
+    P <- diag(1, dim(P_orth)[1], dim(P_orth)[2]) - P_orth
+    Posterior$cov <- P %*% S2 %*% t(P) + P_orth %*% (v_sig_dag %*% sig2_view %*%
+                     t(v_sig_dag)) %*% t(P_orth)
+  }
+  return(Posterior)
+}

Added: pkg/Meucci/R/MVOUPrior.R
===================================================================
--- pkg/Meucci/R/MVOUPrior.R	                        (rev 0)
+++ pkg/Meucci/R/MVOUPrior.R	2015-08-02 17:21:04 UTC (rev 3895)
@@ -0,0 +1,106 @@
+#' @title Computes the conditional mean and covariance matrix at different
+#' dates
+#'
+#' @description This function computes the conditional mean and covariance
+#' matrix at different monitoring dates t1,t2,...,t_ of the process
+#' X_{t1,t2...,t_} = (X_t1,
+#'                    X_t2,
+#'                    .
+#'                    .
+#'                    X_t_)
+#' X_t follows a MVOU process: dX_t <- (-theta*X_t+mu)dt + sig*dB_t
+#'
+#' @param t     [t_ x 1] vector of monitoring dates
+#' @param x0    [n_ x 1] observation at time 0
+#' @param theta [n_ x n_] transition matrix
+#' @param sig2  [n_ x n_] covariance matrix
+#' @param mu    [n_ x 1] vector of drift parameters
+#'
+#' @return Mom list with the conditional moments of the risk drivers
+#'
+#' @references
+#' A. Meucci - "Dynamic Portfolio Management with Views at Multiple Horizons"
+#' \url{http://symmys.com/node/831}. See Meucci script for "MVOU_Prior.m"
+#'
+#' @author Xavier Valls \email{xavievallspla@@gmail.com}
+#'
+#' @export

+#OUTPUT
+#Mom.monitoring_time <- monitoring times [t_*n_ x 1]
+#Mom.dimension <- labels of the risk drivers [t_*n_ x 1]
+#Mom.cov <- covariance matrix of X_{t1,t2...t_} [t_*n_ x t_*n_]
+#Mom.mean <- vector of the means of X_{t1,t2...t_} [t_*n_ x 1]
+#Mom.mean_cost [t_*n_ x 1]
+#Mom.mean_lin [t_*n_ x 1]
+#Mom.mean_cost and Mom.mean_lin are such that
+#Mom.mean <- Mom.mean_cost + Mom.mean_lin*x0

+MVOU_Prior <- function(t, x0, theta, sig2, mu) {
+
+  Tol_eigb <- 10 ^ -8
+  t_ <- length(t)
+  n_ <- length(x0)
+  grid <- meshgrid(t, 1:n_)
+  Mom <- list()
+  Mom$monitoring_time <- grid$X
+  Mom$dimension <- t(grid$Y)
+  Mom$mean <- array(NaN, dim = c(t_ * n_, 1))
+  Mom$cov <- array(NaN, dim = c(t_ * n_, n_ * t_))
+  Mom$mean_cost <- array(NaN, dim = c(t_ * n_, 1))
+  Mom$mean_lin <- array(NaN, dim = c(t_ * n_, n_))
+
+  kronsum <- kronecker(theta, diag(1, n_)) + kronecker(diag(1, n_), theta)
+  e <- eigen(kronsum)
+  V <- e$vectors
+  lambda <- e$values
+  lambda_A <- array(NaN, length(lambda))
+
+  M <- matrix(NaN, n_, t_)
+  mean_lin <- array(NaN, dim = c(n_, n_, t_))
+  mean_cost <- matrix(NaN, n_, t_)
+  e <- eigen(theta)
+  V1 <- e$vectors
+  theta_diag <- e$values
+
+  F <- array(NaN, n_)
+
+  for (i in 1:t_) {
+    F[theta_diag <= Tol_eigb] <- t[i]
+    F[theta_diag > Tol_eigb] <- (1 - exp(-theta_diag[theta_diag > Tol_eigb] *
+                                 t[i])) / theta_diag[theta_diag > Tol_eigb]
+    E <- expm(-theta * t[i])
+    M[1:n_, i] <- (E %*% x0 + V1 %*% diag(F) %*% pinv(V1) %*% mu)
+    M[, i] <- Re(M[, i])
+
+    mean_lin[1:n_, 1:n_, i] <- t(E)
+    mean_lin[, , i] <- Re(mean_lin[, , i])
+    mean_cost[1:n_, i] <- (V1 %*% diag(F) %*% pinv(V1) %*% mu)
+    mean_cost[, i] <- Re(mean_cost[, i])
+
+    vecsig2 <- matrix(sig2, nrow = n_ ^ 2)
+    lambda_A[(abs(lambda) <= Tol_eigb)] <- t[i]
+    index <- abs(lambda) > Tol_eigb
+    lambda_A[index] <- (1 - exp(-lambda[index] * t[i])) / lambda[index]
+    A <- V %*% diag(lambda_A) %*% solve(V)
+    vecsig2_t <- A %*% vecsig2
+    sig2_t <- matrix(vecsig2_t, nrow = n_)
+    sig2_t <- Re(sig2_t)
+
+    for (j in i:t_) {
+      Mom$cov[(i - 1) * n_ + (1:n_), (j - 1) * n_ + (1:n_)] <- sig2_t %*%
+                                                               expm(-t(theta) *
+                                                               (t[j] - t[i]))
+      Mom$cov[(j - 1) * n_ + (1:n_), (i - 1) * n_ + (1:n_)] <- expm(-theta *
+                                                               (t[j] - t[i])) %*%
+                                                               sig2_t
+    }
+  }
+  #Note that Mom.mean <- Mom.mean_cost + Mom.mean_lin*x0
+  Mom$mean <- matrix(M, ncol = 1)
+  Mom$mean_lin <- t(array(mean_lin, c(dim(mean_lin)[1], dim(mean_lin)[2] *
+                  dim(mean_lin)[3])))
+  Mom$mean_cost <- t(matrix(mean_cost, nrow = 1))
+
+  return(Mom)
+}

Added: pkg/Meucci/R/QuadraticMatVC.R
===================================================================
--- pkg/Meucci/R/QuadraticMatVC.R	                        (rev 0)
+++ pkg/Meucci/R/QuadraticMatVC.R	2015-08-02 17:21:04 UTC (rev 3895)
@@ -0,0 +1,75 @@
+#' @title Computes the matrix q_t of the problem to solve when using
+#' CALCULUS of VARIATION
+#'
+#' @description This function computes the matrix q_t of the problem to solve
+#' when using CALCULUS of VARIATION:
+#' argmin_b (b' q_t b - b'l_t)
+#'
+#' @param lambda   [scalar] discounting parameter
+#' @param gamma    [scalar] risk aversion parameter
+#' @param eta      [scalar] overall weight of the market impact of transactions
+#' @param sig2     [n_*t_ x n_*t_] covariance matrix of the process of the risk
+#'                 drivers
+#' @param c2       [k_ x k_] market impact matrix
+#' @param tau_     [scalar] effective number of future time steps considered
+#' @param n_       [scalar] number of risk drivers
+#' @param i_invest [k_ x 1] labels of the investible risk 
drivers +#' +#' @return q_t [k_*tau_ x k_*tau_] matrix computed +#' @note +#' t_ = number of monitoring times at which sig2 is computed +#' k_ = number of investible risk drivers +#' +#' +#' @references +#' A. Meucci - "Dynamic Portfolio Management with Views at Multiple Horizons" +#' \url{http://symmys.com/node/831}. See Meucci script for "MVOU_Prior.m" +#' +#' @author Xavier Valls \email{xavievallspla@@gmail.com} +#' @export + +QuadraticMat_Vc <- function(lambda, gamma, eta, sig2, c2, tau_, n_, + i_invest = NULL) { + + if(length(i_invest) == 0) + i_invest <- 1:length(c2) + + + ExtractBlockMtx <- function( A, t1, t2, n1_, n2_, i_invest){ + # This function extracts the (t1,t2)-block out of the block-diagonal matrix + # A and then it selects the indices given by i_invest + # A is a matrix t_*n1_ x t_*n2_. The matrix has t_ blocks. Each block is + # n1_ x n2_ + blk <- A[(1 + (t1 - 1) * n1_):(t1 * n1_), (1 + (t2 - 1) * n2_):(t2 * n2_)] + if(length(blk) > 1) + blk <- blk[i_invest, i_invest] + return(blk) + } + + k_ <- length(i_invest) #number of investible risk drivers + q_t <- matrix(0, k_ * tau_, k_ * tau_) + for (t in 1:(tau_ - 1)) { + sig2_t <- ExtractBlockMtx(sig2, t, t, n_, n_, i_invest) + sig2_t1 <- ExtractBlockMtx(sig2, t + 1, t + 1, n_, n_, i_invest) + sig2_tt1 <- ExtractBlockMtx(sig2, t, t + 1, n_, n_, i_invest) + sig2_t <- sig2_t + sig2_t1 - 2 * sig2_tt1 + q_t[((t - 1) * k_ + 1):(t * k_), ((t - 1) * k_ + 1):(t * k_)] <- + exp(-lambda * (t - 1)) * (-gamma * 0.5 * sig2_t - eta * 0.5 * c2 * + (1 + exp(-lambda))) [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3895 From noreply at r-forge.r-project.org Mon Aug 3 22:04:26 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 3 Aug 2015 22:04:26 +0200 (CEST) Subject: [Returnanalytics-commits] r3896 - pkg/Dowd Message-ID: <20150803200426.0E4921861AB@r-forge.r-project.org> Author: dacharya Date: 2015-08-03 22:04:25 +0200 (Mon, 03 Aug 2015) New Revision: 3896 Modified: pkg/Dowd/NAMESPACE Log: Functions DBPensionVaR, DCPensionVaR and Insurance VaR added. Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-02 17:21:04 UTC (rev 3895) +++ pkg/Dowd/NAMESPACE 2015-08-03 20:04:25 UTC (rev 3896) @@ -27,6 +27,8 @@ export(ChristoffersenBacktestForUnconditionalCoverage) export(CornishFisherES) export(CornishFisherVaR) +export(DBPensionVaR) +export(DCPensionVaR) export(FrechetES) export(FrechetESPlot2DCl) export(FrechetVaR) @@ -54,6 +56,7 @@ export(HillPlot) export(HillQuantileEstimator) export(InsuranceVaR) +export(InsuranceVaRES) export(JarqueBeraBacktest) export(KSTestStat) export(KernelESBoxKernel) From noreply at r-forge.r-project.org Mon Aug 3 22:11:19 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 3 Aug 2015 22:11:19 +0200 (CEST) Subject: [Returnanalytics-commits] r3897 - pkg/Dowd/R Message-ID: <20150803201119.75EBF186C85@r-forge.r-project.org> Author: dacharya Date: 2015-08-03 22:11:19 +0200 (Mon, 03 Aug 2015) New Revision: 3897 Added: pkg/Dowd/R/InsuranceVaRES.R Log: Function InsuranceVaR added. Added: pkg/Dowd/R/InsuranceVaRES.R =================================================================== --- pkg/Dowd/R/InsuranceVaRES.R (rev 0) +++ pkg/Dowd/R/InsuranceVaRES.R 2015-08-03 20:11:19 UTC (rev 3897) @@ -0,0 +1,53 @@ +#' VaR and ES of Insurance Portfolio +#' +#' Generates Monte Carlo VaR and ES for insurance portfolio. 
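+#' Each contract is assumed to suffer a loss with probability p; loss sizes
+#' are drawn from a lognormal(mu, sigma) distribution and the company pays the
+#' excess of each loss over the deductible (see the simulation loop below).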
+#' +#' @param mu Mean of returns +#' @param sigma Volatility of returns +#' @param n Number of contracts +#' @param p Probability of any loss event +#' @param theta Expected profit per contract +#' @param deductible Deductible +#' @param number.trials Number of simulation trials +#' @param cl VaR confidence level +#' @return A list with "VaR" and "ES" of the specified portfolio +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Estimates VaR and ES of Insurance portfolio with given parameters +#' y<-InsuranceVaRES(.8, 1.3, 100, .6, 21, 12, 50, .95) +#' +#' @export +InsuranceVaRES<- function(mu, sigma, n, p, theta, deductible, number.trials, cl){ + M <- number.trials + D <- deductible + L <- matrix(0, n, M) + company.loss <- matrix(0, n, M) + total.company.loss <- double(M) + for (j in 1:M) { + L[1, j] <- rbinom( 1, 1, p) * rlnorm(1, mu, sigma) # Realisation of L + company.loss[1, j] <- max(L[1,j] - D, 0) # Adjust for deductible + + for (i in 2:n) { + L[i, j] <- rbinom(1, 1, p) * rlnorm(1, mu, sigma) # Realisation of L + company.loss[i, j] <- max(L[i,j] - D, 0) + company.loss[i - 1, j] # Adjust + # for deductible + } + total.company.loss[j] <- company.loss[n,j] # Total company loss for + # given j trial + } + # Sample of total company losses + adjusted.total.company.loss <- total.company.loss - mean(total.company.loss) - + theta * mean(total.company.loss) / n # Adjusts for premium + profit.or.loss <- - adjusted.total.company.loss # Convert to P/L + hist(adjusted.total.company.loss, col = "blue", + xlab = "Total Company Loss", ylab = "Frequency", + main = "Adjusted Total Company Loss") + VaR <- HSVaR(profit.or.loss, cl) + ES <- HSES(profit.or.loss, cl) + return(list("VaR" = VaR, "ES" = ES)) +} \ No newline at end of file From noreply at r-forge.r-project.org Mon Aug 3 22:11:52 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 3 Aug 2015 22:11:52 +0200 (CEST) Subject: [Returnanalytics-commits] r3898 - pkg/Dowd/man Message-ID: <20150803201152.55069186C85@r-forge.r-project.org> Author: dacharya Date: 2015-08-03 22:11:52 +0200 (Mon, 03 Aug 2015) New Revision: 3898 Added: pkg/Dowd/man/InsuranceVaRES.Rd Log: Function InsuranceVaR added. Added: pkg/Dowd/man/InsuranceVaRES.Rd =================================================================== --- pkg/Dowd/man/InsuranceVaRES.Rd (rev 0) +++ pkg/Dowd/man/InsuranceVaRES.Rd 2015-08-03 20:11:52 UTC (rev 3898) @@ -0,0 +1,42 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/InsuranceVaRES.R +\name{InsuranceVaRES} +\alias{InsuranceVaRES} +\title{VaR and ES of Insurance Portfolio} +\usage{ +InsuranceVaRES(mu, sigma, n, p, theta, deductible, number.trials, cl) +} +\arguments{ +\item{mu}{Mean of returns} + +\item{sigma}{Volatility of returns} + +\item{n}{Number of contracts} + +\item{p}{Probability of any loss event} + +\item{theta}{Expected profit per contract} + +\item{deductible}{Deductible} + +\item{number.trials}{Number of simulation trials} + +\item{cl}{VaR confidence level} +} +\value{ +A list with "VaR" and "ES" of the specified portfolio +} +\description{ +Generates Monte Carlo VaR and ES for insurance portfolio. +} +\examples{ +# Estimates VaR and ES of Insurance portfolio with given parameters + y<-InsuranceVaRES(.8, 1.3, 100, .6, 21, 12, 50, .95) +} +\author{ +Dinesh Acharya +} +\references{ +Dowd, K. Measuring Market Risk, Wiley, 2007. 
+}
+

From noreply at r-forge.r-project.org  Mon Aug 3 22:12:44 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Mon, 3 Aug 2015 22:12:44 +0200 (CEST)
Subject: [Returnanalytics-commits] r3899 - pkg/Dowd/R
Message-ID: <20150803201244.D048B186C85@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-03 22:12:44 +0200 (Mon, 03 Aug 2015)
New Revision: 3899

Added:
   pkg/Dowd/R/DBPensionVaR.R
Log:
Function DBPensionVaR added.

Added: pkg/Dowd/R/DBPensionVaR.R
===================================================================
--- pkg/Dowd/R/DBPensionVaR.R	                        (rev 0)
+++ pkg/Dowd/R/DBPensionVaR.R	2015-08-03 20:12:44 UTC (rev 3899)
@@ -0,0 +1,92 @@
+#' Monte Carlo VaR for DB pension
+#'
+#' Generates Monte Carlo VaR for DB pension in Chapter 6.7.
+#'
+#' @param mu Expected rate of return on pension-fund assets
+#' @param sigma Volatility of rate of return of pension-fund assets
+#' @param p Probability of unemployment in any period
+#' @param life.expectancy Life expectancy
+#' @param number.trials Number of trials
+#' @param cl VaR confidence level
+#' @return VaR for DB pension
+#' @references Dowd, Kevin. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#' @examples
+#'
+#'    # Estimates VaR of DB pension with given parameters
+#'    DBPensionVaR(.06, .2, .05, 80, 100, .95)
+#'
+#' @export
+DBPensionVaR <- function(mu, sigma, p, life.expectancy, number.trials, cl){
+  # Parameter Setting
+  contribution.rate <- .15
+  initial.income <- 25
+  income.growth.rate <- .02
+  M <- number.trials
+  L <- life.expectancy
+  # r is return on investment
+  # Asset Side
+  # Initialization
+  r <- matrix(0, 40, M)
+  fund <- matrix(0, 40, M)
+  employment.state <- matrix(0, 40, M)
+  actual.income <- matrix(0, 40, M)
+  contribution <- matrix(0, 40, M)
+  years.contributed <- matrix(0, 40, M)
+  terminal.fund <- double(M)
+  terminal.return <- double(M)
+  years.contributed <- matrix(0, 40, M)
+  employment.income <- matrix(0, 40, M)
+  total.years.contributed <- double(M)
+  for (j in 1:M) {
+    fund[1, j] <- contribution.rate * initial.income
+    years.contributed[1, j] <- 1
+    for (i in 2:40) {
+      r[i, j] <- rnorm(1, mu, sigma)
+      employment.state[i, j] <- rbinom(1, 1, 1 - p)
+      employment.income[i, j] <- initial.income * exp(income.growth.rate * (i - 1))
+      actual.income[i, j] <- employment.state[i, j] * employment.income[i, j]
+      contribution[i, j] <- contribution.rate * actual.income[i, j]
+      fund[i, j] <- contribution[i, j] + fund[i - 1, j] * (1 + r[i, j])
+      terminal.fund[j] <- fund[i, j]
+      terminal.return[j] <- r[i, j]
+      years.contributed[i, j] <- employment.state[i, j] + years.contributed[i - 1, j]
+      total.years.contributed[j] <- years.contributed[i, j]
+    }
+  }
+  mean.terminal.fund <- mean(terminal.fund)
+  std.terminal.fund <- sd(terminal.fund)
+
+  terminal.employment.income <- (1 - p) * initial.income * exp(income.growth.rate * 39)
+  pension <- double(M)
+  annuity.rate <- double(M)
+  implied.fund <- double(M)
+  for (j in 1:M){
+    pension[j] <- (total.years.contributed[j] / 40) * terminal.employment.income
+    annuity.rate[j] <- .04
+    implied.fund[j] <- pvfix(annuity.rate[j], L - 65, pension[j])
+  }
+  mean.terminal.employment.income <- mean(terminal.employment.income)
+  mean.t.years.contributed <- mean(total.years.contributed)
+  mean.pension <- mean(pension)
+  mean.implied.fund <- mean(implied.fund)
+  std.implied.fund <- sd(implied.fund)
+  # Profit Loss and VaR
+  profit.or.loss <- terminal.fund - implied.fund
+  mean.profit.or.loss <- mean(profit.or.loss)
+  std.profit.or.loss <- sd(profit.or.loss)
+  hist(-profit.or.loss, 20)
+  y <- HSVaR(profit.or.loss, cl)
+  return(y)
+}
+# Accessory functions
+pvfix <- function(r, n, c){
+  # pvfix computes the present value of a series of future cashflows (e.g. savings)
+  # parameters:
+  #   r interest rate per period (constant throughout the period)
+  #   n number of periods
+  #   c cashflow each period (assumed to be fixed)
+  s <- (c / r) * (1 - (1 / (1 + r) ^ n))
+  return(s)
+}
\ No newline at end of file

From noreply at r-forge.r-project.org  Mon Aug 3 22:13:13 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Mon, 3 Aug 2015 22:13:13 +0200 (CEST)
Subject: [Returnanalytics-commits] r3900 - pkg/Dowd/man
Message-ID: <20150803201313.AFCEF186C85@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-03 22:13:13 +0200 (Mon, 03 Aug 2015)
New Revision: 3900

Added:
   pkg/Dowd/man/DBPensionVaR.Rd
Log:
Function DBPensionVaR added.

Added: pkg/Dowd/man/DBPensionVaR.Rd
===================================================================
--- pkg/Dowd/man/DBPensionVaR.Rd	                        (rev 0)
+++ pkg/Dowd/man/DBPensionVaR.Rd	2015-08-03 20:13:13 UTC (rev 3900)
@@ -0,0 +1,38 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/DBPensionVaR.R
+\name{DBPensionVaR}
+\alias{DBPensionVaR}
+\title{Monte Carlo VaR for DB pension}
+\usage{
+DBPensionVaR(mu, sigma, p, life.expectancy, number.trials, cl)
+}
+\arguments{
+\item{mu}{Expected rate of return on pension-fund assets}
+
+\item{sigma}{Volatility of rate of return of pension-fund assets}
+
+\item{p}{Probability of unemployment in any period}
+
+\item{life.expectancy}{Life expectancy}
+
+\item{number.trials}{Number of trials}
+
+\item{cl}{VaR confidence level}
+}
+\value{
+VaR for DB pension
+}
+\description{
+Generates Monte Carlo VaR for DB pension in Chapter 6.7.
+}
+\examples{
+# Estimates VaR of DB pension with given parameters
+   DBPensionVaR(.06, .2, .05, 80, 100, .95)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, Kevin. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Mon Aug 3 22:13:56 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Mon, 3 Aug 2015 22:13:56 +0200 (CEST)
Subject: [Returnanalytics-commits] r3901 - pkg/Dowd/R
Message-ID: <20150803201356.75997186C85@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-03 22:13:56 +0200 (Mon, 03 Aug 2015)
New Revision: 3901

Added:
   pkg/Dowd/R/DCPensionVaR.R
Log:
Function DCPensionVaR added.

Added: pkg/Dowd/R/DCPensionVaR.R
===================================================================
--- pkg/Dowd/R/DCPensionVaR.R	                        (rev 0)
+++ pkg/Dowd/R/DCPensionVaR.R	2015-08-03 20:13:56 UTC (rev 3901)
@@ -0,0 +1,81 @@
+#' Monte Carlo VaR for DC pension
+#'
+#' Generates Monte Carlo VaR for DC pension in Chapter 6.7.
+#'
+#' @param mu Expected rate of return on pension-fund assets
+#' @param sigma Volatility of rate of return of pension-fund assets
+#' @param p Probability of unemployment in any period
+#' @param life.expectancy Life expectancy
+#' @param number.trials Number of trials
+#' @param cl VaR confidence level
+#' @return VaR for DC pension
+#' @references Dowd, Kevin. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#' @examples
+#'
+#'    # Estimates VaR of DC pension with given parameters
+#'    DCPensionVaR(.06, .2, .05, 80, 100, .95)
+#'
+#' @export
+DCPensionVaR <- function(mu, sigma, p, life.expectancy, number.trials, cl){
+  # Parameter Setting
+  contribution.rate <- .15
+  initial.income <- 25
+  income.growth.rate <- .02
+  M <- number.trials
+  L <- life.expectancy
+  # r is return on investment
+  # Asset Side
+  # Initialization
+  r <- matrix(0, 40, M)
+  fund <- matrix(0, 40, M)
+  employment.state <- matrix(0, 40, M)
+  actual.income <- matrix(0, 40, M)
+  contribution <- matrix(0, 40, M)
+  years.contributed <- matrix(0, 40, M)
+  terminal.fund <- double(M)
+  terminal.return <- double(M)
+  years.contributed <- matrix(0, 40, M)
+  employment.income <- matrix(0, 40, M)
+  total.years.contributed <- double(M)
+  annuity.rate <- double(M)
+  pension <- double(M)
+  pension.ratio <- double(M)
+  for (j in 1:M) {
+    fund[1, j] <- contribution.rate * initial.income
+    years.contributed[1, j] <- 1
+    for (i in 2:40) {
+      r[i, j] <- rnorm(1, mu, sigma)
+      employment.state[i, j] <- rbinom(1, 1, 1 - p)
+      employment.income[i, j] <- initial.income * exp(income.growth.rate * (i - 1))
+      actual.income[i, j] <- employment.state[i, j] * employment.income[i, j]
+      contribution[i, j] <- contribution.rate * actual.income[i, j]
+      fund[i, j] <- contribution[i, j] + fund[i - 1, j] * (1 + r[i, j])
+      terminal.fund[j] <- fund[i, j]
+      terminal.return[j] <- r[i, j]
+      annuity.rate[j] <- .04
+      pension[j] <- payper(annuity.rate[j], L - 65, terminal.fund[j])
+      terminal.employment.income <- (1 - p) * initial.income * exp(income.growth.rate * 39)
+      pension.ratio[j] <- pension[j] / terminal.employment.income
+    }
+  }
+  mean.terminal.fund <- mean(terminal.fund)
+  std.terminal.fund <- sd(terminal.fund)
+  # Histogram
+  hist(pension.ratio, 20)
+  # VaR
+  VaR <- -HSVaR(pension.ratio, cl)
+  return(VaR)
+}
+
+# Accessory function
+payper <- function(r, n, p){
+  # Computes payment per period for annuity or loans
+  # parameters:
+  #   r interest rate per period
+  #   n number of periods
+  #   p present value of the instrument
+  payment <- (p * r) / (1 - (1 / (1 + r) ^ n))
+  return(payment)
+}
\ No newline at end of file

From noreply at r-forge.r-project.org  Mon Aug 3 22:14:19 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Mon, 3 Aug 2015 22:14:19 +0200 (CEST)
Subject: [Returnanalytics-commits] r3902 - pkg/Dowd/man
Message-ID: <20150803201419.89F66186C85@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-03 22:14:19 +0200 (Mon, 03 Aug 2015)
New Revision: 3902

Added:
   pkg/Dowd/man/DCPensionVaR.Rd
Log:
Function DCPensionVaR added.

Added: pkg/Dowd/man/DCPensionVaR.Rd
===================================================================
--- pkg/Dowd/man/DCPensionVaR.Rd	                        (rev 0)
+++ pkg/Dowd/man/DCPensionVaR.Rd	2015-08-03 20:14:19 UTC (rev 3902)
@@ -0,0 +1,38 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/DCPensionVaR.R
+\name{DCPensionVaR}
+\alias{DCPensionVaR}
+\title{Monte Carlo VaR for DC pension}
+\usage{
+DCPensionVaR(mu, sigma, p, life.expectancy, number.trials, cl)
+}
+\arguments{
+\item{mu}{Expected rate of return on pension-fund assets}
+
+\item{sigma}{Volatility of rate of return of pension-fund assets}
+
+\item{p}{Probability of unemployment in any period}
+
+\item{life.expectancy}{Life expectancy}
+
+\item{number.trials}{Number of trials}
+
+\item{cl}{VaR confidence level}
+}
+\value{
+VaR for DC pension
+}
+\description{
+Generates Monte Carlo VaR for DC pension in Chapter 6.7.
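+The terminal fund is converted into an annuity at retirement, and the VaR is
+computed on the pension ratio (the pension relative to terminal employment
+income).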
+}
+\examples{
+# Estimates VaR of DC pension with given parameters
+   DCPensionVaR(.06, .2, .05, 80, 100, .95)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, Kevin. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Tue Aug 4 00:56:11 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Tue, 4 Aug 2015 00:56:11 +0200 (CEST)
Subject: [Returnanalytics-commits] r3903 - pkg/Dowd/R
Message-ID: <20150803225611.76D45185509@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-04 00:56:10 +0200 (Tue, 04 Aug 2015)
New Revision: 3903

Modified:
   pkg/Dowd/R/LogNormalESPlot3D.R
   pkg/Dowd/R/LogNormalVaRPlot3D.R
   pkg/Dowd/R/LogtESPlot3D.R
   pkg/Dowd/R/LogtVaRPlot3D.R
   pkg/Dowd/R/NormalESPlot3D.R
   pkg/Dowd/R/NormalVaRPlot3D.R
Log:
Perspective, shading and various parameters of 3D plots were adjusted to make plots look like those of the MMRII Toolbox

Modified: pkg/Dowd/R/LogNormalESPlot3D.R
===================================================================
--- pkg/Dowd/R/LogNormalESPlot3D.R	2015-08-03 20:14:19 UTC (rev 3902)
+++ pkg/Dowd/R/LogNormalESPlot3D.R	2015-08-03 22:56:10 UTC (rev 3903)
@@ -26,10 +26,10 @@
 #'
 #' # Plots VaR against confidene level given geometric return data
 #' data <- runif(5, min = 0, max = .2)
-#' LogNormalESPlot3D(returns = data, investment = 5, cl = seq(.85,.99,.01), hp = 60:90)
+#' LogNormalESPlot3D(returns = data, investment = 5, cl = seq(.9,.99,.01), hp = 1:100)
 #'
 #' # Computes VaR against confidence level given mean and standard deviation of return data
-#' LogNormalESPlot3D(mu = .012, sigma = .03, investment = 5, cl = seq(.85,.99,.02), hp = 40:80)
+#' LogNormalESPlot3D(mu = .012, sigma = .03, investment = 5, cl = seq(.9,.99,.01), hp = 1:100)
 #'
 #'
 #' @export
@@ -124,7 +124,9 @@
   v <- v/n

   # Plotting
-  persp(x=cl, y=hp, t(VaR), xlab = "Confidence Level",
-        ylab = "Holding Period", zlab = "VaR",
-        main = "Log-t ES against confidence level")
+  persp(x=cl, y=hp, t(v), xlab = "Confidence Level",
+        ylab = "Holding Period", zlab = "ES", border=NA,
+        theta = -45, phi = 40, shade = .75, ltheta = 120,
+        col = "lightgray", ticktype = "detailed", nticks = 5,
+        main = "Log-Normal ES against CL and HP")
 }

Modified: pkg/Dowd/R/LogNormalVaRPlot3D.R
===================================================================
--- pkg/Dowd/R/LogNormalVaRPlot3D.R	2015-08-03 20:14:19 UTC (rev 3902)
+++ pkg/Dowd/R/LogNormalVaRPlot3D.R	2015-08-03 22:56:10 UTC (rev 3903)
@@ -27,11 +27,11 @@
 #' @examples
 #'
 #' # Plots VaR against confidene level given geometric return data
-#' data <- runif(5, min = 0, max = .2)
-#' LogNormalVaRPlot3D(returns = data, investment = 5, cl = seq(.85,.99,.01), hp = 60:90)
+#' data <- rnorm(5, .09, .03)
+#' LogNormalVaRPlot3D(returns = data, investment = 5, cl = seq(.9,.99,.01), hp = 1:100)
 #'
 #' # Computes VaR against confidence level given mean and standard deviation of return data
-#' LogNormalVaRPlot3D(mu = .012, sigma = .03, investment = 5, cl = seq(.85,.99,.02), hp = 40:80)
+#' LogNormalVaRPlot3D(mu = .012, sigma = .03, investment = 5, cl = seq(.9,.99,.01), hp = 1:100)
 #'
 #'
 #' @export
@@ -114,7 +114,8 @@
   VaR <- investment - exp( sigma[1,1] * sqrt(hp) %*% qnorm(1 - cl, 0, 1) + mu[1,1] * hp %*% matrix(1,cl.row,cl.col) + log(investment)) # VaR
   # Plotting
   persp(x=cl, y=hp, t(VaR), xlab = "Confidence Level",
-        ylab = "Holding Period", zlab = "VaR",
-        main = "Log Normal VaR against confidence level and holding period")
-
+        ylab = "Holding Period", zlab = "VaR", border=NA,
+        theta = -45, phi = 35, shade = .75, ltheta = 90, cex.axis=.85, 
cex.lab=.85, + col = "lightgray", ticktype = "detailed", nticks = 5, + main = "Log Normal VaR against CL and HP") } Modified: pkg/Dowd/R/LogtESPlot3D.R =================================================================== --- pkg/Dowd/R/LogtESPlot3D.R 2015-08-03 20:14:19 UTC (rev 3902) +++ pkg/Dowd/R/LogtESPlot3D.R 2015-08-03 22:56:10 UTC (rev 3903) @@ -29,11 +29,11 @@ #' @examples #' #' # Plots ES against confidene level given geometric return data -#' data <- runif(5, min = 0, max = .2) -#' LogtESPlot3D(returns = data, investment = 5, df = 6, cl = seq(.85,.99,.01), hp = 60:90) +#' data <- rnorm(5, .09, .03) +#' LogtESPlot3D(returns = data, investment = 5, df = 6, cl = seq(.9,.99,.01), hp = 1:100) #' #' # Computes ES against confidence level given mean and standard deviation of return data -#' LogtESPlot3D(mu = .012, sigma = .03, investment = 5, df = 6, cl = seq(.85,.99,.02), hp = 40:80) +#' LogtESPlot3D(mu = .012, sigma = .03, investment = 5, df = 6, cl = seq(.9,.99,.01), hp = 1:100) #' #' #' @export @@ -132,7 +132,10 @@ v <- v/n # Plotting - persp(x=cl, y=hp, t(VaR), xlab = "Confidence Level", - ylab = "Holding Period", zlab = "VaR", - main = "Log-t ES against confidence level") + persp(x=cl, y=hp, t(v), xlab = "Confidence Level", + ylab = "Holding Period", zlab = "ES", border=NA, + theta = -45, phi = 35, shade = .75, ltheta = 90, cex.axis=.85, cex.lab=.85, + col = "lightgray", ticktype = "detailed", nticks = 5, + main = "Log-t ES against CL and HP") + } Modified: pkg/Dowd/R/LogtVaRPlot3D.R =================================================================== --- pkg/Dowd/R/LogtVaRPlot3D.R 2015-08-03 20:14:19 UTC (rev 3902) +++ pkg/Dowd/R/LogtVaRPlot3D.R 2015-08-03 22:56:10 UTC (rev 3903) @@ -30,10 +30,10 @@ #' #' # Plots VaR against confidene level given geometric return data #' data <- runif(5, min = 0, max = .2) -#' LogtVaRPlot3D(returns = data, investment = 5, df = 6, cl = seq(.85,.99,.01), hp = 60:90) +#' LogtVaRPlot3D(returns = data, investment = 5, df = 6, cl = seq(.9,.99,.01), hp = 1:100) #' #' # Computes VaR against confidence level given mean and standard deviation of return data -#' LogtVaRPlot3D(mu = .012, sigma = .03, investment = 5, df = 6, cl = seq(.85,.99,.02), hp = 40:80) +#' LogtVaRPlot3D(mu = .012, sigma = .03, investment = 5, df = 6, cl = seq(.9,.99,.01), hp = 1:100) #' #' #' @export @@ -117,7 +117,8 @@ VaR <- investment - exp( ((df-2)/df) * sigma[1,1] * sqrt(hp) %*% qt(1 - cl, df) + mu[1,1] * hp %*% matrix(1,cl.row,cl.col) + log(investment)) # VaR # Plotting persp(x=cl, y=hp, t(VaR), xlab = "Confidence Level", - ylab = "Holding Period", zlab = "VaR", - main = "Log-t VaR against confidence level") - + ylab = "Holding Period", zlab = "VaR", border=NA, + theta = -45, phi = 35, shade = .75, ltheta = 90, cex.axis=.85, cex.lab=.85, + col = "lightgray", ticktype = "detailed", nticks = 5, + main = "Log-t VaR against CL and HP") } Modified: pkg/Dowd/R/NormalESPlot3D.R =================================================================== --- pkg/Dowd/R/NormalESPlot3D.R 2015-08-03 20:14:19 UTC (rev 3902) +++ pkg/Dowd/R/NormalESPlot3D.R 2015-08-03 22:56:10 UTC (rev 3903) @@ -25,10 +25,10 @@ #' #' # Plots VaR against confidene level given geometric return data #' data <- runif(5, min = 0, max = .2) -#' NormalESPlot3D(returns = data, cl = seq(.85,.99,.01), hp = 60:90) +#' NormalESPlot3D(returns = data, cl = seq(.9,.99,.01), hp = 1:100) #' #' # Computes VaR against confidence level given mean and standard deviation of return data -#' NormalESPlot3D(mu = .012, sigma = .03, cl = 
seq(.85,.99,.02), hp = 40:80) +#' NormalESPlot3D(mu = .012, sigma = .03, cl = seq(.9,.99,.01), hp = 1:100) #' #' #' @export @@ -110,10 +110,12 @@ VaR <- - sigma[1,1] * sqrt(hp) %*% qnorm(1 - cl, 0, 1) + mu[1,1] * hp %*% matrix(1,cl.row,cl.col) # VaR # ES estimation - es <-NormalES(mu = mu ,sigma = sigma, cl = cl, hp = hp) + ES <-NormalES(mu = mu ,sigma = sigma, cl = cl, hp = hp) # Plotting - persp(x=cl, y=hp, t(VaR), xlab = "Confidence Level", - ylab = "Holding Period", zlab = "VaR", - main = "Normal ES against confidence level") + persp(x=cl, y=hp, t(ES), xlab = "Confidence Level", + ylab = "Holding Period", zlab = "ES", border=NA, + theta = -45, phi = 40, shade = .75, ltheta = 120, cex.axis=.85, cex.lab=.85, + col = "lightgray", ticktype = "detailed", nticks = 5, + main = "Normal ES against CL and HP") } Modified: pkg/Dowd/R/NormalVaRPlot3D.R =================================================================== --- pkg/Dowd/R/NormalVaRPlot3D.R 2015-08-03 20:14:19 UTC (rev 3902) +++ pkg/Dowd/R/NormalVaRPlot3D.R 2015-08-03 22:56:10 UTC (rev 3903) @@ -24,11 +24,11 @@ #' @examples #' #' # Plots VaR against confidene level given geometric return data -#' data <- runif(5, min = 0, max = .2) -#' NormalVaRPlot3D(returns = data, cl = seq(.85,.99,.01), hp = 60:90) +#' data <- rnorm(5, .07, .03) +#' NormalVaRPlot3D(returns = data, cl = seq(.9,.99,.01), hp = 1:100) #' #' # Computes VaR against confidence level given mean and standard deviation of return data -#' NormalVaRPlot3D(mu = .012, sigma = .03, cl = seq(.85,.99,.02), hp = 40:80) +#' NormalVaRPlot3D(mu = .012, sigma = .03, cl = seq(.9,.99,.01), hp = 1:100) #' #' #' @export @@ -109,7 +109,9 @@ VaR <- - sigma[1,1] * sqrt(hp) %*% qnorm(1 - cl, 0, 1) - mu[1,1] * hp %*% matrix(1,cl.row,cl.col) # VaR # Plotting persp(x=cl, y=hp, t(VaR), xlab = "Confidence Level", - ylab = "Holding Period", zlab = "VaR", - main = "Normal VaR against confidence level and holding period") + ylab = "Holding Period", zlab = "VaR", border=NA, + theta = -45, phi = 40, shade = .75, ltheta = 120, cex.axis=.85, cex.lab=.85, + col = "lightgray", ticktype = "detailed", nticks = 5, + main = "Normal VaR against CL and HP") } From noreply at r-forge.r-project.org Tue Aug 4 00:56:44 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 4 Aug 2015 00:56:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3904 - pkg/Dowd/man Message-ID: <20150803225644.62563185509@r-forge.r-project.org> Author: dacharya Date: 2015-08-04 00:56:44 +0200 (Tue, 04 Aug 2015) New Revision: 3904 Modified: pkg/Dowd/man/LogNormalESPlot3D.Rd pkg/Dowd/man/LogNormalVaRPlot3D.Rd pkg/Dowd/man/LogtESPlot3D.Rd pkg/Dowd/man/LogtVaRPlot3D.Rd pkg/Dowd/man/NormalESPlot3D.Rd pkg/Dowd/man/NormalVaRPlot3D.Rd Log: Perspective, shading and various parameters of 3D plot were adjusted to make plots look like that of MMRII Toolbox Modified: pkg/Dowd/man/LogNormalESPlot3D.Rd =================================================================== --- pkg/Dowd/man/LogNormalESPlot3D.Rd 2015-08-03 22:56:10 UTC (rev 3903) +++ pkg/Dowd/man/LogNormalESPlot3D.Rd 2015-08-03 22:56:44 UTC (rev 3904) @@ -30,10 +30,10 @@ \examples{ # Plots VaR against confidene level given geometric return data data <- runif(5, min = 0, max = .2) - LogNormalESPlot3D(returns = data, investment = 5, cl = seq(.85,.99,.01), hp = 60:90) + LogNormalESPlot3D(returns = data, investment = 5, cl = seq(.9,.99,.01), hp = 1:100) # Computes VaR against confidence level given mean and standard deviation of return data - LogNormalESPlot3D(mu = 
.012, sigma = .03, investment = 5, cl = seq(.85,.99,.02), hp = 40:80) + LogNormalESPlot3D(mu = .012, sigma = .03, investment = 5, cl = seq(.9,.99,.01), hp = 1:100) } \author{ Dinesh Acharya Modified: pkg/Dowd/man/LogNormalVaRPlot3D.Rd =================================================================== --- pkg/Dowd/man/LogNormalVaRPlot3D.Rd 2015-08-03 22:56:10 UTC (rev 3903) +++ pkg/Dowd/man/LogNormalVaRPlot3D.Rd 2015-08-03 22:56:44 UTC (rev 3904) @@ -31,11 +31,11 @@ } \examples{ # Plots VaR against confidence level given geometric return data - data <- runif(5, min = 0, max = .2) - LogNormalVaRPlot3D(returns = data, investment = 5, cl = seq(.85,.99,.01), hp = 60:90) + data <- rnorm(5, .09, .03) + LogNormalVaRPlot3D(returns = data, investment = 5, cl = seq(.9,.99,.01), hp = 1:100) # Computes VaR against confidence level given mean and standard deviation of return data - LogNormalVaRPlot3D(mu = .012, sigma = .03, investment = 5, cl = seq(.85,.99,.02), hp = 40:80) + LogNormalVaRPlot3D(mu = .012, sigma = .03, investment = 5, cl = seq(.9,.99,.01), hp = 1:100) } \author{ Dinesh Acharya Modified: pkg/Dowd/man/LogtESPlot3D.Rd =================================================================== --- pkg/Dowd/man/LogtESPlot3D.Rd 2015-08-03 22:56:10 UTC (rev 3903) +++ pkg/Dowd/man/LogtESPlot3D.Rd 2015-08-03 22:56:44 UTC (rev 3904) @@ -33,11 +33,11 @@ } \examples{ # Plots ES against confidence level given geometric return data - data <- runif(5, min = 0, max = .2) - LogtESPlot3D(returns = data, investment = 5, df = 6, cl = seq(.85,.99,.01), hp = 60:90) + data <- rnorm(5, .09, .03) + LogtESPlot3D(returns = data, investment = 5, df = 6, cl = seq(.9,.99,.01), hp = 1:100) # Computes ES against confidence level given mean and standard deviation of return data - LogtESPlot3D(mu = .012, sigma = .03, investment = 5, df = 6, cl = seq(.85,.99,.02), hp = 40:80) + LogtESPlot3D(mu = .012, sigma = .03, investment = 5, df = 6, cl = seq(.9,.99,.01), hp = 1:100) } \author{ Dinesh Acharya Modified: pkg/Dowd/man/LogtVaRPlot3D.Rd =================================================================== --- pkg/Dowd/man/LogtVaRPlot3D.Rd 2015-08-03 22:56:10 UTC (rev 3903) +++ pkg/Dowd/man/LogtVaRPlot3D.Rd 2015-08-03 22:56:44 UTC (rev 3904) @@ -34,10 +34,10 @@ \examples{ # Plots VaR against confidence level given geometric return data data <- runif(5, min = 0, max = .2) - LogtVaRPlot3D(returns = data, investment = 5, df = 6, cl = seq(.85,.99,.01), hp = 60:90) + LogtVaRPlot3D(returns = data, investment = 5, df = 6, cl = seq(.9,.99,.01), hp = 1:100) # Computes VaR against confidence level given mean and standard deviation of return data - LogtVaRPlot3D(mu = .012, sigma = .03, investment = 5, df = 6, cl = seq(.85,.99,.02), hp = 40:80) + LogtVaRPlot3D(mu = .012, sigma = .03, investment = 5, df = 6, cl = seq(.9,.99,.01), hp = 1:100) } \author{ Dinesh Acharya Modified: pkg/Dowd/man/NormalESPlot3D.Rd =================================================================== --- pkg/Dowd/man/NormalESPlot3D.Rd 2015-08-03 22:56:10 UTC (rev 3903) +++ pkg/Dowd/man/NormalESPlot3D.Rd 2015-08-03 22:56:44 UTC (rev 3904) @@ -29,10 +29,10 @@ \examples{ # Plots ES against confidence level given geometric return data data <- runif(5, min = 0, max = .2) - NormalESPlot3D(returns = data, cl = seq(.85,.99,.01), hp = 60:90) + NormalESPlot3D(returns = data, cl = seq(.9,.99,.01), hp = 1:100) # Computes ES against confidence level given mean and standard deviation of return data - NormalESPlot3D(mu = .012, sigma = .03, cl = seq(.85,.99,.02), hp = 40:80) + 
NormalESPlot3D(mu = .012, sigma = .03, cl = seq(.9,.99,.01), hp = 1:100) } \author{ Dinesh Acharya Modified: pkg/Dowd/man/NormalVaRPlot3D.Rd =================================================================== --- pkg/Dowd/man/NormalVaRPlot3D.Rd 2015-08-03 22:56:10 UTC (rev 3903) +++ pkg/Dowd/man/NormalVaRPlot3D.Rd 2015-08-03 22:56:44 UTC (rev 3904) @@ -28,11 +28,11 @@ } \examples{ # Plots VaR against confidence level given geometric return data - data <- runif(5, min = 0, max = .2) - NormalVaRPlot3D(returns = data, cl = seq(.85,.99,.01), hp = 60:90) + data <- rnorm(5, .07, .03) + NormalVaRPlot3D(returns = data, cl = seq(.9,.99,.01), hp = 1:100) # Computes VaR against confidence level given mean and standard deviation of return data - NormalVaRPlot3D(mu = .012, sigma = .03, cl = seq(.85,.99,.02), hp = 40:80) + NormalVaRPlot3D(mu = .012, sigma = .03, cl = seq(.9,.99,.01), hp = 1:100) } \author{ Dinesh Acharya From noreply at r-forge.r-project.org Wed Aug 5 07:20:45 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 07:20:45 +0200 (CEST) Subject: [Returnanalytics-commits] r3905 - pkg/Dowd Message-ID: <20150805052045.621B0185F5E@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 07:20:44 +0200 (Wed, 05 Aug 2015) New Revision: 3905 Modified: pkg/Dowd/NAMESPACE Log: Functions AmericanPutESSim, tESPlot2DCL and tESPlot2DHP added Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-03 22:56:44 UTC (rev 3904) +++ pkg/Dowd/NAMESPACE 2015-08-05 05:20:44 UTC (rev 3905) @@ -6,6 +6,7 @@ export(AdjustedVarianceCovarianceES) export(AdjustedVarianceCovarianceVaR) export(AmericanPutESBinomial) +export(AmericanPutESSim) export(AmericanPutPriceBinomial) export(AmericanPutVaRBinomial) export(BinomialBacktest) @@ -125,5 +126,7 @@ export(tES) export(tESDFPerc) export(tESFigure) +export(tESPlot2DCL) +export(tESPlot2DHP) import(MASS) import(bootstrap) From noreply at r-forge.r-project.org Wed Aug 5 07:21:29 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 07:21:29 +0200 (CEST) Subject: [Returnanalytics-commits] r3906 - pkg/Dowd/R Message-ID: <20150805052129.F3C75185F5E@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 07:21:29 +0200 (Wed, 05 Aug 2015) New Revision: 3906 Added: pkg/Dowd/R/AmericanPutESSim.R Log: Function AmericanPutESSim added Added: pkg/Dowd/R/AmericanPutESSim.R =================================================================== --- pkg/Dowd/R/AmericanPutESSim.R (rev 0) +++ pkg/Dowd/R/AmericanPutESSim.R 2015-08-05 05:21:29 UTC (rev 3906) @@ -0,0 +1,84 @@ +#' Estimates ES of American vanilla put using binomial option valuation tree and Monte Carlo +#' Simulation +#' +#' Estimates ES of an American put option using a Monte Carlo simulation with +#' a binomial option valuation tree nested within the MCS to revalue the +#' option at each step. The historical method is used to compute the ES.
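+#' In outline (as implemented below): simulate log-price paths of the
+#' underlying over the holding period, reprice the option at each step with
+#' the binomial tree, form the simulated P/L distribution, and read the ES
+#' off its tail.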
+#' +#' @param amountInvested Total amount paid for the Put Option and is positive +#' (negative) if the option position is long (short) +#' @param stockPrice Stock price of underlying stock +#' @param strike Strike price of the option +#' @param r Risk-free rate +#' @param mu Expected rate of return on the underlying asset and is in +#' annualised term +#' @param sigma Volatility of the underlying stock and is in annualised +#' term +#' @param maturity The term to maturity of the option in days +#' @param numberTrials The number of iterations in the Monte Carlo simulation +#' exercise +#' @param numberSteps The number of steps over the holding period at each +#' of which early exercise is checked and is at least 2 +#' @param cl Confidence level for which ES is computed and is scalar +#' @param hp Holding period of the option in days and is scalar +#' @return Monte Carlo simulation ES estimate and the bounds of the 95% +#' confidence interval for the ES, based on an order-statistics analysis +#' of the P/L distribution +#' @references Dowd, Kevin. Measuring Market Risk, Wiley, 2007. +#' +#' Lyuu, Yuh-Dauh. Financial Engineering & Computation: Principles, +#' Mathematics, Algorithms, Cambridge University Press, 2002. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Market Risk of American Put with given parameters. +#' AmericanPutESSim(0.20, 27.2, 25, .16, .2, .05, 60, 30, 20, .95, 30) +#' +#' @export +AmericanPutESSim <- function(amountInvested, stockPrice, strike, r, mu, sigma, + maturity, numberTrials, numberSteps, cl, hp){ + # Precompute Constants + annualMaturity <- maturity / 360 # Annualised maturity + annualHp <- hp / 360 # Annualised holding period + N <- numberSteps # Number of steps + dt <- annualHp / N # Size of time-increment equal to holding period + nudt <- (mu - .5 * sigma^2) * dt + sigmadt <- sigma * sqrt(dt) + lnS <- log(stockPrice) + M <- numberTrials + initialOptionPrice <- AmericanPutPriceBinomial(stockPrice, strike, r, sigma, maturity, N) + numberOfOptions <- abs(amountInvested) / initialOptionPrice + # Stock price simulation process + lnSt <- matrix(0, M, N) + newStockPrice <- matrix(0, M, N) + for (i in 1:M){ + lnSt[i, 1] <- lnS + rnorm(1, nudt, sigmadt) + newStockPrice[i, 1] <- exp(lnSt[i, 1]) + for (j in 2:N){ + lnSt[i, j] <- lnSt[i, j - 1] + rnorm(1, nudt, sigmadt) + newStockPrice[i, j] <- exp(lnSt[i,j]) # New stock price + } + } + + # Option calculation over time + newOptionValue <- matrix(0, M, N-1) + for (i in 1:M) { + for (j in 1:(N-1)) { + newOptionValue[i, j] <- AmericanPutPriceBinomial(newStockPrice[i, j], + strike, r, sigma, maturity - j * hp / N, N) + } + } + # Profit/Loss + profitOrLoss <- (newOptionValue - initialOptionPrice)*numberOfOptions + + # Now adjust for short position + if (amountInvested < 0) {# If option position is short + profitOrLoss <- -profitOrLoss + } + + # ES estimation + ES <- HSESDFPerc(profitOrLoss, .5, cl) # ES + confidenceInterval <- c(HSESDFPerc(profitOrLoss, .025, cl), HSESDFPerc(profitOrLoss, .975, cl)) + return(list('ES' = ES, 'confidenceInterval' = confidenceInterval)) +} \ No newline at end of file From noreply at r-forge.r-project.org Wed Aug 5 07:21:46 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 07:21:46 +0200 (CEST) Subject: [Returnanalytics-commits] r3907 - pkg/Dowd/man Message-ID: <20150805052146.2DE98185F5E@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 07:21:45 +0200 (Wed, 05 Aug 2015) New Revision: 3907 Added: 
pkg/Dowd/man/AmericanPutESSim.Rd Log: Function AmericanPutESSim added Added: pkg/Dowd/man/AmericanPutESSim.Rd =================================================================== --- pkg/Dowd/man/AmericanPutESSim.Rd (rev 0) +++ pkg/Dowd/man/AmericanPutESSim.Rd 2015-08-05 05:21:45 UTC (rev 3907) @@ -0,0 +1,62 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/AmericanPutESSim.R +\name{AmericanPutESSim} +\alias{AmericanPutESSim} +\title{Estimates ES of American vanilla put using binomial option valuation tree and Monte Carlo +Simulation} +\usage{ +AmericanPutESSim(amountInvested, stockPrice, strike, r, mu, sigma, maturity, + numberTrials, numberSteps, cl, hp) +} +\arguments{ +\item{amountInvested}{Total amount paid for the Put Option and is positive +(negative) if the option position is long (short)} + +\item{stockPrice}{Stock price of underlying stock} + +\item{strike}{Strike price of the option} + +\item{r}{Risk-free rate} + +\item{mu}{Expected rate of return on the underlying asset and is in +annualised term} + +\item{sigma}{Volatility of the underlying stock and is in annualised +term} + +\item{maturity}{The term to maturity of the option in days} + +\item{numberTrials}{The number of iterations in the Monte Carlo simulation +exercise} + +\item{numberSteps}{The number of steps over the holding period at each +of which early exercise is checked and is at least 2} + +\item{cl}{Confidence level for which ES is computed and is scalar} + +\item{hp}{Holding period of the option in days and is scalar} +} +\value{ +Monte Carlo simulation ES estimate and the bounds of the 95% +confidence interval for the ES, based on an order-statistics analysis +of the P/L distribution +} +\description{ +Estimates ES of an American put option using a Monte Carlo simulation with +a binomial option valuation tree nested within the MCS to revalue the +option at each step. The historical method is used to compute the ES. +} +\examples{ +# Market Risk of American Put with given parameters. + AmericanPutESSim(0.20, 27.2, 25, .16, .2, .05, 60, 30, 20, .95, 30) } \author{ Dinesh Acharya } \references{ Dowd, Kevin. Measuring Market Risk, Wiley, 2007. + +Lyuu, Yuh-Dauh. Financial Engineering & Computation: Principles, +Mathematics, Algorithms, Cambridge University Press, 2002. } From noreply at r-forge.r-project.org Wed Aug 5 07:22:35 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 07:22:35 +0200 (CEST) Subject: [Returnanalytics-commits] r3908 - pkg/Dowd/R Message-ID: <20150805052235.C6782185F5E@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 07:22:35 +0200 (Wed, 05 Aug 2015) New Revision: 3908 Added: pkg/Dowd/R/tESPlot2DCL.R Log: Function tESPlot2DCL added Added: pkg/Dowd/R/tESPlot2DCL.R =================================================================== --- pkg/Dowd/R/tESPlot2DCL.R (rev 0) +++ pkg/Dowd/R/tESPlot2DCL.R 2015-08-05 05:22:35 UTC (rev 3908) @@ -0,0 +1,144 @@ +#' Plots t- ES against confidence level +#' +#' Plots the ES of a portfolio against confidence level, assuming that L/P is +#' t distributed, for specified confidence level and holding period. +#' +#' @param ... The input arguments contain either return data or else mean and +#' standard deviation data. Accordingly, number of input arguments is either 4 +#' or 5. In case there are 4 input arguments, the mean and standard deviation of +#' data is computed from return data. See examples for details. 
+#' +#' returns Vector of daily geometric return data +#' +#' mu Mean of daily geometric return data +#' +#' sigma Standard deviation of daily geometric return data +#' +#' df Number of degrees of freedom in the t distribution +#' +#' cl ES confidence level and must be a vector +#' +#' hp ES holding period and must be a scalar +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' Evans, M., Hastings, M. and Peacock, B. Statistical Distributions, 3rd +#' edition, New York: John Wiley, ch. 38,39. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Computes ES given geometric return data +#' data <- runif(5, min = 0, max = .2) +#' tESPlot2DCL(returns = data, df = 6, cl = seq(.9,.99,.01), hp = 60) +#' +#' # Computes ES given mean and standard deviation of return data +#' tESPlot2DCL(mu = .012, sigma = .03, df = 6, cl = seq(.9,.99,.01), hp = 40) +#' +#' +#' @export +tESPlot2DCL <- function(...){ + # Determine if there are four or five arguments, and ensure that arguments are read as intended + if (nargs() < 4) { + stop("Too few arguments") + } + if (nargs() > 5) { + stop("Too many arguments") + } + args <- list(...) + if (nargs() == 5) { + mu <- args$mu + df <- args$df + cl <- args$cl + sigma <- args$sigma + hp <- args$hp + } + if (nargs() == 4) { + mu <- mean(args$returns) + df <- args$df + cl <- args$cl + sigma <- sd(args$returns) + hp <- args$hp + } + # Check that inputs have correct dimensions + mu <- as.matrix(mu) + mu.row <- dim(mu)[1] + mu.col <- dim(mu)[2] + if (max(mu.row, mu.col) > 1) { + stop("Mean must be a scalar") + } + sigma <- as.matrix(sigma) + sigma.row <- dim(sigma)[1] + sigma.col <- dim(sigma)[2] + if (max(sigma.row, sigma.col) > 1) { + stop("Standard deviation must be a scalar") + } + cl <- as.matrix(cl) + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + if (min(cl.row, cl.col) > 1) { + stop("Confidence level must be a vector") + } + hp <- as.matrix(hp) + hp.row <- dim(hp)[1] + hp.col <- dim(hp)[2] + if (max(hp.row, hp.col) > 1) { + stop("Holding period must be a scalar") + } + df <- as.matrix(df) + df.row <- dim(df)[1] + df.col <- dim(df)[2] + if (max(df.row, df.col)>1){ + stop('Number of degrees of freedom must be a scalar') + } + # Check that cl is read as row vector + if (cl.row > cl.col) { + cl <- t(cl) + } + + # Check that inputs obey sign and value restrictions + if (sigma < 0) { + stop("Standard deviation must be non-negative") + } + if (df < 3) { + stop("Number of degrees of freedom must be at least 3 for first two moments of distribution to be defined") + } + if (max(cl) >= 1){ + stop("Confidence level(s) must be less than 1") + } + if (min(cl) <= 0){ + stop("Confidence level(s) must be greater than 0") + } + if (min(hp) <= 0){ + stop("Holding period(s) must be greater than 0") + } + # VaR estimation + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + VaR <- (-sigma[1,1] * sqrt(hp[1,1]) * sqrt((df - 2) / df) %*% qt(1 - cl, df)) + (- mu[1,1] * hp[1,1] * matrix(1, cl.row, cl.col)) # VaR + # ES estimation + n <- 1000 # Number of slices into which tail is divided + cl0 <- cl # Initial confidence level + delta.cl <- (1 - cl) / n # Increment to confidence level as each slice is taken + v <- VaR + for (i in 1:(n-1)) { + cl <- cl0 + i * delta.cl # Revised cl + v <- v + (-sigma[1,1] * sqrt(hp[1,1]) * sqrt((df - 2) / df) %*% qt(1 - cl, df)) + (- mu[1,1] * hp[1,1] * matrix(1, cl.row, cl.col)) + } + v <- v/n + + # Plotting + plot(cl0, v, type = "l", xlab = "Confidence Level", ylab = "ES") + title("t ES against confidence level") + xmin 
<-min(cl0)+.25*(max(cl0)-min(cl0)) + text(xmin,max(v)-.1*(max(v)-min(v)), + 'Input parameters', cex=.75, font = 2) + text(xmin,max(v)-.15*(max(v)-min(v)), + paste('Daily mean L/P = ',round(mu[1,1],3)),cex=.75) + text(xmin,max(v)-.2*(max(v)-min(v)), + paste('Stdev. of daily L/P = ',round(sigma[1,1],3)),cex=.75) + text(xmin,max(v)-.25*(max(v)-min(v)), + paste('Degrees of freedom = ',df),cex=.75) + text(xmin,max(v)-.3*(max(v)-min(v)), + paste('Holding Period = ',hp),cex=.75) +} \ No newline at end of file From noreply at r-forge.r-project.org Wed Aug 5 07:22:54 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 07:22:54 +0200 (CEST) Subject: [Returnanalytics-commits] r3909 - pkg/Dowd/man Message-ID: <20150805052254.EA240185F5E@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 07:22:54 +0200 (Wed, 05 Aug 2015) New Revision: 3909 Added: pkg/Dowd/man/tESPlot2DCL.Rd Log: Function tESPlot2DCL added Added: pkg/Dowd/man/tESPlot2DCL.Rd =================================================================== --- pkg/Dowd/man/tESPlot2DCL.Rd (rev 0) +++ pkg/Dowd/man/tESPlot2DCL.Rd 2015-08-05 05:22:54 UTC (rev 3909) @@ -0,0 +1,48 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/tESPlot2DCL.R +\name{tESPlot2DCL} +\alias{tESPlot2DCL} +\title{Plots t- ES against confidence level} +\usage{ +tESPlot2DCL(...) +} +\arguments{ +\item{...}{The input arguments contain either return data or else mean and + standard deviation data. Accordingly, number of input arguments is either 4 + or 5. In case there are 4 input arguments, the mean and standard deviation of + data is computed from return data. See examples for details. + + returns Vector of daily geometric return data + + mu Mean of daily geometric return data + + sigma Standard deviation of daily geometric return data + + df Number of degrees of freedom in the t distribution + + cl ES confidence level and must be a vector + + hp ES holding period and must be a scalar} +} +\description{ +Plots the ES of a portfolio against confidence level, assuming that L/P is +t distributed, for specified confidence level and holding period. +} +\examples{ +# Computes ES given geometric return data + data <- runif(5, min = 0, max = .2) + tESPlot2DCL(returns = data, df = 6, cl = seq(.9,.99,.01), hp = 60) + + # Computes ES given mean and standard deviation of return data + tESPlot2DCL(mu = .012, sigma = .03, df = 6, cl = seq(.9,.99,.01), hp = 40) } \author{ Dinesh Acharya } \references{ Dowd, K. Measuring Market Risk, Wiley, 2007. + +Evans, M., Hastings, M. and Peacock, B. Statistical Distributions, 3rd +edition, New York: John Wiley, ch. 38,39. } From noreply at r-forge.r-project.org Wed Aug 5 07:23:10 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 07:23:10 +0200 (CEST) Subject: [Returnanalytics-commits] r3910 - pkg/Dowd/R Message-ID: <20150805052310.3A210185F5E@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 07:23:09 +0200 (Wed, 05 Aug 2015) New Revision: 3910 Added: pkg/Dowd/R/tESPlot2DHP.R Log: Function tESPlot2DHP added Added: pkg/Dowd/R/tESPlot2DHP.R =================================================================== --- pkg/Dowd/R/tESPlot2DHP.R (rev 0) +++ pkg/Dowd/R/tESPlot2DHP.R 2015-08-05 05:23:09 UTC (rev 3910) @@ -0,0 +1,142 @@ +#' Plots t ES against holding period +#' +#' Plots the ES of a portfolio against holding period assuming that L/P is t distributed, for specified confidence level and holding periods. 
+#' +#' @param ... The input arguments contain either return data or else mean and +#' standard deviation data. Accordingly, number of input arguments is either 4 +#' or 5. In case there are 4 input arguments, the mean and standard deviation of +#' data is computed from return data. See examples for details. +#' +#' returns Vector of daily P/L data +#' +#' mu Mean of daily P/L data +#' +#' sigma Standard deviation of daily P/L data +#' +#' df Number of degrees of freedom in the t distribution +#' +#' cl ES confidence level and must be a scalar +#' +#' hp ES holding period and must be a vector +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' Evans, M., Hastings, M. and Peacock, B. Statistical Distributions, 3rd +#' edition, New York: John Wiley, ch. 38,39. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Computes ES given geometric return data +#' data <- runif(5, min = 0, max = .2) +#' tESPlot2DHP(returns = data, df = 6, cl = .95, hp = 60:90) +#' +#' # Computes ES given mean and standard deviation of return data +#' tESPlot2DHP(mu = .012, sigma = .03, df = 6, cl = .99, hp = 40:80) +#' +#' @export +tESPlot2DHP <- function(...){ + if (nargs() < 4) { + stop("Too few arguments") + } + if (nargs() > 5) { + stop("Too many arguments") + } + args <- list(...) + if (nargs() == 5) { + mu <- args$mu + df <- args$df + cl <- args$cl + sigma <- args$sigma + hp <- args$hp + } + if (nargs() == 4) { + mu <- mean(args$returns) + df <- args$df + cl <- args$cl + sigma <- sd(args$returns) + hp <- args$hp + } + + # Check that inputs have correct dimensions + mu <- as.matrix(mu) + mu.row <- dim(mu)[1] + mu.col <- dim(mu)[2] + if (max(mu.row, mu.col) > 1) { + stop("Mean must be a scalar") + } + sigma <- as.matrix(sigma) + sigma.row <- dim(sigma)[1] + sigma.col <- dim(sigma)[2] + if (max(sigma.row, sigma.col) > 1) { + stop("Standard deviation must be a scalar") + } + cl <- as.matrix(cl) + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + if (max(cl.row, cl.col) > 1) { + stop("Confidence level must be a scalar") + } + hp <- as.matrix(hp) + hp.row <- dim(hp)[1] + hp.col <- dim(hp)[2] + if (min(hp.row, hp.col) > 1) { + stop("Holding period must be a vector") + } + df <- as.matrix(df) + df.row <- dim(df)[1] + df.col <- dim(df)[2] + if (max(df.row, df.col) > 1) { + stop("Number of degrees of freedom must be a scalar") + } + # Check that hp is read as row vector + if (hp.row > hp.col) { + hp <- t(hp) + } + + # Check that inputs obey sign and value restrictions + if (sigma < 0) { + stop("Standard deviation must be non-negative") + } + if (df < 3) { + stop("Number of degrees of freedom must be at least 3 for first two moments of distribution to be defined") + } + if (max(cl) >= 1){ + stop("Confidence level(s) must be less than 1") + } + if (min(cl) <= 0){ + stop("Confidence level(s) must be greater than 0") + } + if (min(hp) <= 0){ + stop("Holding period(s) must be greater than 0") + } + # VaR estimation + VaR <- (-sigma[1,1] * sqrt(t(hp)) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + (- mu[1,1] * t(hp)) # VaR + + # ES estimation + n <- 1000 # Number of slices into which tail is divided + cl0 <- cl # Initial confidence level + delta.cl <- (1 - cl) / n # Increment to confidence level as each slice is taken + v <- VaR + for (i in 1:(n-1)) { + cl <- cl0 + i * delta.cl # Revised cl + v <- v + (-sigma[1,1] * sqrt(t(hp)) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + (- mu[1,1] * t(hp) %*% matrix(1, cl.row, cl.col)) + } + v <- v/n + + # Plotting + plot(hp, v, type = "l", xlab = "Holding Period", ylab = "ES") 
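+ # Add a title and annotate the plot with the input parameters used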
+ title("t ES against holding period") + xmin <-min(hp)+.25*(max(hp)-min(hp)) + cl.label <- cl0 * 100 + text(xmin,max(v)-.5*(max(v)-min(v)), + 'Input parameters', cex=.75, font = 2) + text(xmin,max(v)-.55*(max(v)-min(v)), + paste('Daily mean L/P data = ', round(mu[1,1], 3)),cex=.75) + text(xmin,max(v)-.6*(max(v)-min(v)), + paste('Stdev. of daily L/P data = ',round(sigma[1,1],3)),cex=.75) + text(xmin,max(v)-.65*(max(v)-min(v)), + paste('Degrees of freedom = ',df),cex=.75) + text(xmin,max(v)-.7*(max(v)-min(v)), + paste('Confidence level = ',cl.label,'%'),cex=.75) +} \ No newline at end of file From noreply at r-forge.r-project.org Wed Aug 5 07:23:25 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 07:23:25 +0200 (CEST) Subject: [Returnanalytics-commits] r3911 - pkg/Dowd/man Message-ID: <20150805052325.DD59618740F@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 07:23:25 +0200 (Wed, 05 Aug 2015) New Revision: 3911 Added: pkg/Dowd/man/tESPlot2DHP.Rd Log: Function tESPlot2DHP added Added: pkg/Dowd/man/tESPlot2DHP.Rd =================================================================== --- pkg/Dowd/man/tESPlot2DHP.Rd (rev 0) +++ pkg/Dowd/man/tESPlot2DHP.Rd 2015-08-05 05:23:25 UTC (rev 3911) @@ -0,0 +1,47 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/tESPlot2DHP.R +\name{tESPlot2DHP} +\alias{tESPlot2DHP} +\title{Plots t ES against holding period} +\usage{ +tESPlot2DHP(...) +} +\arguments{ +\item{...}{The input arguments contain either return data or else mean and + standard deviation data. Accordingly, number of input arguments is either 4 + or 5. In case there are 4 input arguments, the mean and standard deviation of + data is computed from return data. See examples for details. + + returns Vector of daily P/L data + + mu Mean of daily P/L data + + sigma Standard deviation of daily P/L data + + df Number of degrees of freedom in the t distribution + + cl ES confidence level and must be a scalar + + hp ES holding period and must be a vector} +} +\description{ +Plots the ES of a portfolio against holding period assuming that L/P is t distributed, for specified confidence level and holding periods. +} +\examples{ +# Computes ES given geometric return data + data <- runif(5, min = 0, max = .2) + tESPlot2DHP(returns = data, df = 6, cl = .95, hp = 60:90) + + # Computes ES given mean and standard deviation of return data + tESPlot2DHP(mu = .012, sigma = .03, df = 6, cl = .99, hp = 40:80) } \author{ Dinesh Acharya } \references{ Dowd, K. Measuring Market Risk, Wiley, 2007. + +Evans, M., Hastings, M. and Peacock, B. Statistical Distributions, 3rd +edition, New York: John Wiley, ch. 38,39. 
+} + From noreply at r-forge.r-project.org Wed Aug 5 22:33:29 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 22:33:29 +0200 (CEST) Subject: [Returnanalytics-commits] r3912 - pkg/Dowd/R Message-ID: <20150805203329.8FB9A187AA2@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 22:33:29 +0200 (Wed, 05 Aug 2015) New Revision: 3912 Added: pkg/Dowd/R/tESPlot3D.R Log: Function tESPlot3D added Added: pkg/Dowd/R/tESPlot3D.R =================================================================== --- pkg/Dowd/R/tESPlot3D.R (rev 0) +++ pkg/Dowd/R/tESPlot3D.R 2015-08-05 20:33:29 UTC (rev 3912) @@ -0,0 +1,142 @@ +#' Plots t ES against confidence level and holding period +#' +#' Plots the ES of a portfolio against confidence level and holding period +#' assuming that P/L are Student-t distributed, for specified confidence level +#' and holding period. +#' +#' @param ... The input arguments contain either return data or else mean and +#' standard deviation data. Accordingly, number of input arguments is either 4 +#' or 5. In case there are 4 input arguments, the mean and standard deviation of +#' data is computed from return data. See examples for details. +#' +#' returns Vector of daily P/L data +#' +#' mu Mean of daily P/L data +#' +#' sigma Standard deviation of daily P/L data +#' +#' df Number of degrees of freedom in the t distribution +#' +#' cl ES confidence level and must be a vector +#' +#' hp ES holding period and must be a vector +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Plots ES against confidence level given P/L data +#' data <- runif(5, min = 0, max = .2) +#' tESPlot3D(returns = data, df = 6, cl = seq(.85,.99,.01), hp = 60:90) +#' +#' # Computes ES against confidence level given mean and standard deviation of return data +#' tESPlot3D(mu = .012, sigma = .03, df = 6, cl = seq(.85,.99,.02), hp = 40:80) +#' +#' +#' @export +tESPlot3D <- function(...){ + # Determine if there are four or five arguments, and ensure that arguments are read as intended + if (nargs() < 4) { + stop("Too few arguments") + } + if (nargs() > 5) { + stop("Too many arguments") + } + args <- list(...) 
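+ # With five arguments the caller supplies mu and sigma directly;
+ # with four, they are estimated below from the supplied returns.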
+ if (nargs() == 5) { + mu <- args$mu + df <- args$df + cl <- args$cl + sigma <- args$sigma + hp <- args$hp + } + if (nargs() == 4) { + mu <- mean(args$returns) + df <- args$df + cl <- args$cl + sigma <- sd(args$returns) + hp <- args$hp + } + + # Check that inputs have correct dimensions + mu <- as.matrix(mu) + mu.row <- dim(mu)[1] + mu.col <- dim(mu)[2] + if (max(mu.row, mu.col) > 1) { + stop("Mean must be a scalar") + } + sigma <- as.matrix(sigma) + sigma.row <- dim(sigma)[1] + sigma.col <- dim(sigma)[2] + if (max(sigma.row, sigma.col) > 1) { + stop("Standard deviation must be a scalar") + } + cl <- as.matrix(cl) + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + if (min(cl.row, cl.col) > 1) { + stop("Confidence level must be a vector") + } + hp <- as.matrix(hp) + hp.row <- dim(hp)[1] + hp.col <- dim(hp)[2] + if (min(hp.row, hp.col) > 1) { + stop("Holding period must be a vector") + } + df <- as.matrix(df) + df.row <- dim(df)[1] + df.col <- dim(df)[2] + if (max(df.row, df.col) > 1) { + stop("Number of degrees of freedom must be a scalar") + } + + # Check that cl is read as row vector + if (cl.row > cl.col) { + cl <- t(cl) + } + # Check that hp is read as column vector + if (hp.col > hp.row) { + hp <- t(hp) + } + + # Check that inputs obey sign and value restrictions + if (sigma < 0) { + stop("Standard deviation must be non-negative") + } + if (df < 3) { + stop("Number of degrees of freedom must be at least 3 for first two moments + of distribution to be defined") + } + if (max(cl) >= 1){ + stop("Confidence level(s) must be less than 1") + } + if (min(cl) <= 0){ + stop("Confidence level(s) must be greater than 0") + } + if (min(hp) <= 0){ + stop("Holding period(s) must be greater than 0") + } + + # VaR estimation + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + VaR <- (-sigma[1,1] * sqrt(hp) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + + (- mu[1,1] * hp %*% matrix(1, cl.row, cl.col)) # VaR + # ES estimation + n <- 1000 # Number of slices into which tail is divided + cl0 <- cl # Initial confidence level + delta.cl <- (1 - cl) / n # Increment to confidence level as each slice is taken + v <- VaR + for (i in 1:(n-1)) { + cl <- cl0 + i * delta.cl # Revised cl + v <- v + (-sigma[1,1] * sqrt(hp) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + + (- mu[1,1] * hp %*% matrix(1, cl.row, cl.col)) + } + v <- v/n + + # Plotting + persp(x=cl, y=hp, t(v), xlab = "Confidence Level", + ylab = "Holding Period", zlab = "ES", + main = "t ES against confidence level and holding period") +} From noreply at r-forge.r-project.org Wed Aug 5 22:33:50 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 22:33:50 +0200 (CEST) Subject: [Returnanalytics-commits] r3913 - pkg/Dowd/man Message-ID: <20150805203350.E1729187AA2@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 22:33:50 +0200 (Wed, 05 Aug 2015) New Revision: 3913 Added: pkg/Dowd/man/tESPlot3D.Rd Log: Function tESPlot3D added Added: pkg/Dowd/man/tESPlot3D.Rd =================================================================== --- pkg/Dowd/man/tESPlot3D.Rd (rev 0) +++ pkg/Dowd/man/tESPlot3D.Rd 2015-08-05 20:33:50 UTC (rev 3913) @@ -0,0 +1,46 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/tESPlot3D.R +\name{tESPlot3D} +\alias{tESPlot3D} +\title{Plots t ES against confidence level and holding period} +\usage{ +tESPlot3D(...) +} +\arguments{ +\item{...}{The input arguments contain either return data or else mean and + standard deviation data. 
Accordingly, number of input arguments is either 4 + or 5. In case there are 4 input arguments, the mean and standard deviation of + data is computed from return data. See examples for details. + + returns Vector of daily P/L data + + mu Mean of daily P/L data + + sigma Standard deviation of daily P/L data + + df Number of degrees of freedom in the t distribution + + cl ES confidence level and must be a vector + + hp ES holding period and must be a vector} } \description{ Plots the ES of a portfolio against confidence level and holding period +assuming that P/L are Student-t distributed, for specified confidence level +and holding period. } \examples{ # Plots ES against confidence level given P/L data + data <- runif(5, min = 0, max = .2) + tESPlot3D(returns = data, df = 6, cl = seq(.85,.99,.01), hp = 60:90) + + # Computes ES against confidence level given mean and standard deviation of return data + tESPlot3D(mu = .012, sigma = .03, df = 6, cl = seq(.85,.99,.02), hp = 40:80) } \author{ Dinesh Acharya } \references{ Dowd, K. Measuring Market Risk, Wiley, 2007. } From noreply at r-forge.r-project.org Wed Aug 5 22:34:44 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 22:34:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3914 - pkg/Dowd/R Message-ID: <20150805203444.2E5A4187AA6@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 22:34:43 +0200 (Wed, 05 Aug 2015) New Revision: 3914 Added: pkg/Dowd/R/tVaR.R Log: Function tVaR added Added: pkg/Dowd/R/tVaR.R =================================================================== --- pkg/Dowd/R/tVaR.R (rev 0) +++ pkg/Dowd/R/tVaR.R 2015-08-05 20:34:43 UTC (rev 3914) @@ -0,0 +1,137 @@ +#' VaR for t distributed P/L +#' +#' Estimates the VaR of a portfolio assuming that P/L are +#' t distributed, for specified confidence level and holding period. +#' +#' @param ... The input arguments contain either return data or else mean and +#' standard deviation data. Accordingly, number of input arguments is either 4 +#' or 5. In case there are 4 input arguments, the mean and standard deviation of +#' data is computed from return data. See examples for details. +#' +#' returns Vector of daily geometric return data +#' +#' mu Mean of daily geometric return data +#' +#' sigma Standard deviation of daily geometric return data +#' +#' df Number of degrees of freedom in the t distribution +#' +#' cl VaR confidence level +#' +#' hp VaR holding period +#' +#' @return Matrix of VaRs whose dimension depends on dimension of hp and cl. If +#' cl and hp are both scalars, the matrix is 1 by 1. If cl is a vector and hp is +#' a scalar, the matrix is a row matrix; if cl is a scalar and hp is a vector, +#' the matrix is a column matrix; and if both cl and hp are vectors, the matrix +#' has dimension length of cl * length of hp. +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' Evans, M., Hastings, M. and Peacock, B. Statistical Distributions, 3rd +#' edition, New York: John Wiley, ch. 38,39. 
+#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Computes VaR given P/L data +#' data <- runif(5, min = 0, max = .2) +#' tVaR(returns = data, df = 6, cl = .95, hp = 90) +#' +#' # Computes VaR given mean and standard deviation of P/L data +#' tVaR(mu = .012, sigma = .03, df = 6, cl = .95, hp = 90) +#' +#' +#' @export +tVaR <- function(...){ + # Determine if there are four or five arguments, and ensure that arguments are + # read as intended + if (nargs() < 4) { + stop("Too few arguments") + } + if (nargs() > 5) { + stop("Too many arguments") + } + args <- list(...) + if (nargs() == 5) { + mu <- args$mu + df <- args$df + cl <- args$cl + sigma <- args$sigma + hp <- args$hp + } + if (nargs() == 4) { + mu <- mean(args$returns) + df <- args$df + cl <- args$cl + sigma <- sd(args$returns) + hp <- args$hp + } + + # Check that inputs have correct dimensions + mu <- as.matrix(mu) + mu.row <- dim(mu)[1] + mu.col <- dim(mu)[2] + if (max(mu.row, mu.col) > 1) { + stop("Mean must be a scalar") + } + sigma <- as.matrix(sigma) + sigma.row <- dim(sigma)[1] + sigma.col <- dim(sigma)[2] + if (max(sigma.row, sigma.col) > 1) { + stop("Standard deviation must be a scalar") + } + cl <- as.matrix(cl) + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + if (min(cl.row, cl.col) > 1) { + stop("Confidence level must be a scalar or a vector") + } + hp <- as.matrix(hp) + hp.row <- dim(hp)[1] + hp.col <- dim(hp)[2] + if (min(hp.row, hp.col) > 1) { + stop("Holding period must be a scalar or a vector") + } + df <- as.matrix(df) + df.row <- dim(df)[1] + df.col <- dim(df)[2] + if (max(df.row, df.col) > 1) { + stop("Number of degrees of freedom must be a scalar") + } + + + # Check that cl and hp are read as row vectors + if (cl.row > cl.col) { + cl <- t(cl) + } + if (hp.row > hp.col) { + hp <- t(hp) + } + + # Check that inputs obey sign and value restrictions + if (sigma < 0) { + stop("Standard deviation must be non-negative") + } + if (df < 3) { + stop("Number of degrees of freedom must be at least 3 for first two moments + of distribution to be defined") + } + if (max(cl) >= 1){ + stop("Confidence level(s) must be less than 1") + } + if (min(cl) <= 0){ + stop("Confidence level(s) must be greater than 0") + } + if (min(hp) <= 0){ + stop("Holding Period(s) must be greater than 0") + } + + + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + # VaR estimation + y <- (-sigma[1,1] * sqrt(t(hp)) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + + (- mu[1,1] * t(hp) %*% matrix(1, cl.row, cl.col)) # VaR + return (y) +} From noreply at r-forge.r-project.org Wed Aug 5 22:35:14 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 22:35:14 +0200 (CEST) Subject: [Returnanalytics-commits] r3915 - pkg/Dowd/man Message-ID: <20150805203514.A3D09187AA6@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 22:35:14 +0200 (Wed, 05 Aug 2015) New Revision: 3915 Added: pkg/Dowd/man/tVaR.Rd Log: Function tVaR added Added: pkg/Dowd/man/tVaR.Rd =================================================================== --- pkg/Dowd/man/tVaR.Rd (rev 0) +++ pkg/Dowd/man/tVaR.Rd 2015-08-05 20:35:14 UTC (rev 3915) @@ -0,0 +1,55 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/tVaR.R +\name{tVaR} +\alias{tVaR} +\title{VaR for t distributed P/L} +\usage{ +tVaR(...) +} +\arguments{ +\item{...}{The input arguments contain either return data or else mean and + standard deviation data. Accordingly, number of input arguments is either 4 + or 5. 
In case there are 4 input arguments, the mean and standard deviation of + data is computed from return data. See examples for details. + + returns Vector of daily geometric return data + + mu Mean of daily geometric return data + + sigma Standard deviation of daily geometric return data + + df Number of degrees of freedom in the t distribution + + cl VaR confidence level + + hp VaR holding period} } \value{ Matrix of VaRs whose dimension depends on dimension of hp and cl. If +cl and hp are both scalars, the matrix is 1 by 1. If cl is a vector and hp is + a scalar, the matrix is a row matrix; if cl is a scalar and hp is a vector, + the matrix is a column matrix; and if both cl and hp are vectors, the matrix + has dimension length of cl * length of hp. } \description{ Estimates the VaR of a portfolio assuming that P/L are +t distributed, for specified confidence level and holding period. } \examples{ # Computes VaR given P/L data + data <- runif(5, min = 0, max = .2) + tVaR(returns = data, df = 6, cl = .95, hp = 90) + + # Computes VaR given mean and standard deviation of P/L data + tVaR(mu = .012, sigma = .03, df = 6, cl = .95, hp = 90) } \author{ Dinesh Acharya } \references{ Dowd, K. Measuring Market Risk, Wiley, 2007. + +Evans, M., Hastings, M. and Peacock, B. Statistical Distributions, 3rd +edition, New York: John Wiley, ch. 38,39. } From noreply at r-forge.r-project.org Wed Aug 5 22:36:42 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 22:36:42 +0200 (CEST) Subject: [Returnanalytics-commits] r3916 - pkg/Dowd/man Message-ID: <20150805203642.1F2A0187AA6@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 22:36:41 +0200 (Wed, 05 Aug 2015) New Revision: 3916 Added: pkg/Dowd/man/tVaRESPlot2DCL.Rd Log: Function tVaRESPlot2DCL added Added: pkg/Dowd/man/tVaRESPlot2DCL.Rd =================================================================== --- pkg/Dowd/man/tVaRESPlot2DCL.Rd (rev 0) +++ pkg/Dowd/man/tVaRESPlot2DCL.Rd 2015-08-05 20:36:41 UTC (rev 3916) @@ -0,0 +1,44 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/tVaRESPlot2DCL.R +\name{tVaRESPlot2DCL} +\alias{tVaRESPlot2DCL} +\title{Plots t VaR and ES against confidence level} +\usage{ +tVaRESPlot2DCL(...) +} +\arguments{ +\item{...}{The input arguments contain either return data or else mean and + standard deviation data. Accordingly, number of input arguments is either 4 + or 5. In case there are 4 input arguments, the mean and standard deviation of + data is computed from return data. See examples for details. + + returns Vector of daily geometric return data + + mu Mean of daily geometric return data + + sigma Standard deviation of daily geometric return data + + cl VaR confidence level and must be a vector + + hp VaR holding period and must be a scalar} } \description{ Plots the VaR and ES of a portfolio against confidence level assuming that P/L +data are t distributed, for specified confidence level and + holding period. } \examples{ # Plots VaR and ETL against confidence level given P/L data + data <- runif(5, min = 0, max = .2) + tVaRESPlot2DCL(returns = data, df = 7, cl = seq(.85,.99,.01), hp = 60) + + # Computes VaR against confidence level given mean and standard deviation of P/L data + tVaRESPlot2DCL(mu = .012, sigma = .03, df = 7, cl = seq(.85,.99,.01), hp = 40) } \author{ Dinesh Acharya } \references{ Dowd, K. Measuring Market Risk, Wiley, 2007. 
+} + From noreply at r-forge.r-project.org Wed Aug 5 22:37:08 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 22:37:08 +0200 (CEST) Subject: [Returnanalytics-commits] r3917 - pkg/Dowd/R Message-ID: <20150805203708.1074A187AA6@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 22:37:07 +0200 (Wed, 05 Aug 2015) New Revision: 3917 Added: pkg/Dowd/R/tVaRESPlot2DCL.R Log: Function tVaRESPlot2DCL added Added: pkg/Dowd/R/tVaRESPlot2DCL.R =================================================================== --- pkg/Dowd/R/tVaRESPlot2DCL.R (rev 0) +++ pkg/Dowd/R/tVaRESPlot2DCL.R 2015-08-05 20:37:07 UTC (rev 3917) @@ -0,0 +1,155 @@ +#' Plots t VaR and ES against confidence level +#' +#' Plots the VaR and ES of a portfolio against confidence level assuming that P/L +#' data are t distributed, for specified confidence level and +#' holding period. +#' +#' @param ... The input arguments contain either return data or else mean and +#' standard deviation data. Accordingly, number of input arguments is either 4 +#' or 5. In case there are 4 input arguments, the mean and standard deviation of +#' data is computed from return data. See examples for details. +#' +#' returns Vector of daily geometric return data +#' +#' mu Mean of daily geometric return data +#' +#' sigma Standard deviation of daily geometric return data +#' +#' cl VaR confidence level and must be a vector +#' +#' hp VaR holding period and must be a scalar +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Plots VaR and ETL against confidence level given P/L data +#' data <- runif(5, min = 0, max = .2) +#' tVaRESPlot2DCL(returns = data, df = 7, cl = seq(.85,.99,.01), hp = 60) +#' +#' # Computes VaR against confidence level given mean and standard deviation of P/L data +#' tVaRESPlot2DCL(mu = .012, sigma = .03, df = 7, cl = seq(.85,.99,.01), hp = 40) +#' +#' +#' @export +tVaRESPlot2DCL <- function(...){ + # Determine if there are four or five arguments, and ensure that arguments + # are read as intended + if (nargs() < 4) { + stop("Too few arguments") + } + if (nargs() > 5) { + stop("Too many arguments") + } + args <- list(...) 
+ if (nargs() == 5) { + mu <- args$mu + cl <- args$cl + sigma <- args$sigma + hp <- args$hp + df <- args$df + } + if (nargs() == 4) { + mu <- mean(args$returns) + cl <- args$cl + sigma <- sd(args$returns) + hp <- args$hp + df <- args$df + } + + # Check that inputs have correct dimensions + mu <- as.matrix(mu) + mu.row <- dim(mu)[1] + mu.col <- dim(mu)[2] + if (max(mu.row, mu.col) > 1) { + stop("Mean must be a scalar") + } + sigma <- as.matrix(sigma) + sigma.row <- dim(sigma)[1] + sigma.col <- dim(sigma)[2] + if (max(sigma.row, sigma.col) > 1) { + stop("Standard deviation must be a scalar") + } + cl <- as.matrix(cl) + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + if (min(cl.row, cl.col) > 1) { + stop("Confidence level must be a vector") + } + hp <- as.matrix(hp) + hp.row <- dim(hp)[1] + hp.col <- dim(hp)[2] + if (max(hp.row, hp.col) > 1) { + stop("Holding period must be a scalar") + } + df <- as.matrix(df) + df.row <- dim(df)[1] + df.col <- dim(df)[2] + if (max(df.row, df.col) > 1) { + stop("Number of degrees of freedom must be a scalar") + } + + # Check that cl is read as row vector + if (cl.row > cl.col) { + cl <- t(cl) + } + + # Check that inputs obey sign and value restrictions + if (sigma < 0) { + stop("Standard deviation must be non-negative") + } + if (df < 3) { + stop("Number of degrees of freedom must be at least 3 for first two moments of distribution to be defined") + } + if (max(cl) >= 1){ + stop("Confidence level(s) must be less than 1") + } + if (min(cl) <= 0){ + stop("Confidence level(s) must be greater than 0") + } + if (min(hp) <= 0){ + stop("Holding period must be greater than 0") + } + # VaR estimation + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + VaR <- (-sigma[1,1] * sqrt(t(hp)) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + (- mu[1,1] * t(hp) %*% matrix(1, cl.row, cl.col)) # VaR + + # ES estimation + n <- 1000 # Number of slices into which tail is divided + cl0 <- cl # Initial confidence level + v <- VaR + delta.cl <- (1 - cl)/n # Increment to confidence level as each slice is taken + for (i in 1:(n-1)) { + cl <- cl0 + i * delta.cl # Revised cl + v <- v + (-sigma[1,1] * sqrt(t(hp)) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + (- mu[1,1] * t(hp) %*% matrix(1, cl.row, cl.col)) + } + v <- v/n # ES + + # Plotting + ymin <- min(VaR, v) + ymax <- max(VaR, v) + xmin <- min(cl0) + xmax <- max(cl0) + plot(cl0, VaR, type = "l", xlim = c(xmin, xmax), ylim = c(ymin, ymax), xlab = "Confidence level", ylab = "VaR/ETL") + par(new=TRUE) + plot(cl0, v, type = "l", xlim = c(xmin, xmax), ylim = c(ymin, ymax), xlab = "Confidence level", ylab = "VaR/ETL") + + title("t VaR and ETL against confidence level") + xmin <- min(cl0)+.3*(max(cl0)-min(cl0)) + text(xmin,max(VaR)-.1*(max(VaR)-min(VaR)), + 'Input parameters', cex=.75, font = 2) + text(xmin,max(VaR)-.175*(max(VaR)-min(VaR)), + paste('Daily mean L/P = ',round(mu[1,1],3)),cex=.75) + text(xmin,max(VaR)-.25*(max(VaR)-min(VaR)), + paste('Stdev. 
of daily L/P = ',round(sigma[1,1],3)),cex=.75) + text(xmin,max(VaR)-.325*(max(VaR)-min(VaR)), + paste('Degrees of freedom = ',df),cex=.75) + text(xmin,max(VaR)-.4*(max(VaR)-min(VaR)), + paste('Holding period = ',hp,'days'),cex=.75) + # VaR and ETL labels + text(max(cl0)-.4*(max(cl0)-min(cl0)),min(VaR)+.3*(max(VaR)-min(VaR)),'Upper line - ETL',cex=.75); + text(max(cl0)-.4*(max(cl0)-min(cl0)),min(VaR)+.2*(max(VaR)-min(VaR)),'Lower line - VaR',cex=.75); + +} From noreply at r-forge.r-project.org Wed Aug 5 23:19:16 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 23:19:16 +0200 (CEST) Subject: [Returnanalytics-commits] r3918 - pkg/Dowd Message-ID: <20150805211916.81E05187A4C@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 23:19:16 +0200 (Wed, 05 Aug 2015) New Revision: 3918 Modified: pkg/Dowd/NAMESPACE Log: Functions tESPlot3D, tVaR and tVaRESPlot2DCL added Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-05 20:37:07 UTC (rev 3917) +++ pkg/Dowd/NAMESPACE 2015-08-05 21:19:16 UTC (rev 3918) @@ -128,5 +128,8 @@ export(tESFigure) export(tESPlot2DCL) export(tESPlot2DHP) +export(tESPlot3D) +export(tVaR) +export(tVaRESPlot2DCL) import(MASS) import(bootstrap) From noreply at r-forge.r-project.org Wed Aug 5 23:28:57 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 5 Aug 2015 23:28:57 +0200 (CEST) Subject: [Returnanalytics-commits] r3919 - in pkg/Dowd: . vignettes Message-ID: <20150805212857.412A51862E3@r-forge.r-project.org> Author: dacharya Date: 2015-08-05 23:28:56 +0200 (Wed, 05 Aug 2015) New Revision: 3919 Added: pkg/Dowd/vignettes/ pkg/Dowd/vignettes/Dowd.Rnw Log: Vignettes folder added Added: pkg/Dowd/vignettes/Dowd.Rnw =================================================================== --- pkg/Dowd/vignettes/Dowd.Rnw (rev 0) +++ pkg/Dowd/vignettes/Dowd.Rnw 2015-08-05 21:28:56 UTC (rev 3919) @@ -0,0 +1,39 @@ +\documentclass{article} +\usepackage{amsmath, amsthm} +\usepackage{hyperref} +\usepackage{Rd} +\usepackage{Sweave} +%\VignetteDepends{Dowd, MASS, bootstrap} +%\VignetteIndexEntry{Dowd} +%\VignetteKeywords{risk measurement, parametric methods, non-parametric methods, backtest} +%\VignettePackage{Dowd} +\title{Usage of \pkg{Dowd} Package} +\author{Dinesh Acharya} +\begin{document} +\maketitle +\begin{abstract} +In this vignette, the use of the package \pkg{Dowd} for various parametric and non-parametric methods to measure market risk is demonstrated. Additionally, methods for backtesting risk measures are also discussed. +\end{abstract} +\tableofcontents +\section{Introduction} +Market risks are those risks that are associated with fluctuations in market prices or rates. For example, the risk associated with fluctuations in the price of a particular stock or a certain commodity is a market risk, whereas the risk associated with the default of a loan or the collapse of the financial system is not.\\ +\\ +Since the early works of Harry Markowitz, and particularly in the last two decades, there has been significant development in the area of risk measurement. Value-at-Risk (VaR) has become a widely used measure of risk. VaR at the $\alpha$ confidence level is defined as the negative of the $\alpha$-th quantile of the profit/loss distribution, i.e. +\[VaR_{\alpha}(F) = -\inf\{x\in R : F(x) \ge \alpha\}\] +where $F$ is the distribution function associated with the profit/loss random variable.\\ +\\ 
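+For a concrete illustration, note that for normally distributed P/L this definition reduces to the negative $\alpha$-quantile of the P/L distribution. The chunk below is a minimal sketch using only base R, with purely illustrative parameter values rather than estimates from any particular data set:
+<<>>=
+# VaR of normally distributed daily P/L (illustrative parameters)
+mu <- 0.012    # assumed daily mean of P/L
+sigma <- 0.03  # assumed daily standard deviation of P/L
+alpha <- 0.05  # tail probability, corresponding to a 95 percent confidence level
+-qnorm(alpha, mean = mu, sd = sigma)  # VaR as the negative alpha-quantile
+@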
+VaR has its own weaknesses. Consequently, ES has been championed by some as a better alternative to VaR. At the $\alpha$ confidence level, it is defined as: +\[ES_{\alpha}(F)=\frac{1}{\alpha}\int_0^{\alpha}VaR_u(F)\,du\] +ES, too, has its own weaknesses, and a few other alternative risk measures have also been proposed. + +\section{Parametric Methods} +Parametric methods are based on certain assumptions about the profit/loss distribution. Based on those assumptions, the parameters of the theoretical distribution are estimated from the data. Given a theoretical distribution, the definition of VaR or ES given above usually reduces to a definite form, which can be evaluated using the parameter estimates. +<<>>= +library(Dowd) +library(MASS) +library(bootstrap) +library(PerformanceAnalytics) +@ + + +\end{document} \ No newline at end of file From noreply at r-forge.r-project.org Fri Aug 7 10:03:52 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 7 Aug 2015 10:03:50 +0200 (Fri, 07 Aug 2015) New Revision: 3920 Modified: pkg/Dowd/NAMESPACE Log: Functions tVaRDFPerc, tVaRFigure and tVaRPlot2DCL added Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-05 21:28:56 UTC (rev 3919) +++ pkg/Dowd/NAMESPACE 2015-08-07 08:03:50 UTC (rev 3920) @@ -130,6 +130,9 @@ export(tESPlot2DHP) export(tESPlot3D) export(tVaR) +export(tVaRDFPerc) export(tVaRESPlot2DCL) +export(tVaRFigure) +export(tVaRPlot2DCL) import(MASS) import(bootstrap) From noreply at r-forge.r-project.org Fri Aug 7 10:04:27 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 7 Aug 2015 10:04:27 +0200 (CEST) Subject: [Returnanalytics-commits] r3921 - pkg/Dowd/man Message-ID: <20150807080427.6D6CE1879B1@r-forge.r-project.org> Author: dacharya Date: 2015-08-07 10:04:27 +0200 (Fri, 07 Aug 2015) New Revision: 3921 Added: pkg/Dowd/man/tVaRFigure.Rd Log: Function tVaRFigure added Added: pkg/Dowd/man/tVaRFigure.Rd =================================================================== --- pkg/Dowd/man/tVaRFigure.Rd (rev 0) +++ pkg/Dowd/man/tVaRFigure.Rd 2015-08-07 08:04:27 UTC (rev 3921) @@ -0,0 +1,46 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/tVaRFigure.R +\name{tVaRFigure} +\alias{tVaRFigure} +\title{Figure of t- VaR and pdf against L/P} +\usage{ +tVaRFigure(...) +} +\arguments{ +\item{...}{The input arguments contain either return data or else mean and + standard deviation data. Accordingly, number of input arguments is either 4 + or 5. In case there are 4 input arguments, the mean and standard deviation of + data is computed from return data. See examples for details. + + returns Vector of daily geometric return data + + mu Mean of daily geometric return data + + sigma Standard deviation of daily geometric return data + + df Number of degrees of freedom + + cl VaR confidence level and should be scalar + + hp VaR holding period in days and should be scalar} } \description{ Gives figure showing the VaR and probability distribution function against L/P + of a portfolio assuming P/L are t distributed, for specified + confidence level and holding period. 
+} +\examples{ +# Plots normal VaR and pdf against L/P data for given returns data + data <- runif(5, min = 0, max = .2) + tVaRFigure(returns = data, df = 7, cl = .95, hp = 90) + + # Plots normal VaR and pdf against L/P data with given parameters + tVaRFigure(mu = .012, sigma = .03, df=7, cl = .95, hp = 90) +} +\author{ +Dinesh Acharya +} +\references{ +Dowd, K. Measuring Market Risk, Wiley, 2007. +} + From noreply at r-forge.r-project.org Fri Aug 7 10:05:03 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 7 Aug 2015 10:05:03 +0200 (CEST) Subject: [Returnanalytics-commits] r3922 - pkg/Dowd/R Message-ID: <20150807080503.30E641879B1@r-forge.r-project.org> Author: dacharya Date: 2015-08-07 10:04:59 +0200 (Fri, 07 Aug 2015) New Revision: 3922 Added: pkg/Dowd/R/tVaRFigure.R Log: Function tVaRFigure added Added: pkg/Dowd/R/tVaRFigure.R =================================================================== --- pkg/Dowd/R/tVaRFigure.R (rev 0) +++ pkg/Dowd/R/tVaRFigure.R 2015-08-07 08:04:59 UTC (rev 3922) @@ -0,0 +1,152 @@ +#' Figure of t- VaR and pdf against L/P +#' +#' Gives figure showing the VaR and probability distribution function against L/P +#' of a portfolio assuming P/L are normally distributed, for specified +#' confidence level and holding period. +#' +#' @param ... The input arguments contain either return data or else mean and +#' standard deviation data. Accordingly, number of input arguments is either 4 +#' or 5. In case there 4 input arguments, the mean and standard deviation of +#' data is computed from return data. See examples for details. +#' +#' returns Vector of daily geometric return data +#' +#' mu Mean of daily geometric return data +#' +#' sigma Standard deviation of daily geometric return data +#' +#' df Number of degrees of freedom +#' +#' cl VaR confidence level and should be scalar +#' +#' hp VaR holding period in days and should be scalar +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Plots normal VaR and pdf against L/P data for given returns data +#' data <- runif(5, min = 0, max = .2) +#' tVaRFigure(returns = data, df = 7, cl = .95, hp = 90) +#' +#' # Plots normal VaR and pdf against L/P data with given parameters +#' tVaRFigure(mu = .012, sigma = .03, df=7, cl = .95, hp = 90) +#' +#' +#' @export +tVaRFigure <- function(...){ + # Determine if there are four or five arguments and ensure that arguments are + # read as intended + if (nargs() < 4) { + stop("Too few arguments") + } + if (nargs() > 5) { + stop("Too many arguments") + } + args <- list(...) 
+  if (nargs() == 5) {
+    mu <- args$mu
+    cl <- args$cl
+    sigma <- args$sigma
+    hp <- args$hp
+    df <- args$df
+  }
+  if (nargs() == 4) {
+    mu <- mean(args$returns)
+    cl <- args$cl
+    sigma <- sd(args$returns)
+    hp <- args$hp
+    df <- args$df
+  }
+
+  # Check that inputs have correct dimensions
+  mu <- as.matrix(mu)
+  mu.row <- dim(mu)[1]
+  mu.col <- dim(mu)[2]
+  if (max(mu.row, mu.col) > 1) {
+    stop("Mean must be a scalar")
+  }
+  sigma <- as.matrix(sigma)
+  sigma.row <- dim(sigma)[1]
+  sigma.col <- dim(sigma)[2]
+  if (max(sigma.row, sigma.col) > 1) {
+    stop("Standard deviation must be a scalar")
+  }
+  cl <- as.matrix(cl)
+  cl.row <- dim(cl)[1]
+  cl.col <- dim(cl)[2]
+  if (min(cl.row, cl.col) > 1) {
+    stop("Confidence level must be a scalar or a vector")
+  }
+  hp <- as.matrix(hp)
+  hp.row <- dim(hp)[1]
+  hp.col <- dim(hp)[2]
+  if (min(hp.row, hp.col) > 1) {
+    stop("Holding period must be a scalar or a vector")
+  }
+  df <- as.matrix(df)
+  df.row <- dim(df)[1]
+  df.col <- dim(df)[2]
+  if (max(df.row, df.col) > 1) {
+    stop("Number of degrees of freedom must be a scalar")
+  }
+
+  # Check that cl and hp are read as row and column vectors respectively
+  if (cl.row > cl.col) {
+    cl <- t(cl)
+  }
+  if (hp.row > hp.col) {
+    hp <- t(hp)
+  }
+
+  # Check that inputs obey sign and value restrictions
+  if (sigma < 0) {
+    stop("Standard deviation must be non-negative")
+  }
+  if (df < 3) {
+    stop("Number of degrees of freedom must be at least 3 for first two
+         moments of distribution to be defined")
+  }
+  if (max(cl) >= 1){
+    stop("Confidence level(s) must be less than 1")
+  }
+  if (min(cl) <= 0){
+    stop("Confidence level(s) must be greater than 0")
+  }
+  if (min(hp) <= 0){
+    stop("Holding Period(s) must be greater than 0")
+  }
+
+  # Message to indicate how matrix of results is to be interpreted, if cl and
+  # hp both vary and results are given in matrix form
+  if (max(cl.row, cl.col) > 1 & max(hp.row, hp.col) > 1) {
+    print('VaR results with confidence level varying across row and holding
+          period down column')
+  }
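+
+  # Editorial note: the t quantile below is scaled by sqrt((df - 2) / df) so
+  # that the rescaled t distribution has standard deviation sigma (a
+  # Student-t with df degrees of freedom has variance df / (df - 2)); the
+  # same scaling is used in tVaRDFPerc and tVaRPlot2DCL.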
+  # VaR estimation
+  cl.row <- dim(cl)[1]
+  cl.col <- dim(cl)[2]
+  VaR <- - sigma[1,1] * sqrt(hp) %*% (sqrt((df - 2) / df) * qt(1 - cl, df)) -
+    mu[1,1] * hp %*% matrix(1, cl.row, cl.col) # VaR
+
+  # Plotting
+  x.min <- -mu[1,1] - 5 * sigma[1,1]
+  x.max <- -mu[1,1] + 5 * sigma[1,1]
+  delta <- (x.max - x.min) / 100
+  x <- seq(x.min, x.max, delta)
+  p <- dt((x - mu[1,1]) / sigma[1,1], df) / sigma[1,1]
+  plot(x, p, type = "l", xlim = c(x.min, x.max), ylim = c(0, max(p)*1.1),
+       xlab = "Loss (+) / Profit (-)", ylab = "Probability", main = "t VaR")
+  u <- c(VaR, VaR)
+  v <- c(0, .6*max(p))
+  lines(u, v, type = "l", col = "blue")
+  cl.for.label <- 100*cl
+  text(1,.95*max(p), pos = 1, 'Input parameters', cex=.75, font = 2)
+  text(1, .875*max(p),pos = 1, paste('Daily mean L/P = ', -round(mu[1,1],2)), cex=.75)
+  text(1, .8*max(p),pos = 1, paste('St. dev. of daily L/P = ', round(sigma[1,1],2)), cex=.75)
+  text(1, .725*max(p),pos = 1, paste('Holding period = ', hp,' day(s)'), cex=.75)
+  text(VaR, .7*max(p),pos = 2, paste('VaR at ', cl.for.label,'% CL'), cex=.75)
+  text(VaR, .64 * max(p),pos = 2, paste('= ',VaR), cex=.75)
+}

From noreply at r-forge.r-project.org  Fri Aug 7 10:18:58 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Fri, 7 Aug 2015 10:18:58 +0200 (CEST)
Subject: [Returnanalytics-commits] r3923 - pkg/Dowd/R
Message-ID: <20150807081858.8AD3F185C2C@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-07 10:18:58 +0200 (Fri, 07 Aug 2015)
New Revision: 3923

Added:
   pkg/Dowd/R/tVaRDFPerc.R
Log:
Function tVaRDFPerc added

Added: pkg/Dowd/R/tVaRDFPerc.R
===================================================================
--- pkg/Dowd/R/tVaRDFPerc.R	(rev 0)
+++ pkg/Dowd/R/tVaRDFPerc.R	2015-08-07 08:18:58 UTC (rev 3923)
@@ -0,0 +1,172 @@
+#' Percentiles of VaR distribution function
+#'
+#' Estimates percentiles of the VaR distribution function of a portfolio,
+#' assuming that P/L are t distributed, for specified confidence level and
+#' holding period.
+#'
+#' @param ... The input arguments contain either return data or else mean and
+#' standard deviation data. Accordingly, the number of input arguments is
+#' either 5 or 7. In case there are 5 input arguments, the mean, standard
+#' deviation and number of observations of the data are computed from return
+#' data. See examples for details.
+#'
+#' returns Vector of daily geometric return data
+#'
+#' mu Mean of daily geometric return data
+#'
+#' sigma Standard deviation of daily geometric return data
+#'
+#' n Sample size
+#'
+#' perc Desired percentile
+#'
+#' df Number of degrees of freedom in the t distribution
+#'
+#' cl VaR confidence level and must be a scalar
+#'
+#' hp VaR holding period and must be a scalar
+#'
+#' Percentiles of VaR distribution function
+#'
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#' @examples
+#'
+#'    # Estimates percentiles of VaR distribution
+#'    data <- runif(5, min = 0, max = .2)
+#'    tVaRDFPerc(returns = data, perc = .7,
+#'               df = 6, cl = .95, hp = 60)
+#'
+#'    # Computes the VaR percentile given mean and standard deviation of
+#'    # return data
+#'    tVaRDFPerc(mu = .012, sigma = .03, n= 10,
+#'               perc = .8, df = 6, cl = .99, hp = 40)
+#'
+#'
+#' @export
+tVaRDFPerc <- function(...){
+  # Determine if there are five or seven arguments, and ensure that arguments
+  # are read as intended
+  if (nargs() < 5) {
+    stop("Too few arguments")
+  }
+  if (nargs() == 6) {
+    stop("Incorrect number of arguments")
+  }
+  if (nargs() > 7) {
+    stop("Too many arguments")
+  }
+  args <- list(...)
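+  # Editorial note: as in tVaRFigure, inputs are read from list(...) by name,
+  # so they must be supplied as named arguments (returns= or mu=/sigma=/n=,
+  # together with perc=, df=, cl= and hp=).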
+  if (nargs() == 7) {
+    mu <- args$mu
+    df <- args$df
+    cl <- args$cl
+    perc <- args$perc
+    n <- args$n
+    sigma <- args$sigma
+    hp <- args$hp
+  }
+  if (nargs() == 5) {
+    mu <- mean(args$returns)
+    df <- args$df
+    n <- max(dim(as.matrix(args$returns)))
+    perc <- args$perc
+    cl <- args$cl
+    sigma <- sd(args$returns)
+    hp <- args$hp
+  }
+
+  # Check that inputs have correct dimensions
+  mu <- as.matrix(mu)
+  mu.row <- dim(mu)[1]
+  mu.col <- dim(mu)[2]
+  if (max(mu.row, mu.col) > 1) {
+    stop("Mean must be a scalar")
+  }
+  sigma <- as.matrix(sigma)
+  sigma.row <- dim(sigma)[1]
+  sigma.col <- dim(sigma)[2]
+  if (max(sigma.row, sigma.col) > 1) {
+    stop("Standard deviation must be a scalar")
+  }
+  n <- as.matrix(n)
+  n.row <- dim(n)[1]
+  n.col <- dim(n)[2]
+  if (max(n.row, n.col) > 1) {
+    stop("Number of observations in a sample must be an integer")
+  }
+  perc <- as.matrix(perc)
+  perc.row <- dim(perc)[1]
+  perc.col <- dim(perc)[2]
+  if (max(perc.row, perc.col) > 1) {
+    stop("Chosen percentile of the distribution must be a scalar")
+  }
+  cl <- as.matrix(cl)
+  cl.row <- dim(cl)[1]
+  cl.col <- dim(cl)[2]
+  if (max(cl.row, cl.col) > 1) {
+    stop("Confidence level must be a scalar")
+  }
+  hp <- as.matrix(hp)
+  hp.row <- dim(hp)[1]
+  hp.col <- dim(hp)[2]
+  if (max(hp.row, hp.col) > 1) {
+    stop("Holding period must be a scalar")
+  }
+  df <- as.matrix(df)
+  df.row <- dim(df)[1]
+  df.col <- dim(df)[2]
+  if (max(df.row, df.col) > 1) {
+    stop("Number of degrees of freedom must be a scalar")
+  }
+
+  # Check that inputs obey sign and value restrictions
+  if (sigma < 0) {
+    stop("Standard deviation must be non-negative")
+  }
+  if (n < 0) {
+    stop("Number of observations must be non-negative")
+  }
+  if (perc > 1){
+    stop("Chosen percentile must not exceed 1")
+  }
+  if (perc <= 0){
+    stop("Chosen percentile must be positive")
+  }
+  if (cl >= 1){
+    stop("Confidence level(s) must be less than 1")
+  }
+  if (cl <= 0){
+    stop("Confidence level(s) must be greater than 0")
+  }
+  if (hp <= 0){
+    stop("Holding period(s) must be greater than 0")
+  }
+  if (df < 3) {
+    stop("Number of degrees of freedom must be at least 3 for first two
+         moments of distribution to be defined")
+  }
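+
+  # Editorial note: the bisection below solves 1 - pbinom(r - 1, n, x) = perc
+  # for x. Since the probability that the r-th order statistic of an n-sample
+  # lies below the x-quantile equals P(Binomial(n, x) >= r), the root x gives
+  # the perc-percentile of the r-th order statistic, and the VaR percentile
+  # is then read off the rescaled t quantile at x.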
+  # Derive order statistic and ensure it is an integer
+  w <- n * cl # Derive r-th order statistic
+  r <- round(w) # Round r to nearest integer
+  # Bisection routine
+  a <- 0
+  fa <- -Inf
+  b <- 1
+  fb <- Inf
+  eps <- .Machine$double.eps
+  while (b - a > eps * b) {
+    x <- (a + b) / 2
+    fx <- 1 - pbinom(r - 1, n, x) - perc
+    if (sign(fx) == sign(fa)){
+      a <- x
+      fa <- fx
+    } else {
+      b <- x
+      fb <- fx
+    }
+  }
+  # VaR estimation
+  y <- -mu %*% hp + sigma %*% sqrt(hp) %*% sqrt((df - 2) / df) %*% qt(x, df) # VaR
+
+  return(y)
+}

From noreply at r-forge.r-project.org  Fri Aug 7 10:19:20 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Fri, 7 Aug 2015 10:19:20 +0200 (CEST)
Subject: [Returnanalytics-commits] r3924 - pkg/Dowd/man
Message-ID: <20150807081920.4D1A2185C2C@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-07 10:19:19 +0200 (Fri, 07 Aug 2015)
New Revision: 3924

Added:
   pkg/Dowd/man/tVaRDFPerc.Rd
Log:
Function tVaRDFPerc added

Added: pkg/Dowd/man/tVaRDFPerc.Rd
===================================================================
--- pkg/Dowd/man/tVaRDFPerc.Rd	(rev 0)
+++ pkg/Dowd/man/tVaRDFPerc.Rd	2015-08-07 08:19:19 UTC (rev 3924)
@@ -0,0 +1,54 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/tVaRDFPerc.R
+\name{tVaRDFPerc}
+\alias{tVaRDFPerc}
+\title{Percentiles of VaR distribution function}
+\usage{
+tVaRDFPerc(...)
+}
+\arguments{
+\item{...}{The input arguments contain either return data or else mean and
+standard deviation data. Accordingly, the number of input arguments is
+either 5 or 7. In case there are 5 input arguments, the mean, standard
+deviation and number of observations of the data are computed from return
+data. See examples for details.

+ returns Vector of daily geometric return data
+
+ mu Mean of daily geometric return data
+
+ sigma Standard deviation of daily geometric return data
+
+ n Sample size
+
+ perc Desired percentile
+
+ df Number of degrees of freedom in the t distribution
+
+ cl VaR confidence level and must be a scalar
+
+ hp VaR holding period and must be a scalar
+
+ Percentiles of VaR distribution function}
+}
+\description{
+Estimates percentiles of the VaR distribution function of a portfolio,
+assuming that P/L are t distributed, for specified confidence level and
+holding period.
+}
+\examples{
+# Estimates percentiles of VaR distribution
+   data <- runif(5, min = 0, max = .2)
+   tVaRDFPerc(returns = data, perc = .7,
+              df = 6, cl = .95, hp = 60)
+
+   # Computes the VaR percentile given mean and standard deviation of return data
+   tVaRDFPerc(mu = .012, sigma = .03, n= 10,
+              perc = .8, df = 6, cl = .99, hp = 40)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Fri Aug 7 10:32:23 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Fri, 7 Aug 2015 10:32:23 +0200 (CEST)
Subject: [Returnanalytics-commits] r3925 - pkg/Dowd/man
Message-ID: <20150807083223.AFF071877BF@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-07 10:32:23 +0200 (Fri, 07 Aug 2015)
New Revision: 3925

Added:
   pkg/Dowd/man/tVaRPlot2DCL.Rd
Log:
Function tVaRPlot2DCL added

Added: pkg/Dowd/man/tVaRPlot2DCL.Rd
===================================================================
--- pkg/Dowd/man/tVaRPlot2DCL.Rd	(rev 0)
+++ pkg/Dowd/man/tVaRPlot2DCL.Rd	2015-08-07 08:32:23 UTC (rev 3925)
@@ -0,0 +1,45 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/tVaRPlot2DCL.R
+\name{tVaRPlot2DCL}
+\alias{tVaRPlot2DCL}
+\title{Plots t VaR against confidence level}
+\usage{
+tVaRPlot2DCL(...)
+}
+\arguments{
+\item{...}{The input arguments contain either return data or else mean and
+  standard deviation data. Accordingly, the number of input arguments is
+  either 4 or 5. In case there are 4 input arguments, the mean and standard
+  deviation of the data are computed from the return data. See examples for
+  details.
+
+ returns Vector of daily P/L data
+
+ mu Mean of daily P/L data
+
+ sigma Standard deviation of daily P/L data
+
+ df Number of degrees of freedom in the t distribution
+
+ cl VaR confidence level and must be a vector
+
+ hp VaR holding period and must be a scalar}
+}
+\description{
+Plots the VaR of a portfolio against confidence level, assuming that P/L
+data are t distributed, for specified confidence levels and holding period.
+}
+\examples{
+# Plots VaR against confidence level given P/L data
+   data <- runif(5, min = 0, max = .2)
+   tVaRPlot2DCL(returns = data, df = 6, cl = seq(.85,.99,.01), hp = 60)
+
+   # Computes VaR against confidence level given mean and standard deviation
+   # of P/L data
+   tVaRPlot2DCL(mu = .012, sigma = .03, df = 6, cl = seq(.85,.99,.01), hp = 40)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Fri Aug 7 10:33:04 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Fri, 7 Aug 2015 10:33:04 +0200 (CEST)
Subject: [Returnanalytics-commits] r3926 - pkg/Dowd/R
Message-ID: <20150807083304.100031877BF@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-07 10:33:03 +0200 (Fri, 07 Aug 2015)
New Revision: 3926

Added:
   pkg/Dowd/R/tVaRPlot2DCL.R
Log:
Function tVaRPlot2DCL added

Added: pkg/Dowd/R/tVaRPlot2DCL.R
===================================================================
--- pkg/Dowd/R/tVaRPlot2DCL.R	(rev 0)
+++ pkg/Dowd/R/tVaRPlot2DCL.R	2015-08-07 08:33:03 UTC (rev 3926)
@@ -0,0 +1,135 @@
+#' Plots t VaR against confidence level
+#'
+#' Plots the VaR of a portfolio against confidence level, assuming that P/L
+#' data are t distributed, for specified confidence levels and holding period.
+#'
+#' @param ... The input arguments contain either return data or else mean and
+#' standard deviation data. Accordingly, the number of input arguments is
+#' either 4 or 5. In case there are 4 input arguments, the mean and standard
+#' deviation of the data are computed from the return data. See examples for
+#' details.
+#'
+#' returns Vector of daily P/L data
+#'
+#' mu Mean of daily P/L data
+#'
+#' sigma Standard deviation of daily P/L data
+#'
+#' df Number of degrees of freedom in the t distribution
+#'
+#' cl VaR confidence level and must be a vector
+#'
+#' hp VaR holding period and must be a scalar
+#'
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#' @examples
+#'
+#'    # Plots VaR against confidence level given P/L data
+#'    data <- runif(5, min = 0, max = .2)
+#'    tVaRPlot2DCL(returns = data, df = 6, cl = seq(.85,.99,.01), hp = 60)
+#'
+#'    # Computes VaR against confidence level given mean and standard
+#'    # deviation of P/L data
+#'    tVaRPlot2DCL(mu = .012, sigma = .03, df = 6, cl = seq(.85,.99,.01), hp = 40)
+#'
+#'
+#' @export
+tVaRPlot2DCL <- function(...){
+  # Determine if there are four or five arguments, and ensure that arguments
+  # are read as intended
+  if (nargs() < 4) {
+    stop("Too few arguments")
+  }
+  if (nargs() > 5) {
+    stop("Too many arguments")
+  }
+  args <- list(...)
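+  # Editorial note: arguments must again be passed by name (returns= or
+  # mu=/sigma=, together with df=, cl= and hp=); here cl may be a vector,
+  # which is what produces the VaR curve over confidence levels.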
+ if (nargs() == 5) { + mu <- args$mu + df <- args$df + cl <- args$cl + sigma <- args$sigma + hp <- args$hp + } + if (nargs() == 4) { + mu <- mean(args$returns) + df <- args$df + cl <- args$cl + sigma <- sd(args$returns) + hp <- args$hp + } + + # Check that inputs have correct dimensions + mu <- as.matrix(mu) + mu.row <- dim(mu)[1] + mu.col <- dim(mu)[2] + if (max(mu.row, mu.col) > 1) { + stop("Mean must be a scalar") + } + sigma <- as.matrix(sigma) + sigma.row <- dim(sigma)[1] + sigma.col <- dim(sigma)[2] + if (max(sigma.row, sigma.col) > 1) { + stop("Standard deviation must be a scalar") + } + cl <- as.matrix(cl) + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + if (min(cl.row, cl.col) > 1) { + stop("Confidence level must be a vector") + } + hp <- as.matrix(hp) + hp.row <- dim(hp)[1] + hp.col <- dim(hp)[2] + if (max(hp.row, hp.col) > 1) { + stop("Holding period must be a scalar") + } + df <- as.matrix(df) + df.row <- dim(df)[1] + df.col <- dim(df)[2] + if (max(df.row, df.col) > 1) { + stop("Number of degrees of freedom must be a scalar") + } + + # Check that cl is read as row vector + if (cl.row > cl.col) { + cl <- t(cl) + } + + # Check that inputs obey sign and value restrictions + if (sigma < 0) { + stop("Standard deviation must be non-negative") + } + if (df < 3) { + stop("Number of degrees of freedom must be at least 3 for first two moments + of distribution to be defined") + } + if (max(cl) >= 1){ + stop("Confidence level(s) must be less than 1") + } + if (min(cl) <= 0){ + stop("Confidence level(s) must be greater than 0") + } + if (min(hp) <= 0){ + stop("Holding period(s) must be greater than 0") + } + # VaR estimation + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + VaR <- (-sigma[1,1] * sqrt(t(hp)) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + + (- mu[1,1] * t(hp) %*% matrix(1, cl.row, cl.col)) # VaR + + # Plotting + plot(cl, VaR, type = "l", xlab = "Confidence Level", ylab = "VaR") + title("t VaR against confidence level") + xmin <-min(cl)+.3*(max(cl)-min(cl)) + text(xmin,max(VaR)-.1*(max(VaR)-min(VaR)), + 'Input parameters', cex=.75, font = 2) + text(xmin,max(VaR)-.15*(max(VaR)-min(VaR)), + paste('Daily mean L/P = ',-round(mu[1,1],3)),cex=.75) + text(xmin,max(VaR)-.2*(max(VaR)-min(VaR)), + paste('Stdev. of daily L/P data = ',round(sigma[1,1],3)),cex=.75) + text(xmin,max(VaR)-.25*(max(VaR)-min(VaR)), + paste('Degrees of freedom = ',df),cex=.75) + text(xmin,max(VaR)-.3*(max(VaR)-min(VaR)), + paste('Holding period = ',hp,'days'),cex=.75) +} From noreply at r-forge.r-project.org Fri Aug 7 17:06:54 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 7 Aug 2015 17:06:54 +0200 (CEST) Subject: [Returnanalytics-commits] r3927 - in pkg/Meucci: . 
R demo man
Message-ID: <20150807150654.4DC6A187A3A@r-forge.r-project.org>

Author: xavierv
Date: 2015-08-07 17:06:53 +0200 (Fri, 07 Aug 2015)
New Revision: 3927

Modified:
   pkg/Meucci/DESCRIPTION
   pkg/Meucci/NAMESPACE
   pkg/Meucci/R/RobustBayesianAllocation.R
   pkg/Meucci/demo/RobustBayesianAllocation.R
   pkg/Meucci/man/PartialConfidencePosterior.Rd
   pkg/Meucci/man/efficientFrontier.Rd
   pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd
Log:
fixed and reformatted Robust Bayesian Allocation demo script and its functions

Modified: pkg/Meucci/DESCRIPTION
===================================================================
--- pkg/Meucci/DESCRIPTION	2015-08-07 08:33:03 UTC (rev 3926)
+++ pkg/Meucci/DESCRIPTION	2015-08-07 15:06:53 UTC (rev 3927)
@@ -37,7 +37,7 @@
     kernlab,
     nloptr,
     limSolve,
-    linprog,
+    linprog
 Suggests:
     Matrix,
     MASS,

Modified: pkg/Meucci/NAMESPACE
===================================================================
--- pkg/Meucci/NAMESPACE	2015-08-07 08:33:03 UTC (rev 3926)
+++ pkg/Meucci/NAMESPACE	2015-08-07 15:06:53 UTC (rev 3927)
@@ -76,6 +76,7 @@
 export(ViewImpliedVol)
 export(ViewRanking)
 export(ViewRealizedVol)
+export(efficientFrontier)
 export(garch1f4)
 export(garch2f8)
 export(hermitePolynomial)

Modified: pkg/Meucci/R/RobustBayesianAllocation.R
===================================================================
--- pkg/Meucci/R/RobustBayesianAllocation.R	2015-08-07 08:33:03 UTC (rev 3926)
+++ pkg/Meucci/R/RobustBayesianAllocation.R	2015-08-07 15:06:53 UTC (rev 3927)
@@ -1,229 +1,360 @@
-library( matlab )
-library( quadprog )
-library( ggplot2 )
-library( MASS )
-
-#' Construct the mean-variance efficient frontier using a quadratic solver
+#' @title Construct the mean-variance efficient frontier using quadratic solver
 #'
-#' Construct a number of long-only or long-short portfolios on the mean-variance efficient frontier where each
-#' portfolio is equally distanced in return space
-#' @param discretizations number of portfolios to generate along efficient frontier (where each portfolio is equally distanced in return spaced)
+#' @description Construct a number of long-only or long-short portfolios on the
+#' mean-variance efficient frontier where each portfolio is equally distanced in
+#' return space
+#'
+#' @param discretizations number of portfolios to generate along efficient
+#'                        frontier (where each portfolio is equally distanced
+#'                        in return space)
 #' @param cov arithmetic covariance matrix of asset returns
 #' @param mu a vector of arithmetic returns for each asset
 #' @param longonly a boolean which constrains weights to > 0 if true
 #'
-#' @return a list of portfolios along the frontier from least risky to most risky
+#' @return list of portfolios along the frontier from least risky to most risky
 #'         The indices in each list correspond to each other
-#' returns the expected portfolio returns along the frontier
-#' volatility the variance of the portfolio along the frontier
-#' weights the weights of the portfolio components along the frontier
+#'         returns     expected portfolio returns along the frontier
+#'         volatility  volatility (standard deviation) of the portfolio along
+#'                     the frontier
+#'         weights     weights of the portfolio components in the frontier
 #' @references Attilio Meucci, 2011, Robust Bayesian Allocation
 #' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=681553}
 #' @seealso \url{http://symmys.com/node/102}
 #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}
-efficientFrontier = function( discretizations , cov , mu , longonly = FALSE )
-{
+#' @export
+
+efficientFrontier <-
function(discretizations, cov, mu, longonly = FALSE) { # setup quadratic program - N = nrow( cov ) - firstDegree = zeros( N , 1 ) - secondDegree = cov - Aeq = ones( 1 , N ) ; beq = 1 - A = eye( N ) - b = zeros( N , 1 ) - - if ( !longonly ) - { Aqp = t( Aeq ) ; bqp = beq } - else - { Aqp = t( rbind( Aeq , A ) ) ; bqp = c( beq , b ) } - + N <- nrow(cov) + firstDegree <- matrix(0, N, 1) + secondDegree <- cov + Aeq <- matrix(1, 1, N) + beq <- 1 + A <- diag(1, N) + b <- matrix(0, N, 1) + + if (!longonly){ + Aqp <- t(Aeq) + bqp <- beq + } else { + Aqp <- t(rbind(Aeq, A)) + bqp <- c(beq, b) + } + # determine return of minimum-risk portfolio - minVolWeights = solve.QP( secondDegree , firstDegree , Aqp , bqp , length( beq ) )$solution - minVolRet = minVolWeights %*% mu - + minVolWeights <- solve.QP(secondDegree, firstDegree, Aqp, bqp, + length(beq))$solution + minVolRet <- minVolWeights %*% mu + # determine return of maximum-return portfolio - maxRet = max( mu ) - - # slice efficient frontier in 'discretizations' number of equally thick horizontal sectors in the upper branch only - step = ( maxRet - minVolRet ) / ( discretizations - 1 ) - targetReturns = seq( minVolRet , maxRet , step ) - - # compute the compositions and risk-return coordinates of the optimal allocations relative to each slice - + maxRet <- max(mu) + + # slice efficient frontier in 'discretizations' number of equally thick + # horizontal sectors in the upper branch only + step <- (maxRet - minVolRet) / (discretizations - 1) + targetReturns <- seq(minVolRet, maxRet, step) + + # compute the compositions and risk-return coordinates of the optimal + # allocations relative to each slice + # start with min vol portfolio - weights = minVolWeights - volatility = sqrt( minVolWeights %*% cov %*% minVolWeights ) - returns = minVolRet - - for( i in 2:( discretizations - 1 ) ){ + weights <- minVolWeights + volatility <- sqrt(minVolWeights %*% cov %*% minVolWeights) + returns <- minVolRet + + for (i in 2:(discretizations - 1)){ # determine least risky portfolio for given expected return - Aeq = ones( 1 , N ) - Aeq = rbind( Aeq , t( mu ) ) - - beq = c( 1 , targetReturns[i] ) - if( !longonly ){ - Aqp = t( Aeq ) #combine A matrices - bqp = beq #combine b vectors - }else{ - Aqp = t( rbind( Aeq , A )) - bqp = c(beq,b) + Aeq <- matrix(1, 1, N) + Aeq <- rbind(Aeq, t(mu)) + + beq <- c(1, targetReturns[i]) + if (!longonly) { + Aqp <- t(Aeq) #combine A matrices + bqp <- beq #combine b vectors + } else { + Aqp <- t(rbind(Aeq, A)) + bqp <- c(beq, b) } - - solvedWeights = solve.QP( secondDegree , firstDegree , Aqp , bqp , 1 )$solution - weights = rbind( weights , solvedWeights ) - volatility = c( volatility , sqrt( solvedWeights %*% cov %*% solvedWeights ) ) - returns = c( returns , solvedWeights %*% mu ) - - } - return( list( returns = returns , volatility = volatility , weights = weights ) ) + + solvedWeights <- solve.QP(secondDegree, firstDegree, Aqp, bqp, 1)$solution + weights <- rbind(weights, solvedWeights) + volatility <- c(volatility, sqrt(solvedWeights %*% cov %*% solvedWeights)) + returns <- c(returns, solvedWeights %*% mu) + } + return(list(returns = returns, volatility = volatility, weights = weights)) } -#' Construct a Bayesian mean-variance efficient frontier and identifies the most robust portfolio +#' @title Construct a Bayesian mean-variance efficient frontier and identifies +#' the most robust portfolio #' -#' Construct a collection of portfolios along the Bayesian mean-variance efficient frontier -#' where each portfolio is equally 
distanced in return space. The function also returns the most robust -#' portfolio along the Bayesian efficient frontier +#' @description Construct a collection of portfolios along the Bayesian +#' mean-variance efficient frontier where each portfolio is equally distanced in +#' return space. The function also returns the most robust portfolio along the +#' Bayesian efficient frontier #' -#' @param mean_post the posterior vector of means (after blending prior and sample data) -#' @param cov_post the posterior covariance matrix (after blending prior and sample data) -#' @param nu_post a numeric with the relative confidence in the prior vs. the sample data. A value of 2 indicates twice as much weight to assign to the prior vs. the sample data. Must be greater than or equal to zero +#' @param mean_post the posterior vector of means (after blending prior +#' and sample data) +#' @param cov_post the posterior covariance matrix (after blending +#' prior and sample data) +#' @param nu_post a numeric with the relative confidence in the prior +#' vs. the sample data. A value of 2 indicates twice +#' as much weight to assign to the prior vs. the +#' sample data. Must be greater than or equal to zero. #' @param time_post a numeric #' @param riskAversionMu risk aversion coefficient for estimation of means. #' @param riskAversionSigma risk aversion coefficient for estimation of Sigma. -#' @param discretizations an integer with the number of portfolios to generate along efficient frontier (equally distanced in return space). Parameter must be an integer greater or equal to 1. -#' @param longonly a boolean for suggesting whether an asset in a portfolio can be shorted or not -#' @param volatility a numeric with the volatility used to calculate gamma-m. gamma-m acts as a constraint on the maximum volatility of the robust portfolio. A higher volatility means a higher volatile robust portfolio may be identified. +#' @param discretizations an integer with the number of portfolios to +#' generate along efficient frontier (equally +#' distanced in return space). Parameter must be an +#' integer greater or equal to 1. +#' @param longonly a boolean for suggesting whether an asset in a +#' portfolio can be shorted or not. +#' @param volatility a numeric with the volatility used to calculate +#' gamma-m. gamma-m acts as a constraint on the +#' maximum volatility of the robust portfolio. A +#' higher volatility means a higher volatile robust +#' portfolio may be identified. #' -#' @return a list of portfolios along the frontier from least risky to most risky -#' bayesianFrontier a list with portfolio along the Bayesian efficient frontier. Specifically: -#' returns: the expected returns of each portfolio along the Bayesian efficient frontier -#' volatility: the expected volatility of each portfolio along the Bayesian efficient frontier -#' weights: the weights of each portfolio along the Bayesian efficient frontier -#' robustPortfolio the most robust portfolio along the Bayesian efficient frontier. Specifically: -#' returns: the expected returns of each portfolio along the Bayesian efficient frontier -#' volatility: the expected volatility of each portfolio along the Bayesian efficient frontier -#' weights: the weights of each portfolio along the Bayesian efficient frontier +#' @return a list of portfolios along the frontier from least risky to most +#' risky +#' bayesianFrontier a list with portfolio along the Bayesian efficient +#' frontier. 
Specifically:
+#'                   returns: the expected returns of each portfolio
+#'                            along the Bayesian efficient frontier
+#'                   volatility: the expected volatility of each
+#'                               portfolio along the Bayesian
+#'                               efficient frontier
+#'                   weights: the weights of each portfolio along
+#'                            the Bayesian efficient frontier
+#' robustPortfolio   the most robust portfolio along the Bayesian
+#'                   efficient frontier. Specifically:
+#'                   returns: the expected returns of each portfolio
+#'                            along the Bayesian efficient frontier
+#'                   volatility: the expected volatility of each
+#'                               portfolio along the Bayesian
+#'                               efficient frontier
+#'                   weights: the weights of each portfolio along
+#'                            the Bayesian efficient frontier
 #'
-#' \deqn{ w_{rB}^{(i)} = argmax_{w \in C, w' \Sigma_{1} w \leq \gamma_{\Sigma}^{(i)} } \big\{w' \mu^{1} - \gamma _{\mu} \sqrt{w' \Sigma_{1} w} \big\},
-#' \gamma_{\mu} \equiv \sqrt{ \frac{q_{\mu}^{2}}{T_{1}} \frac{v_{1}}{v_{1} - 2} }
-#' \gamma_{\Sigma}^{(i)} \equiv \frac{v^{(i)}}{ \frac{ \nu_{1}}{\nu_{1}+N+1} + \sqrt{ \frac{2\nu_{1}^{2}q_{\Sigma}^{2}}{ (\nu_{1}+N+1)^{3} } } } }
+#' \deqn{ w_{rB}^{(i)} = argmax_{w \in C, w' \Sigma_{1} w \leq
+#' \gamma_{\Sigma}^{(i)} } \big\{w' \mu_{1} - \gamma_{\mu}
+#' \sqrt{w' \Sigma_{1} w} \big\},
+#'
+#' \gamma_{\mu} \equiv \sqrt{ \frac{q_{\mu}^{2}}{T_{1}}
+#' \frac{\nu_{1}}{\nu_{1} - 2} }
+#'
+#' \gamma_{\Sigma}^{(i)} \equiv \frac{v^{(i)}}{ \frac{ \nu_{1}}{\nu_{1}+N+1} +
+#' \sqrt{ \frac{2\nu_{1}^{2}q_{\Sigma}^{2}}{ (\nu_{1}+N+1)^{3} } } } }
 #' @references
 #' A. Meucci - Robust Bayesian Allocation - See formula (19) - (21)
 #' \url{ http://papers.ssrn.com/sol3/papers.cfm?abstract_id=681553 }
 #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}
 #' @export
-robustBayesianPortfolioOptimization = function( mean_post , cov_post , nu_post , time_post, riskAversionMu = .1 , riskAversionSigma = .1 , discretizations = 10 , longonly = FALSE , volatility )
-{
-  # parameter checks
-  N = length( mean ) # number of assets
-  if ( ( N < 2 ) == TRUE ) { stop( "Requires a minimum of two assets to perform optimization" ) }
-  if ( discretizations < 1 ) { stop( "Number of discretizations must be an integer greater than 1" ) }
-  if ( volatility < 0 ) { stop( "Volatility cannot be a negative number" ) }
-  if ( nu_post < 3 ) { stop( "nu_post must be greater than 2 otherwise g_m is undefined " ) }
-  if ( riskAversionMu < 0 ) { stop( "riskAversionMu must be a positive number" ) }
-  if ( riskAversionSigma < 0 ) { stop( "riskAversionSigma must be a positive number" ) }
-
+
+robustBayesianPortfolioOptimization <- function(mean_post, cov_post, nu_post,
+                                                time_post, riskAversionMu = .1,
+                                                riskAversionSigma = .1,
+                                                discretizations = 10,
+                                                longonly = FALSE, volatility) {
+  # parameter checks
+  N <- length(mean_post) # number of assets
+  if (N < 2)
+    stop("Requires a minimum of two assets to perform optimization")
+  if (discretizations < 1)
+    stop("Number of discretizations must be an integer greater than 1")
+  if (volatility < 0)
+    stop("Volatility cannot be a negative number")
+  if (nu_post < 3)
+    stop("nu_post must be greater than 2 otherwise g_m is undefined ")
+  if (riskAversionMu < 0)
+    stop("riskAversionMu must be a positive number")
+  if (riskAversionSigma < 0)
+    stop("riskAversionSigma must be a positive number")
+
   # construct Bayesian efficient frontier
-  bayesianFrontier = efficientFrontier( discretizations , cov_post , mean_post , longonly = TRUE ) # returns a list of returns, volatility, and assets weights along the posterior frontier. Each row represents a point on the frontier
-
-  # measure gamma-m and gamma-s to identify which portfolios along the frontier are robust
-  quantileMeanSquared = qchisq( riskAversionMu , N ) # the value of q-u is typically set to the quantile of the chi-squared distribution with N degrees of freedom (formula 6)
-
-  # g_m is defined as a constraint on the optimal robust portfolio such that the variance of the robust portfolio must be less than gamma-m
-  g_m = sqrt( quantileMeanSquared / time_post * nu_post / ( nu_post - 2 ) ) # gamma-m (formula 20)
-
-  quantileCovSquared = qchisq( riskAversionSigma , N * ( N + 1 ) / 2 ) # from formula 7. N*(N+1)/2 is the degrees of freedom in a symmetric matrix (number of unique elements)
-  g_s = volatility / ( nu_post / ( nu_post + N + 1 ) + sqrt( 2 * nu_post * nu_post * quantileCovSquared / ( ( nu_post + N + 1 ) ^ 3 ) ) ) # gamma-sigma (formula 21) corresponding to the i'th portfolio along the sample efficient frontier
-
+  # returns a list of returns, volatility, and assets weights along the
+  # posterior frontier. Each row represents a point on the frontier
+  bayesianFrontier <- efficientFrontier(discretizations, cov_post, mean_post,
+                                        longonly = TRUE)
+
+  # measure gamma-m and gamma-s to identify which portfolios along the frontier
+  # are robust
+
+  # the value of q-u is typically set to the quantile of the chi-squared
+  # distribution with N degrees of freedom (formula 6)
+  quantileMeanSquared <- qchisq(riskAversionMu, N)
+
+  # g_m is the penalty on estimation risk in the mean: it multiplies the
+  # portfolio volatility term in the robust objective (formula 19)
+
+  # gamma-m (formula 20)
+  g_m <- sqrt(quantileMeanSquared / time_post * nu_post / (nu_post - 2))
+
+  # from formula 7. N*(N+1)/2 is the degrees of freedom in a symmetric matrix
+  # (number of unique elements)
+  quantileCovSquared <- qchisq(riskAversionSigma, N * (N + 1) / 2)
+  # gamma-sigma (formula 21) corresponding to the i'th portfolio along the
+  # sample efficient frontier
+  g_s <- volatility / (nu_post / (nu_post + N + 1) + sqrt(2 * nu_post *
+                       nu_post * quantileCovSquared / ((nu_post + N + 1) ^ 3)))
+
   # initialize parameters
-  target = NULL
+  target <- NULL

-  # for each of the portfolios along the efficient Bayesian frontier identify the most robust portfolio
-  for( k in 1:( discretizations - 1 ) )
-  {
-    weightsBay = bayesianFrontier[[ "weights" ]][ k , ]
-
-    # reject portfolios that do not satisfy the constraints of formula 19 (i.e. Bayesian portfolios that are not robust, for example, the portfolios at the limit -- 100% confidence in prior or 100% confidence in sample)
-    # identify Robust Bayesian frontier which is a subset of the Bayesian frontier that is further shrunk to toward the global minimumm variance portfolio
-    # and even more closely tight to the right of the efficient frontier
-    if ( weightsBay %*% cov_post %*% weightsBay <= g_s ) # constraint for formula 19
-    { target = c( target , weightsBay %*% mean_post - g_m * sqrt( weightsBay %*% cov_post %*% weightsBay )) } # formula 19
-    else { target = c( target , -999999999 ) } # if the Bayesian efficient portfolio does not satisfy the constraint we assign a large negative value (we will reject these portfolios in the next step)
-  }

+  # for each of the portfolios along the efficient Bayesian frontier identify
+  # the most robust portfolio
+  for (k in 1:(discretizations - 1)) {
+    weightsBay <- bayesianFrontier[["weights"]][k, ]
+    # reject portfolios that do not satisfy the constraints of formula 19 (i.e.
+    # Bayesian portfolios that are not robust, for example, the portfolios at
+    # the limit -- 100% confidence in prior or 100% confidence in sample)
+    #
+    # identify the Robust Bayesian frontier, which is a subset of the
+    # Bayesian frontier that is further shrunk toward the global minimum
+    # variance portfolio and sits even more tightly to the right of the
+    # efficient frontier

-  maxTarget = max( target )
-  if ( maxTarget == -999999999 ) { stop( "No robust portfolio found within credibility set. Try increasing volatility or adjusting risk aversion parameters." ) }
-  maxIndex = which( target == maxTarget , arr.ind = TRUE ) # identify most robust Bayesian portfolio
-  if ( length( maxIndex ) > 1 ) { stop( "The number of robust portfolios identified is greater than 1. Debug. " )}
-
-  # identify Robust portfolio as a subset of Bayesian frontier
-  robustPortfolio = list( returns = bayesianFrontier[[ "returns" ]][ maxIndex ] ,
-                          volatility = bayesianFrontier[[ "volatility" ]][ maxIndex ] ,
-                          weights = bayesianFrontier[[ "weights" ]][ maxIndex , ] )
-
-  return( list( bayesianFrontier = bayesianFrontier , robustPortfolio = robustPortfolio , g_m = g_m , g_s = g_s ) )
-
+    # constraint for formula 19
+    if (weightsBay %*% cov_post %*% weightsBay <= g_s) {
+      # formula 19
+      target <- c(target, weightsBay %*% mean_post - g_m * sqrt(weightsBay %*%
+                  cov_post %*% weightsBay))
+    } else {
+      # if the Bayesian efficient portfolio does not satisfy the constraint we
+      # assign a large negative value (we will reject these portfolios in the
+      # next step)
+      target <- c(target, -999999999)
+    }
+  }
+
+  maxTarget <- max(target)
+  if (maxTarget == -999999999) {
+    stop("No robust portfolio found within credibility set. Try increasing
+         volatility or adjusting risk aversion parameters.")
+  }
+  # identify most robust Bayesian portfolio
+  maxIndex <- which(target == maxTarget, arr.ind = TRUE)
+  if (length(maxIndex) > 1)
+    stop("The number of robust portfolios identified is greater than 1. Debug")
+
+  # identify Robust portfolio as a subset of Bayesian frontier
+  robustPortfolio <- list(returns = bayesianFrontier[["returns"]][maxIndex],
+                        volatility = bayesianFrontier[["volatility"]][maxIndex],
+                        weights = bayesianFrontier[["weights"]][maxIndex, ])
+
+  return(list(bayesianFrontier = bayesianFrontier,
+              robustPortfolio = robustPortfolio,
+              g_m = g_m, g_s = g_s))
+
   # Test that the number of returns portfolios is <= number of discretizations
   # Test that there are no NA's in the return results
 }

 # Example:
-  # robustBayesianPortfolioOptimization( mean_post = mean_post , cov_post = cov_post , nu_post = 156 , riskAversionMu = .1 , riskAversionSigma = .1 , discretizations = 10 , longonly = TRUE , volatility = .10 )
+  # robustBayesianPortfolioOptimization(mean_post = mean_post,
+  #                  cov_post = cov_post, nu_post = 156, riskAversionMu = .1,
+  #                  riskAversionSigma = .1, discretizations = 10,
+  #                  longonly = TRUE, volatility = .10)

-#' Constructs the partial confidence posterior based on a prior, sample mu/covariance, and relative confidence in the prior
+#' @title Constructs the partial confidence posterior based on a prior, sample
+#' mu/covariance, and relative confidence in the prior
 #'
-#' Constructs the partial confidence posterior based on prior (mean vector and covariance matrix) and a posterior
-#' with a relative confidence in the prior vs. the sample data
+#' @description Constructs the partial confidence posterior based on prior
+#' (mean vector and covariance matrix) and a posterior with a relative
+#' confidence in the prior vs. the sample data
 #'
 #' \deqn{ T_{1} \equiv T_{0} + T
-#' \\ \mu_{1} \equiv \frac{1}{ T_{1} } \big( T_{0} \mu_{0} + T \hat{ \mu } \big)
+#' \\ \mu_{1} \equiv \frac{1}{T_{1}} \big(T_{0} \mu_{0} + T \hat{\mu} \big)
 #' \\ \nu_{1} \equiv \nu_{0} + T
-#' \\ \Sigma_{1} \equiv \big( \nu_{0} \Sigma_{0} + T \hat{ \Sigma } + \frac{ \big(\mu_{0} - \hat{\mu} \big) \big(\mu_{0} - \hat{\mu} \big)' }{ \big( \frac{1}{T} + \frac{1}{T_{0} } \big) } }
+#' \\ \Sigma_{1} \equiv \frac{1}{\nu_{1}} \big( \nu_{0} \Sigma_{0} +
+#' T \hat{\Sigma} +
+#' \frac{ \big(\mu_{0} - \hat{\mu} \big) \big(\mu_{0} - \hat{\mu} \big)' }
+#' { \frac{1}{T} + \frac{1}{T_{0}} } \big) }
 #' @param mean_sample the mean of the sample returns
 #' @param cov_sample the sample covariance matrix
 #' @param mean_prior the prior for the mean returns
 #' @param cov_prior the covariance matrix prior
-#' @param relativeConfidenceInMeanPrior a numeric with the relative confidence in the mean prior vs. the sample mean. A value of 2 indicates twice as much weight to assign to the prior vs. the sample data. Must be greater than or equal to zero
-#' @param relativeConfidenceInCovPrior a numeric with the relative confidence in the covariance prior vs. the sample covariance. A value of 2 indicates twice as much weight to assign to the prior vs. the sample data. Must be greater than or equal to zero
-#' @param sampleSize a numeric with the number of rows in the sample data used to estimate mean_sample and cov_sample
+#' @param relativeConfidenceInMeanPrior a numeric with the relative confidence
+#'                                      in the mean prior vs. the sample mean.
+#'                                      A value of 2 indicates twice as much
+#'                                      weight to assign to the prior vs. the
+#'                                      sample data. Must be greater than or
+#'                                      equal to zero
+#' @param relativeConfidenceInCovPrior a numeric with the relative confidence
+#'                                      in the covariance prior vs. the sample
+#'                                      covariance. A value of 2 indicates twice
+#'                                      as much weight to assign to the prior vs
+#'                                      the sample data. Must be greater than or
+#'                                      equal to zero
+#' @param sampleSize                    a numeric with the number of rows in the
+#'                                      sample data used to estimate mean_sample
+#'                                      and cov_sample
 #'
-#' @return mean_post a vector with the confidence weighted posterior mean vector of asset returns blended from the prior and sample mean vector
-#' @return cov_post a covariance matrix the confidence weighted posterior covariance matrix of asset returns blended from the prior and sample covariance matrix
+#' @return mean_post a vector with the confidence weighted posterior
+#'                   mean vector of asset returns blended from the
+#'                   prior and sample mean vector
+#' @return cov_post  a covariance matrix: the confidence weighted
+#'                   posterior covariance matrix of asset returns
+#'                   blended from the prior and sample covariance
+#'                   matrix
 #' @return time_post a numeric, the posterior confidence T1 = T0 + T
 #' @return nu_post   a numeric, the posterior confidence nu_1 = nu_0 + T
-#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}
-#' @export
+#'
 #' @references
 #' A.
Meucci - Robust Bayesian Allocation - See formula (11) - (14) #' \url{ http://papers.ssrn.com/sol3/papers.cfm?abstract_id=681553 } -PartialConfidencePosterior = function( mean_sample , cov_sample , mean_prior , cov_prior , relativeConfidenceInMeanPrior , relativeConfidenceInCovPrior , sampleSize ) -{ +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} +#' @export + +PartialConfidencePosterior <- function(mean_sample, cov_sample, mean_prior, + cov_prior, relativeConfidenceInMeanPrior, + relativeConfidenceInCovPrior, sampleSize){ # parameter checks - if ( (length( mean_sample ) == nrow( cov_sample )) == FALSE ) { stop( "number of assets in mean must match number of assets in covariance matrix")} - if ( (length( mean_sample ) == length( mean_prior )) == FALSE ) { stop( "number of assets in mean must match number of assets in mean_prior")} - if ( ( nrow( cov_sample ) == nrow( cov_prior ) ) == FALSE ) { stop( "number of assets in sample covariance must match number of assets in prior covariance matrix")} - N = length( mean_sample ) # number of assets - if ( ( N < 2 ) == TRUE ) { stop( "requires a minimum of two assets to perform optimization" ) } - if ( relativeConfidenceInMeanPrior < 0 ) { stop( "Confidence in mean prior must be a number greater than or equal to zero" ) } - if ( relativeConfidenceInCovPrior < 0 ) { stop( "Confidence in covariance prior must be a number greater than or equal to zero" ) } - - # Investor's experience and confidence is summarized by mean_prior, cov_prior, time_prior, and nu_prior - # nu_prior = confidence on the inverse of cov_prior (see 7.25 - Meucci Risk & Asset Allocation Text). A larger value of nu_prior corresponds to little uncertainty about the view on inverse of Sigma, and thus Sigma - # confidenceInPrior = time_prior = T0 = confidence in the prior view mean_prior - confidenceInSample = sampleSize # typically the number of observations on which the mean_sample and cov_sample is based on - confidenceInMeanPrior = sampleSize * relativeConfidenceInMeanPrior - confidenceInCovPrior = sampleSize * relativeConfidenceInCovPrior - + if ((length(mean_sample) == nrow(cov_sample)) == FALSE) + stop("number of assets in mean must match number of assets in cov matrix") + if ((length(mean_sample) == length(mean_prior)) == FALSE) + stop("number of assets in mean must match number of assets in mean_prior") + if ((nrow(cov_sample) == nrow(cov_prior)) == FALSE) { + stop("number of assets in sample covariance must match number of assets in + prior covariance matrix") + } + N <- length(mean_sample) # number of assets + if ((N < 2) == TRUE) + stop("requires a minimum of two assets to perform optimization") + if (relativeConfidenceInMeanPrior < 0) + stop("Confidence in mean prior must be a number >= 0") + if (relativeConfidenceInCovPrior < 0) + stop("Confidence in covariance prior must be a number >= 0") + + # Investor's experience and confidence is summarized by mean_prior, cov_prior, + # time_prior, and nu_prior. + # nu_prior = confidence on the inverse of cov_prior (see 7.25 - Meucci Risk & + # Asset Allocation Text). 
A larger value of nu_prior corresponds to little + # uncertainty about the view on inverse of Sigma, and thus Sigma + # confidenceInPrior=time_prior=T0=confidence in the prior view mean_prior + # typically the number of observations on which the mean_sample and cov_sample + # is based on + confidenceInSample <- sampleSize + confidenceInMeanPrior <- sampleSize * relativeConfidenceInMeanPrior + confidenceInCovPrior <- sampleSize * relativeConfidenceInCovPrior + # blend prior and the sample data to construct posterior - time_post = confidenceInSample + confidenceInMeanPrior - nu_post = confidenceInSample + confidenceInCovPrior - mean_post = 1/time_post * ( mean_sample * confidenceInSample + mean_prior * confidenceInMeanPrior ) - cov_post = 1/nu_post * (cov_sample * confidenceInSample + cov_prior * confidenceInCovPrior + ( mean_sample - mean_prior ) %*% t( ( mean_sample - mean_prior ) ) / ( 1 / confidenceInSample + 1 / confidenceInMeanPrior ) ) - - return( list( mean_post = mean_post , cov_post = cov_post , time_post = time_post , nu_post = nu_post ) ) - + time_post <- confidenceInSample + confidenceInMeanPrior + nu_post <- confidenceInSample + confidenceInCovPrior + mean_post <- 1 / time_post * (mean_sample * confidenceInSample + mean_prior * + confidenceInMeanPrior) + cov_post <- 1 / nu_post * (cov_sample * confidenceInSample + cov_prior * + confidenceInCovPrior + (mean_sample - mean_prior) %*% + t((mean_sample - mean_prior)) / (1 / confidenceInSample + + 1 / confidenceInMeanPrior)) + + return(list(mean_post = mean_post, cov_post = cov_post, time_post = time_post, + nu_post = nu_post)) + # TODO: Test expectations - # Test 1: If relative confidence in prior is 0, then returns mean_sample and cov_sample - # Test 2: If relative confidence in prior is 1, and sampleSize = 0 then returns mean_prior and cov_prior - # Test 3: As the number of sample size observations increase, the posterior mean and covariance shrinks toward mean_sample and cov_sample -} \ No newline at end of file + # Test 1: If relative confidence in prior is 0, then returns mean_sample and + # cov_sample + # Test 2: If relative confidence in prior is 1, and sampleSize = 0 then + # returns mean_prior and cov_prior + # Test 3: As the number of sample size observations increase, the posterior + # mean and covariance shrinks toward mean_sample and cov_sample +} Modified: pkg/Meucci/demo/RobustBayesianAllocation.R =================================================================== --- pkg/Meucci/demo/RobustBayesianAllocation.R 2015-08-07 08:33:03 UTC (rev 3926) +++ pkg/Meucci/demo/RobustBayesianAllocation.R 2015-08-07 15:06:53 UTC (rev 3927) @@ -4,156 +4,227 @@ # See MATLAB package "Meucci_RobustBayesian" for original MATLAB # source on www.symmys.com #################################################################### - +library(MASS) #################################################################### # inputs #################################################################### -J = 50 # number of simulations [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3927 From noreply at r-forge.r-project.org Sat Aug 8 20:42:16 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 8 Aug 2015 20:42:16 +0200 (CEST) Subject: [Returnanalytics-commits] r3928 - in pkg/FactorAnalytics: . 
R man
Message-ID: <20150808184216.8E6FB18794A@r-forge.r-project.org>

Author: pragnya
Date: 2015-08-08 20:42:15 +0200 (Sat, 08 Aug 2015)
New Revision: 3928

Modified:
   pkg/FactorAnalytics/DESCRIPTION
   pkg/FactorAnalytics/R/fitTsfm.R
   pkg/FactorAnalytics/R/fmCov.R
   pkg/FactorAnalytics/R/fmmcSemiParam.R
   pkg/FactorAnalytics/man/fitTsfm.Rd
   pkg/FactorAnalytics/man/fmCov.Rd
Log:
Updated Description; Add option to pass factor.cov to fmCov; Edits to fitTsfm, fmmcSemiParam.

Modified: pkg/FactorAnalytics/DESCRIPTION
===================================================================
--- pkg/FactorAnalytics/DESCRIPTION	2015-08-07 15:06:53 UTC (rev 3927)
+++ pkg/FactorAnalytics/DESCRIPTION	2015-08-08 18:42:15 UTC (rev 3928)
@@ -1,19 +1,16 @@
 Package: factorAnalytics
 Type: Package
 Title: Factor Analytics
-Version:2.0.23
-Date:2015-07-24
+Version: 2.0.24
+Date: 2015-08-08
 Author: Eric Zivot, Sangeetha Srinivasan and Yi-An Chen
 Maintainer: Sangeetha Srinivasan
-Description: An R package for the estimation and risk analysis of linear factor
-    models for asset returns and portfolios. It contains model fitting methods
-    for the three major types of factor models: time series (or, macroeconomic)
-    factor model, fundamental factor model and statistical factor model. They
-    allow for different types of distributions to be specified for modeling the
-    fat-tailed behavior of financial returns, including Edgeworth expansions.
-    Risk analysis measures such as VaR and ES, as well as performance
-    attribution for factor models (factor-contributed vs idiosyncratic returns)
-    are included.
+Description: Linear factor model fitting for asset returns (three major types-
+    time series, fundamental and statistical factor models); related risk
+    (volatility, VaR and ES) and performance attribution (factor-contributed vs
+    idiosyncratic returns); tabular displays of risk and performance reports;
+    factor model Monte Carlo, single and multiple imputation methods for
+    simulating returns and backfilling unequal histories.
 License: GPL-2
 Depends:
     R (>= 3.0.0),

Modified: pkg/FactorAnalytics/R/fitTsfm.R
===================================================================
--- pkg/FactorAnalytics/R/fitTsfm.R	2015-08-07 15:06:53 UTC (rev 3927)
+++ pkg/FactorAnalytics/R/fitTsfm.R	2015-08-08 18:42:15 UTC (rev 3928)
@@ -27,7 +27,7 @@
 #' \code{\link[leaps]{regsubsets}}; chooses the best performing subset of any
 #' given size or within a range of subset sizes. Different methods such as
 #' exhaustive search (default), forward or backward stepwise, or sequential
-#' replacement can be employed.See \code{\link{fitTsfm.control}} for more
+#' replacement can be employed. See \code{\link{fitTsfm.control}} for more
 #' details on the control arguments.
 #'
 #' \code{variable.selection="lars"} corresponds to least angle regression
@@ -51,21 +51,19 @@
 #' with \code{xts} objects internally and colnames won't be left as they are.
 #' }
 #'
-#' @param asset.names vector containing names of assets, whose returns or
-#' excess returns are the dependent variable.
-#' @param factor.names vector containing names of the macroeconomic factors.
+#' @param asset.names vector of asset names, whose returns are the dependent
+#' variable in the factor model.
+#' @param factor.names vector containing names of the factors.
 #' @param mkt.name name of the column for market returns. Default is \code{NULL}.
-#' @param rf.name name of the column of risk free rate variable to calculate -#' excess returns for all assets (in \code{asset.names}) and factors (in -#' \code{factor.names}). Default is \code{NULL}, and no action is taken. +#' @param rf.name name of the column for the risk free rate; if excess returns +#' should be calculated for all assets and factors. Default is \code{NULL}. #' @param data vector, matrix, data.frame, xts, timeSeries or zoo object -#' containing column(s) named in \code{asset.names}, \code{factor.names} and +#' containing the columns \code{asset.names}, \code{factor.names}, and #' optionally, \code{mkt.name} and \code{rf.name}. #' @param fit.method the estimation method, one of "LS", "DLS" or "Robust". #' See details. Default is "LS". #' @param variable.selection the variable selection method, one of "none", #' "stepwise","subsets","lars". See details. Default is "none". -#' \code{mkt.name} is required if any of these options are to be implemented. #' @param control list of control parameters. Refer to #' \code{\link{fitTsfm.control}} for details. #' @param ... arguments passed to \code{\link{fitTsfm.control}} @@ -76,7 +74,7 @@ #' The generic accessor functions \code{coef}, \code{fitted} and #' \code{residuals} extract various useful features of the fit object. #' Additionally, \code{fmCov} computes the covariance matrix for asset returns -#' based on the fitted factor model +#' based on the fitted factor model. #' #' An object of class \code{"tsfm"} is a list containing the following #' components: Modified: pkg/FactorAnalytics/R/fmCov.R =================================================================== --- pkg/FactorAnalytics/R/fmCov.R 2015-08-07 15:06:53 UTC (rev 3927) +++ pkg/FactorAnalytics/R/fmCov.R 2015-08-08 18:42:15 UTC (rev 3928) @@ -1,128 +1,132 @@ -#' @title Covariance Matrix for assets' returns from fitted factor model. -#' -#' @description Computes the covariance matrix for assets' returns based on a -#' fitted factor model. This is a generic function with methods for classes -#' \code{tsfm}, \code{sfm} and \code{ffm}. -#' -#' @details \code{R(i, t)}, the return on asset \code{i} at time \code{t}, -#' is assumed to follow a factor model of the form, \cr \cr -#' \code{R(i,t) = alpha(i) + beta(i)*f(t) + e(i,t)}, \cr \cr -#' where, \code{alpha(i)} is the intercept, \code{f(t)} is a {K x 1} vector of -#' factor returns at time \code{t}, \code{beta(i)} is a \code{1 x K} vector of -#' factor exposures and the error terms \code{e(i,t)} are serially -#' uncorrelated across time and contemporaneously uncorrelated across assets -#' so that \code{e(i,t) ~ iid(0,sig(i)^2)}. Thus, the variance of asset -#' \code{i}'s return is given by \cr \cr -#' \code{var(R(i)) = beta(i)*cov(F)*tr(beta(i)) + sig(i)^2}. \cr \cr -#' And, the \code{N x N} covariance matrix of asset returns is \cr \cr -#' \code{var(R) = B*cov(F)*tr(B) + D}, \cr \cr -#' where, B is the \code{N x K} matrix of factor betas and \code{D} is a -#' diagonal matrix with \code{sig(i)^2} along the diagonal. -#' -#' The method for computing covariance can be specified via the \dots -#' argument. Note that the default of \code{use="pairwise.complete.obs"} for -#' handling NAs restricts the method to "pearson". -#' -#' @param object fit object of class \code{tsfm}, \code{sfm} or \code{ffm}. -#' @param use an optional character string giving a method for computing -#' covariances in the presence of missing values. 
This must be (an -#' abbreviation of) one of the strings "everything", "all.obs", -#' "complete.obs", "na.or.complete", or "pairwise.complete.obs". Default is -#' "pairwise.complete.obs". -#' @param ... optional arguments passed to \code{\link[stats]{cov}}. -#' -#' @return The computed \code{N x N} covariance matrix for asset returns based -#' on the fitted factor model. -#' -#' @author Eric Zivot, Yi-An Chen and Sangeetha Srinivasan. -#' -#' @references -#' Zivot, E., & Jia-hui, W. A. N. G. (2006). Modeling Financial Time -#' Series with S-Plus Springer-Verlag. -#' -#' @seealso \code{\link{fitTsfm}}, \code{\link{fitSfm}}, \code{\link{fitFfm}} -#' -#' \code{\link[stats]{cov}} for more details on arguments \code{use} and -#' \code{method}. -#' -#' @examples -#' # Time Series Factor model -#' data(managers) -#' fit <- fitTsfm(asset.names=colnames(managers[, (1:6)]), -#' factor.names=c("EDHEC.LS.EQ","SP500.TR"), data=managers) -#' fmCov(fit) -#' -#' # Statistical Factor Model -#' data(StockReturns) -#' sfm.pca.fit <- fitSfm(r.M, k=2) -#' fmCov(sfm.pca.fit) -#' -#' \dontrun{ -#' # Fundamental Factor Model -#' data(stock) -#' # there are 447 assets -#' exposure.names <- c("BOOK2MARKET", "LOG.MARKETCAP") -#' beta.mat <- subset(stock, DATE=="2003-12-31")[, exposure.names] -#' beta.mat1 <- cbind(rep(1, 447), beta.mat1) -#' # FM return covariance -#' fit.fund <- fitFfm(exposure.names=c("BOOK2MARKET", "LOG.MARKETCAP"), -#' data=stock, returnsvar="RETURN", datevar="DATE", -#' assetvar="TICKER", wls=TRUE, regression="classic", -#' covariance="classic", full.resid.cov=FALSE) -#' ret.cov.fundm <- fmCov(beta.mat1,fit.fund$factor.cov$cov,fit.fund$resid.sd) -#' fit.fund$returns.cov$cov == ret.cov.fundm -#' } -#' -#' @rdname fmCov -#' @export - -fmCov <- function(object, ...){ - # check input object validity - if (!inherits(object, c("tsfm", "sfm", "ffm"))) { - stop("Invalid argument: Object should be of class 'tsfm', 'sfm' or 'ffm'.") - } - UseMethod("fmCov") -} - -#' @rdname fmCov -#' @method fmCov tsfm -#' @export - -fmCov.tsfm <- function(object, use="pairwise.complete.obs", ...) { - - # get parameters and factors from factor model - beta <- as.matrix(object$beta) - # convert NAs to 0 to enable matrix multiplication - beta[is.na(beta)] <- 0 - sig2.e = object$resid.sd^2 - factor <- as.matrix(object$data[, object$factor.names]) - - # factor covariance matrix - factor.cov = cov(factor, use=use, ...) - - # residual covariance matrix D - if (length(sig2.e) > 1) { - D.e = diag(sig2.e) - } else { - D.e = as.vector(sig2.e) - } - - cov.fm = beta %*% factor.cov %*% t(beta) + D.e - - if (any(diag(chol(cov.fm))==0)) { - warning("Covariance matrix is not positive definite!") - } - - return(cov.fm) -} - -#' @rdname fmCov -#' @method fmCov sfm -#' @export - -fmCov.sfm <- function(object, use="pairwise.complete.obs", ...) { - - # already computed via fitSfm function - return(object$Omega) -} - +#' @title Covariance Matrix for assets' returns from fitted factor model. +#' +#' @description Computes the covariance matrix for assets' returns based on a +#' fitted factor model. This is a generic function with methods for classes +#' \code{tsfm}, \code{sfm} and \code{ffm}. 
+#'
+#' @details \code{R(i, t)}, the return on asset \code{i} at time \code{t},
+#' is assumed to follow a factor model of the form, \cr \cr
+#' \code{R(i,t) = alpha(i) + beta(i)*f(t) + e(i,t)}, \cr \cr
+#' where \code{alpha(i)} is the intercept, \code{f(t)} is a \code{K x 1}
+#' vector of factor returns at time \code{t}, \code{beta(i)} is a
+#' \code{1 x K} vector of factor exposures and the error terms \code{e(i,t)}
+#' are serially uncorrelated across time and contemporaneously uncorrelated
+#' across assets so that \code{e(i,t) ~ iid(0,sig(i)^2)}. Thus, the variance
+#' of asset \code{i}'s return is given by \cr \cr
+#' \code{var(R(i)) = beta(i)*cov(F)*tr(beta(i)) + sig(i)^2}. \cr \cr
+#' And, the \code{N x N} covariance matrix of asset returns is \cr \cr
+#' \code{var(R) = B*cov(F)*tr(B) + D}, \cr \cr
+#' where \code{B} is the \code{N x K} matrix of factor betas and \code{D} is a
+#' diagonal matrix with \code{sig(i)^2} along the diagonal.
+#'
+#' The method for computing covariance can be specified via the \dots
+#' argument. Note that the default of \code{use="pairwise.complete.obs"} for
+#' handling NAs restricts the method to "pearson".
+#'
+#' @param object fit object of class \code{tsfm}, \code{sfm} or \code{ffm}.
+#' @param factor.cov factor covariance matrix (optional); defaults to the
+#' sample covariance matrix.
+#' @param use method for computing covariances in the presence of missing
+#' values; one of "everything", "all.obs", "complete.obs", "na.or.complete", or
+#' "pairwise.complete.obs". Default is "pairwise.complete.obs".
+#' @param ... optional arguments passed to \code{\link[stats]{cov}}.
+#'
+#' @return The computed \code{N x N} covariance matrix for asset returns based
+#' on the fitted factor model.
+#'
+#' @author Eric Zivot, Yi-An Chen and Sangeetha Srinivasan.
+#'
+#' @references
+#' Zivot, E. and Wang, J. (2006). Modeling Financial Time Series with S-PLUS.
+#' Springer-Verlag.
+#'
+#' @seealso \code{\link{fitTsfm}}, \code{\link{fitSfm}}, \code{\link{fitFfm}}
+#'
+#' \code{\link[stats]{cov}} for more details on arguments \code{use} and
+#' \code{method}.
+#'
+#' @examples
+#' # Time Series Factor model
+#' data(managers)
+#' fit <- fitTsfm(asset.names=colnames(managers[, (1:6)]),
+#'                factor.names=c("EDHEC.LS.EQ","SP500.TR"), data=managers)
+#' fmCov(fit)
+#'
+#' # Statistical Factor Model
+#' data(StockReturns)
+#' sfm.pca.fit <- fitSfm(r.M, k=2)
+#' fmCov(sfm.pca.fit)
+#'
+#' \dontrun{
+#' # Fundamental Factor Model
+#' data(stock)
+#' # there are 447 assets
+#' exposure.names <- c("BOOK2MARKET", "LOG.MARKETCAP")
+#' beta.mat <- subset(stock, DATE=="2003-12-31")[, exposure.names]
+#' beta.mat1 <- cbind(rep(1, 447), beta.mat)
+#' # FM return covariance
+#' fit.fund <- fitFfm(exposure.names=c("BOOK2MARKET", "LOG.MARKETCAP"),
+#'                    data=stock, returnsvar="RETURN", datevar="DATE",
+#'                    assetvar="TICKER", wls=TRUE, regression="classic",
+#'                    covariance="classic", full.resid.cov=FALSE)
+#' ret.cov.fundm <- fmCov(beta.mat1,fit.fund$factor.cov$cov,fit.fund$resid.sd)
+#' fit.fund$returns.cov$cov == ret.cov.fundm
+#' }
+#'
+#' @rdname fmCov
+#' @export
+
+fmCov <- function(object, ...){
+  # check input object validity
+  if (!inherits(object, c("tsfm", "sfm", "ffm"))) {
+    stop("Invalid argument: Object should be of class 'tsfm', 'sfm' or 'ffm'.")
+  }
+  UseMethod("fmCov")
+}
+
+#' @rdname fmCov
+#' @method fmCov tsfm
+#' @export
+
+fmCov.tsfm <- function(object, factor.cov, use="pairwise.complete.obs", ...)
{
+
+  # get parameters and factors from factor model
+  beta <- as.matrix(object$beta)
+  # convert NAs to 0 to enable matrix multiplication
+  beta[is.na(beta)] <- 0
+  sig2.e = object$resid.sd^2
+  factor <- as.matrix(object$data[, object$factor.names])
+
+  # factor covariance matrix: use the user-supplied matrix if given (after
+  # checking its dimensions), else the sample covariance of the factors
+  if (missing(factor.cov)) {
+    factor.cov = cov(factor, use=use, ...)
+  } else if (!identical(dim(factor.cov),
+                        as.integer(c(ncol(factor), ncol(factor))))) {
+    stop("Invalid argument: factor.cov must be a K x K matrix, where K is
         the number of factors")
+  }
+
+  # residual covariance matrix D
+  if (length(sig2.e) > 1) {
+    D.e = diag(sig2.e)
+  } else {
+    D.e = as.vector(sig2.e)
+  }
+
+  cov.fm = beta %*% factor.cov %*% t(beta) + D.e
+
+  # chol() raises an error on non-positive-definite input, so check the
+  # eigenvalues directly before returning
+  if (min(eigen(cov.fm, symmetric=TRUE, only.values=TRUE)$values) <= 0) {
+    warning("Covariance matrix is not positive definite!")
+  }
+
+  return(cov.fm)
+}
+
+#' @rdname fmCov
+#' @method fmCov sfm
+#' @export
+
+fmCov.sfm <- function(object, use="pairwise.complete.obs", ...) {
+
+  # already computed via fitSfm function
+  return(object$Omega)
+}
+

Modified: pkg/FactorAnalytics/R/fmmcSemiParam.R
===================================================================
--- pkg/FactorAnalytics/R/fmmcSemiParam.R	2015-08-07 15:06:53 UTC (rev 3927)
+++ pkg/FactorAnalytics/R/fmmcSemiParam.R	2015-08-08 18:42:15 UTC (rev 3928)
@@ -102,16 +102,18 @@
   } else {
     fund.names <- rownames(beta)
     N = nrow(beta)
-    if (colnames(beta) != factor.names) {
+    if (ncol(beta)!=length(factor.names) || any(colnames(beta)!=factor.names)) {
       stop("Invalid argument: beta and factor.ret should correspond to the
           same set of factors")
     }
   }
   resid.dist = resid.dist[1]
-  if (!(resid.dist %in% c("normal","Cornish-Fisher","skew-t"))) {
-    stop("Invalid argument: resid.dist must be 'normal','Cornish-Fisher' or
-         'skew-t'")
-  }
+  switch(resid.dist,
+         "normal" = {if (ncol(resid.par)!=1) {stop("Invalid argument: resid.par")}},
+         "Cornish-Fisher" = {if (ncol(resid.par)!=3) {stop("Invalid argument: resid.par")}},
+         "skew-t" = {if (ncol(resid.par)!=4) {stop("Invalid argument: resid.par")}},
+         stop("Invalid argument: resid.dist must be 'normal', 'Cornish-Fisher' or 'skew-t'")
+  )
   boot.method = boot.method[1]
   if (!(boot.method %in% c("random","block"))) {
     stop("Invalid argument: boot.method must be either 'random' or 'block'")
@@ -155,7 +157,7 @@
            "normal" = {sim.resid[,i] <- rnorm(n=B, mean=0, sd=resid.par[i,]) }, # Bx1
            "Cornish-Fisher" = {sim.resid[,i] <- rCornishFisher(n=B, dp=resid.par[i,])},
            "skew-t" = {sim.resid[,i] <- rst(n=B, dp=resid.par[i,])}
-  )
+  )
     sim.fund.ret[,i] = alpha[i,1] + boot.factor.ret %*% t(beta[i,,drop=FALSE]) +
       sim.resid[,i] # Bx1
   }

Modified: pkg/FactorAnalytics/man/fitTsfm.Rd
===================================================================
--- pkg/FactorAnalytics/man/fitTsfm.Rd	2015-08-07 15:06:53 UTC (rev 3927)
+++ pkg/FactorAnalytics/man/fitTsfm.Rd	2015-08-08 18:42:15 UTC (rev 3928)
@@ -19,27 +19,25 @@
 \method{residuals}{tsfm}(object, ...)
 }
 \arguments{
-\item{asset.names}{vector containing names of assets, whose returns or
-excess returns are the dependent variable.}
+\item{asset.names}{vector of asset names, whose returns are the dependent
+variable in the factor model.}
 
-\item{factor.names}{vector containing names of the macroeconomic factors.}
+\item{factor.names}{vector containing names of the factors.}
 
 \item{mkt.name}{name of the column for market returns. Default is
 \code{NULL}.}
 
-\item{rf.name}{name of the column of risk free rate variable to calculate
-excess returns for all assets (in \code{asset.names}) and factors (in
-\code{factor.names}).
Default is \code{NULL}, and no action is taken.}
+\item{rf.name}{name of the column for the risk-free rate; if specified,
+excess returns are calculated for all assets and factors. Default is
+\code{NULL}, in which case no excess returns are computed.}
 
 \item{data}{vector, matrix, data.frame, xts, timeSeries or zoo object
-containing column(s) named in \code{asset.names}, \code{factor.names} and
+containing the columns \code{asset.names}, \code{factor.names}, and
 optionally, \code{mkt.name} and \code{rf.name}.}
 
 \item{fit.method}{the estimation method, one of "LS", "DLS" or "Robust".
 See details. Default is "LS".}
 
 \item{variable.selection}{the variable selection method, one of "none",
-"stepwise","subsets","lars". See details. Default is "none".
-\code{mkt.name} is required if any of these options are to be implemented.}
+"stepwise","subsets","lars". See details. Default is "none".}
 
 \item{control}{list of control parameters. Refer to
 \code{\link{fitTsfm.control}} for details.}
@@ -56,7 +54,7 @@
 The generic accessor functions \code{coef}, \code{fitted} and
 \code{residuals} extract various useful features of the fit object.
 Additionally, \code{fmCov} computes the covariance matrix for asset returns
-based on the fitted factor model
+based on the fitted factor model.
 
 An object of class \code{"tsfm"} is a list containing the following
 components:
@@ -108,7 +106,7 @@
 \code{\link[leaps]{regsubsets}}; chooses the best performing subset of any
 given size or within a range of subset sizes. Different methods such as
 exhaustive search (default), forward or backward stepwise, or sequential
-replacement can be employed.See \code{\link{fitTsfm.control}} for more
+replacement can be employed. See \code{\link{fitTsfm.control}} for more
 details on the control arguments.
 
 \code{variable.selection="lars"} corresponds to least angle regression

Modified: pkg/FactorAnalytics/man/fmCov.Rd
===================================================================
--- pkg/FactorAnalytics/man/fmCov.Rd	2015-08-07 15:06:53 UTC (rev 3927)
+++ pkg/FactorAnalytics/man/fmCov.Rd	2015-08-08 18:42:15 UTC (rev 3928)
@@ -8,7 +8,7 @@
 \usage{
 fmCov(object, ...)
 
-\method{fmCov}{tsfm}(object, use = "pairwise.complete.obs", ...)
+\method{fmCov}{tsfm}(object, factor.cov, use = "pairwise.complete.obs", ...)
 
 \method{fmCov}{sfm}(object, use = "pairwise.complete.obs", ...)
 }
@@ -17,11 +17,12 @@
 \item{...}{optional arguments passed to \code{\link[stats]{cov}}.}
 
-\item{use}{an optional character string giving a method for computing
-covariances in the presence of missing values. This must be (an
-abbreviation of) one of the strings "everything", "all.obs",
-"complete.obs", "na.or.complete", or "pairwise.complete.obs". Default is
-"pairwise.complete.obs".}
+\item{factor.cov}{factor covariance matrix (optional); defaults to the
+sample covariance matrix.}
+
+\item{use}{method for computing covariances in the presence of missing
+values; one of "everything", "all.obs", "complete.obs", "na.or.complete", or
+"pairwise.complete.obs".
Default is "pairwise.complete.obs".} } \value{ The computed \code{N x N} covariance matrix for asset returns based From noreply at r-forge.r-project.org Sun Aug 9 01:05:41 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 9 Aug 2015 01:05:41 +0200 (CEST) Subject: [Returnanalytics-commits] r3929 - pkg/Dowd/R Message-ID: <20150808230541.BBC78186A65@r-forge.r-project.org> Author: dacharya Date: 2015-08-09 01:05:41 +0200 (Sun, 09 Aug 2015) New Revision: 3929 Added: pkg/Dowd/R/tVaRPlot2DHP.R Log: Function tVaRPlot2DHP added Added: pkg/Dowd/R/tVaRPlot2DHP.R =================================================================== --- pkg/Dowd/R/tVaRPlot2DHP.R (rev 0) +++ pkg/Dowd/R/tVaRPlot2DHP.R 2015-08-08 23:05:41 UTC (rev 3929) @@ -0,0 +1,136 @@ +#' Plots t VaR against holding period +#' +#' Plots the VaR of a portfolio against holding period assuming that P/L are +#' t- distributed, for specified confidence level and holding period. +#' +#' @param ... The input arguments contain either return data or else mean and +#' standard deviation data. Accordingly, number of input arguments is either 4 +#' or 5. In case there 4 input arguments, the mean and standard deviation of +#' data is computed from return data. See examples for details. +#' +#' returns Vector of daily P/L data data +#' +#' mu Mean of daily P/L data data +#' +#' sigma Standard deviation of daily P/L data data +#' +#' df Number of degrees of freedom in the t distribution +#' +#' cl VaR confidence level and must be a scalar +#' +#' hp VaR holding period and must be a vector +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Computes VaR given P/L data data +#' data <- runif(5, min = 0, max = .2) +#' tVaRPlot2DHP(returns = data, df = 6, cl = .95, hp = 60:90) +#' +#' # Computes VaR given mean and standard deviation of return data +#' tVaRPlot2DHP(mu = .012, sigma = .03, df = 6, cl = .99, hp = 40:80) +#' +#' +#' @export +tVaRPlot2DHP <- function(...){ + # Determine if there are four or five arguments and ensure that arguments are + # read as intended + if (nargs() < 4) { + stop("Too few arguments") + } + if (nargs() > 5) { + stop("Too many arguments") + } + args <- list(...) 
+  if (nargs() == 5) {
+    mu <- args$mu
+    df <- args$df
+    cl <- args$cl
+    sigma <- args$sigma
+    hp <- args$hp
+  }
+  if (nargs() == 4) {
+    mu <- mean(args$returns)
+    df <- args$df
+    cl <- args$cl
+    sigma <- sd(args$returns)
+    hp <- args$hp
+  }
+
+  # Check that inputs have correct dimensions
+  mu <- as.matrix(mu)
+  mu.row <- dim(mu)[1]
+  mu.col <- dim(mu)[2]
+  if (max(mu.row, mu.col) > 1) {
+    stop("Mean must be a scalar")
+  }
+  sigma <- as.matrix(sigma)
+  sigma.row <- dim(sigma)[1]
+  sigma.col <- dim(sigma)[2]
+  if (max(sigma.row, sigma.col) > 1) {
+    stop("Standard deviation must be a scalar")
+  }
+  cl <- as.matrix(cl)
+  cl.row <- dim(cl)[1]
+  cl.col <- dim(cl)[2]
+  if (max(cl.row, cl.col) > 1) {
+    stop("Confidence level must be a scalar")
+  }
+  hp <- as.matrix(hp)
+  hp.row <- dim(hp)[1]
+  hp.col <- dim(hp)[2]
+  if (min(hp.row, hp.col) > 1) {
+    stop("Holding period must be a vector")
+  }
+  df <- as.matrix(df)
+  df.row <- dim(df)[1]
+  df.col <- dim(df)[2]
+  if (max(df.row, df.col) > 1) {
+    stop("Number of degrees of freedom must be a scalar")
+  }
+
+  # Check that hp is read as row vector
+  if (hp.row > hp.col) {
+    hp <- t(hp)
+  }
+
+  # Check that inputs obey sign and value restrictions
+  if (sigma < 0) {
+    stop("Standard deviation must be non-negative")
+  }
+  if (df < 3) {
+    stop("Number of degrees of freedom must be at least 3 for first two moments
+         of distribution to be defined")
+  }
+  if (max(cl) >= 1){
+    stop("Confidence level(s) must be less than 1")
+  }
+  if (min(cl) <= 0){
+    stop("Confidence level(s) must be greater than 0")
+  }
+  if (min(hp) <= 0){
+    stop("Holding period(s) must be greater than 0")
+  }
+  # VaR estimation
+  cl.row <- dim(cl)[1]
+  cl.col <- dim(cl)[2]
+  VaR <- (-sigma[1,1] * sqrt(t(hp)) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + (- mu[1,1] * t(hp) %*% matrix(1, cl.row, cl.col)) # VaR
+
+  # Plotting
+  plot(hp, VaR, type = "l", xlab = "Holding Period", ylab = "VaR")
+  cl.label <- 100 * cl[1,1]
+  title("t VaR against holding period")
+  xmin <-min(hp)+.25*(max(hp)-min(hp))
+  text(xmin,max(VaR)-.1*(max(VaR)-min(VaR)),
+       'Input parameters', cex=.75, font = 2)
+  text(xmin,max(VaR)-.15*(max(VaR)-min(VaR)),
+       paste('Daily mean L/P data = ',-mu[1,1]),cex=.75)
+  text(xmin,max(VaR)-.2*(max(VaR)-min(VaR)),
+       paste('Stdev. of daily L/P data = ',sigma[1,1]),cex=.75)
+  text(xmin,max(VaR)-.25*(max(VaR)-min(VaR)),
+       paste('Degrees of freedom = ',df),cex=.75)
+  text(xmin,max(VaR)-.3*(max(VaR)-min(VaR)),
+       paste('Confidence level = ',cl.label,'%'),cex=.75)
+}

From noreply at r-forge.r-project.org  Sun Aug 9 01:06:10 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Sun, 9 Aug 2015 01:06:10 +0200 (CEST)
Subject: [Returnanalytics-commits] r3930 - pkg/Dowd/man
Message-ID: <20150808230610.57999185183@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-09 01:06:10 +0200 (Sun, 09 Aug 2015)
New Revision: 3930

Added:
   pkg/Dowd/man/tVaRPlot2DHP.Rd
Log:
Function tVaRPlot2DHP added

Added: pkg/Dowd/man/tVaRPlot2DHP.Rd
===================================================================
--- pkg/Dowd/man/tVaRPlot2DHP.Rd	                        (rev 0)
+++ pkg/Dowd/man/tVaRPlot2DHP.Rd	2015-08-08 23:06:10 UTC (rev 3930)
@@ -0,0 +1,45 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/tVaRPlot2DHP.R
+\name{tVaRPlot2DHP}
+\alias{tVaRPlot2DHP}
+\title{Plots t VaR against holding period}
+\usage{
+tVaRPlot2DHP(...)
+}
+\arguments{
+\item{...}{The input arguments contain either return data or else mean and
+ standard deviation data.
Accordingly, the number of input arguments is
+ either 4 or 5. In case there are 4 input arguments, the mean and standard
+ deviation of the data are computed from the return data. See examples for
+ details.

+ returns Vector of daily P/L data

+ mu Mean of daily P/L data

+ sigma Standard deviation of daily P/L data

+ df Number of degrees of freedom in the t distribution

+ cl VaR confidence level and must be a scalar

+ hp VaR holding period and must be a vector}
+}
+\description{
+Plots the VaR of a portfolio against holding period assuming that P/L are
+t-distributed, for specified confidence level and holding period.
+}
+\examples{
+# Computes VaR given P/L data
+ data <- runif(5, min = 0, max = .2)
+ tVaRPlot2DHP(returns = data, df = 6, cl = .95, hp = 60:90)

+ # Computes VaR given mean and standard deviation of return data
+ tVaRPlot2DHP(mu = .012, sigma = .03, df = 6, cl = .99, hp = 40:80)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Sun Aug 9 01:07:21 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Sun, 9 Aug 2015 01:07:21 +0200 (CEST)
Subject: [Returnanalytics-commits] r3931 - pkg/Dowd/man
Message-ID: <20150808230721.6CFED184220@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-09 01:07:21 +0200 (Sun, 09 Aug 2015)
New Revision: 3931

Added:
   pkg/Dowd/man/tVaRPlot3D.Rd
Log:
Function tVaRPlot3D added

Added: pkg/Dowd/man/tVaRPlot3D.Rd
===================================================================
--- pkg/Dowd/man/tVaRPlot3D.Rd	                        (rev 0)
+++ pkg/Dowd/man/tVaRPlot3D.Rd	2015-08-08 23:07:21 UTC (rev 3931)
@@ -0,0 +1,46 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/tVaRPlot3D.R
+\name{tVaRPlot3D}
+\alias{tVaRPlot3D}
+\title{Plots t VaR against confidence level and holding period}
+\usage{
+tVaRPlot3D(...)
+}
+\arguments{
+\item{...}{The input arguments contain either return data or else mean and
+ standard deviation data. Accordingly, the number of input arguments is
+ either 4 or 5. In case there are 4 input arguments, the mean and standard
+ deviation of the data are computed from the return data. See examples for
+ details.

+ returns Vector of daily geometric return data

+ mu Mean of daily geometric return data

+ sigma Standard deviation of daily geometric return data

+ df Number of degrees of freedom in the t distribution

+ cl VaR confidence level and must be a vector

+ hp VaR holding period and must be a vector}
+}
+\description{
+Plots the VaR of a portfolio against confidence level and holding period
+assuming that P/L are t-distributed, for specified confidence level and
+ holding period.
+}
+\examples{
+# Plots VaR against confidence level given geometric return data
+ data <- runif(5, min = 0, max = .2)
+ tVaRPlot3D(returns = data, df = 6, cl = seq(.85,.99,.01), hp = 60:90)

+ # Computes VaR against confidence level given mean and standard deviation of return data
+ tVaRPlot3D(mu = .012, sigma = .03, df = 6, cl = seq(.85,.99,.02), hp = 40:80)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Sun Aug 9 01:07:43 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Sun, 9 Aug 2015 01:07:43 +0200 (CEST)
Subject: [Returnanalytics-commits] r3932 - pkg/Dowd/R
Message-ID: <20150808230743.A52B0184220@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-09 01:07:43 +0200 (Sun, 09 Aug 2015)
New Revision: 3932

Added:
   pkg/Dowd/R/tVaRPlot3D.R
Log:
Function tVaRPlot3D added

Added: pkg/Dowd/R/tVaRPlot3D.R
===================================================================
--- pkg/Dowd/R/tVaRPlot3D.R	                        (rev 0)
+++ pkg/Dowd/R/tVaRPlot3D.R	2015-08-08 23:07:43 UTC (rev 3932)
@@ -0,0 +1,129 @@
+#' Plots t VaR against confidence level and holding period
+#'
+#' Plots the VaR of a portfolio against confidence level and holding period
+#' assuming that P/L are t-distributed, for specified confidence level and
+#' holding period.
+#'
+#' @param ... The input arguments contain either return data or else mean and
+#' standard deviation data. Accordingly, the number of input arguments is
+#' either 4 or 5. In case there are 4 input arguments, the mean and standard
+#' deviation of the data are computed from the return data. See examples for
+#' details.
+#'
+#' returns Vector of daily geometric return data
+#'
+#' mu Mean of daily geometric return data
+#'
+#' sigma Standard deviation of daily geometric return data
+#'
+#' df Number of degrees of freedom in the t distribution
+#'
+#' cl VaR confidence level and must be a vector
+#'
+#' hp VaR holding period and must be a vector
+#'
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#' @examples
+#'
+#' # Plots VaR against confidence level given geometric return data
+#' data <- runif(5, min = 0, max = .2)
+#' tVaRPlot3D(returns = data, df = 6, cl = seq(.85,.99,.01), hp = 60:90)
+#'
+#' # Computes VaR against confidence level given mean and standard deviation of return data
+#' tVaRPlot3D(mu = .012, sigma = .03, df = 6, cl = seq(.85,.99,.02), hp = 40:80)
+#'
+#'
+#' @export
tVaRPlot3D <- function(...){
+  if (nargs() < 4) {
+    stop("Too few arguments")
+  }
+  if (nargs() > 5) {
+    stop("Too many arguments")
+  }
+  args <- list(...)
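+  # As in tVaRPlot2DHP, the inputs are read from list(...) by name
+  # (args$returns, args$mu, args$sigma, args$df, args$cl, args$hp), so all
+  # arguments must be named in the call; here cl and hp should both be
+  # vectors, since together they span the surface grid plotted below.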
+  if (nargs() == 5) {
+    mu <- args$mu
+    df <- args$df
+    cl <- args$cl
+    sigma <- args$sigma
+    hp <- args$hp
+  }
+  if (nargs() == 4) {
+    mu <- mean(args$returns)
+    df <- args$df
+    cl <- args$cl
+    sigma <- sd(args$returns)
+    hp <- args$hp
+  }
+
+  # Check that inputs have correct dimensions
+  mu <- as.matrix(mu)
+  mu.row <- dim(mu)[1]
+  mu.col <- dim(mu)[2]
+  if (max(mu.row, mu.col) > 1) {
+    stop("Mean must be a scalar")
+  }
+  sigma <- as.matrix(sigma)
+  sigma.row <- dim(sigma)[1]
+  sigma.col <- dim(sigma)[2]
+  if (max(sigma.row, sigma.col) > 1) {
+    stop("Standard deviation must be a scalar")
+  }
+  cl <- as.matrix(cl)
+  cl.row <- dim(cl)[1]
+  cl.col <- dim(cl)[2]
+  if (min(cl.row, cl.col) > 1) {
+    stop("Confidence level must be a vector")
+  }
+  hp <- as.matrix(hp)
+  hp.row <- dim(hp)[1]
+  hp.col <- dim(hp)[2]
+  if (min(hp.row, hp.col) > 1) {
+    stop("Holding period must be a vector")
+  }
+  df <- as.matrix(df)
+  df.row <- dim(df)[1]
+  df.col <- dim(df)[2]
+  if (max(df.row, df.col) > 1) {
+    stop("Number of degrees of freedom must be a scalar")
+  }
+
+  # Check that cl is read as row vector
+  if (cl.row > cl.col) {
+    cl <- t(cl)
+  }
+  # Check that hp is read as column vector
+  if (hp.row > hp.col) {
+    hp <- t(hp)
+  }
+
+  # Check that inputs obey sign and value restrictions
+  if (sigma < 0) {
+    stop("Standard deviation must be non-negative")
+  }
+  if (df < 3) {
+    stop("Number of degrees of freedom must be at least 3 for first two moments of distribution to be defined")
+  }
+  if (max(cl) >= 1){
+    stop("Confidence level(s) must be less than 1")
+  }
+  if (min(cl) <= 0){
+    stop("Confidence level(s) must be greater than 0")
+  }
+  if (min(hp) <= 0){
+    stop("Holding period(s) must be greater than 0")
+  }
+
+  # VaR estimation
+  cl.row <- dim(cl)[1]
+  cl.col <- dim(cl)[2]
+  VaR <- (-sigma[1,1] * sqrt(t(hp)) %*% sqrt((df - 2) / df) %*% qt(1 - cl, df)) + (- mu[1,1] * t(hp) %*% matrix(1, cl.row, cl.col)) # VaR
+  # Plotting
+  persp(x=cl, y=hp, t(VaR), xlab = "Confidence Level",
+        ylab = "Holding Period", zlab = "VaR",
+        main = "t VaR against Confidence Level and Holding Period")
+
+}

From noreply at r-forge.r-project.org  Sun Aug 9 01:09:12 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Sun, 9 Aug 2015 01:09:12 +0200 (CEST)
Subject: [Returnanalytics-commits] r3933 - pkg/Dowd
Message-ID: <20150808230913.0428D184220@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-09 01:09:12 +0200 (Sun, 09 Aug 2015)
New Revision: 3933

Modified:
   pkg/Dowd/NAMESPACE
Log:
Functions tVaRPlot3D and tVaRPlot2DHP added.

Modified: pkg/Dowd/NAMESPACE
===================================================================
--- pkg/Dowd/NAMESPACE	2015-08-08 23:07:43 UTC (rev 3932)
+++ pkg/Dowd/NAMESPACE	2015-08-08 23:09:12 UTC (rev 3933)
@@ -134,5 +134,7 @@
 export(tVaRESPlot2DCL)
 export(tVaRFigure)
 export(tVaRPlot2DCL)
+export(tVaRPlot2DHP)
+export(tVaRPlot3D)
 import(MASS)
 import(bootstrap)

From noreply at r-forge.r-project.org  Tue Aug 11 08:43:41 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Tue, 11 Aug 2015 08:43:41 +0200 (CEST)
Subject: [Returnanalytics-commits] r3934 - in pkg/Meucci: .
demo Message-ID: <20150811064341.3F77D18045A@r-forge.r-project.org> Author: xavierv Date: 2015-08-11 08:43:40 +0200 (Tue, 11 Aug 2015) New Revision: 3934 Added: pkg/Meucci/demo/RobustBayesianCaseStudy.R Modified: pkg/Meucci/DESCRIPTION pkg/Meucci/demo/00Index pkg/Meucci/demo/RobustBayesianAllocation.R Log: added the case study from the Robust Bayesian Allocation papers Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2015-08-08 23:09:12 UTC (rev 3933) +++ pkg/Meucci/DESCRIPTION 2015-08-11 06:43:40 UTC (rev 3934) @@ -37,7 +37,8 @@ kernlab, nloptr, limSolve, - linprog + linprog, + PEIP Suggests: Matrix, MASS, Modified: pkg/Meucci/demo/00Index =================================================================== --- pkg/Meucci/demo/00Index 2015-08-08 23:09:12 UTC (rev 3933) +++ pkg/Meucci/demo/00Index 2015-08-11 06:43:40 UTC (rev 3934) @@ -104,5 +104,4 @@ S_plotGaussHermite displays mesh points based on Gaussian-Hermite quadrature Bayesian networks S_AnalyzeJGBrates applies the Inverse Call Transformation to Japanese government rates and compares the shadow rates returned. S_MainDiversification computes Diversification Distribution and the Effective Number of Bets. - - +RobustBayesianCaseStudy case study for the display of the Robust Bayesian Allocation Techniques. Modified: pkg/Meucci/demo/RobustBayesianAllocation.R =================================================================== --- pkg/Meucci/demo/RobustBayesianAllocation.R 2015-08-08 23:09:12 UTC (rev 3933) +++ pkg/Meucci/demo/RobustBayesianAllocation.R 2015-08-11 06:43:40 UTC (rev 3934) @@ -1,4 +1,3 @@ - #################################################################### # Example from Meucci's MATLAB script: S_SimulationsCaseStudy.M # See MATLAB package "Meucci_RobustBayesian" for original MATLAB Added: pkg/Meucci/demo/RobustBayesianCaseStudy.R =================================================================== --- pkg/Meucci/demo/RobustBayesianCaseStudy.R (rev 0) +++ pkg/Meucci/demo/RobustBayesianCaseStudy.R 2015-08-11 06:43:40 UTC (rev 3934) @@ -0,0 +1,91 @@ +# Example from Meucci's MATLAB script: S_SnPCaseStudy.m +# See MATLAB package "Meucci_RobustBayesian" for original MATLAB +# source on www.symmys.com + +p_m <- .1 # robustness parameter location +p_s <- .1 # robustness parameter scatter +data(SectorsSnP500) + +################################################################################ +# compute weekly returns +Ps <- sectorsSnP500$P[seq(1, nrow(sectorsSnP500$P), 5), ] +R <- Ps[2:nrow(Ps), ] / Ps[1:(nrow(Ps) - 1), ] - 1 +Dates_P <- sectorsSnP500$DP[seq(1, length(sectorsSnP500$DP), 5)] +Dates_R <- Dates_P[-1] +Ttot <- nrow(R) +N <- ncol(R) + +################################################################################ +# estimation +W <- 52 # rolling estimation period + +NumPortf <- 10 +Ret_hat <- c() +Ret_rB <- c() +Dates <- c() +for (t in (W + 1):(Ttot - 1)) { + Rets <- R[(t - W):t, ] + + # sample estimate + m_hat <- colMeans(Rets) + S_hat <- cov(Rets) + EF <- efficientFrontier(NumPortf, S_hat, m_hat) + de_hat <- EF$returns + ds_hat <- EF$volatility + w_hat <- EF$weights + # Bayesian prior + S0 <- diag(diag(S_hat)) + m0 <- .5 * S0 %*% array(1, N) / N + T <- nrow(Rets) + T0 <- 2 * T + nu0 <- 2 * T + + # Bayesian posterior parameters + T1 <- T + T0 + m1 <- 1 / T1 * (m_hat * T + m0 * T0) + nu1 <- T + nu0 + S1 <- 1 / nu1 * ( S_hat * T + S0 * nu0 + (m_hat - m0) %*% t(m_hat - m0) / + (1 / T + 1 / T0)) + w1 <- efficientFrontier(NumPortf, S1, 
m1)$weights
+
+  # robustness parameters
+  q_m2 <- chi2inv(p_m,N)
+  g_m <- sqrt(q_m2 / T1 * nu1 / (nu1 - 2))
+  q_s2 <- chi2inv(p_s, N * (N + 1) / 2)
+  PickVol <- round(.8 * NumPortf)
+  v <- (ds_hat[PickVol]) ^ 2
+  g_s <- v / ( nu1 / (nu1 + N + 1) + sqrt( 2 * nu1 * nu1 * q_s2 /
+              ((nu1 + N + 1) ^ 3)))
+  Target <- c()
+
+  wu <- w_hat[PickVol, ]
+  Ret_hat <- c(Ret_hat, R[t + 1, ] %*% wu)
+
+  for (k in 1:(NumPortf - 1)) {
+    NewTarget <- -(10 ^ 10)
+    if (t(wu) %*% S1 %*% wu <= g_s)
+      NewTarget <- t(m1) %*% wu - g_m * sqrt(t(wu) %*% S1 %*% wu)
+    Target <- c(Target, NewTarget)
+  }
+
+  k <- which.max(Target)
+  wu <- w1[k, ]
+  Ret_rB <- c(Ret_rB, R[t + 1, ] %*% wu)
+  Dates <- c(Dates, Dates_R[t + 1])
+}
+
+NAV_hat <- cumprod(1 + Ret_hat)
+NAV_rB <- cumprod(1 + Ret_rB)
+
+################################################################################
+# plots
+
+dev.new()
+par(mfrow = c(2, 1))
+plot(Dates, Ret_hat, "l", xlab = "", ylab = "")
+plot(Dates, Ret_rB, "l", xlab = "", ylab = "")
+
+dev.new()
+par(mfrow = c(2, 1))
+plot(Dates,NAV_hat, "l", xlab = "", ylab = "")
+plot(Dates,NAV_rB, "l", xlab = "", ylab = "")

From noreply at r-forge.r-project.org  Tue Aug 11 09:33:19 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Tue, 11 Aug 2015 09:33:19 +0200 (CEST)
Subject: [Returnanalytics-commits] r3935 - pkg/Dowd
Message-ID: <20150811073319.66481185645@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-11 09:33:19 +0200 (Tue, 11 Aug 2015)
New Revision: 3935

Modified:
   pkg/Dowd/NAMESPACE
Log:
Functions VarianceCovariance VaR and ES added

Modified: pkg/Dowd/NAMESPACE
===================================================================
--- pkg/Dowd/NAMESPACE	2015-08-11 06:43:40 UTC (rev 3934)
+++ pkg/Dowd/NAMESPACE	2015-08-11 07:33:19 UTC (rev 3935)
@@ -123,6 +123,8 @@
 export(ShortBlackScholesCallVaR)
 export(ShortBlackScholesPutVaR)
 export(TQQPlot)
+export(VarianceCovarianceES)
+export(VarianceCovarianceVaR)
 export(tES)
 export(tESDFPerc)
 export(tESFigure)

From noreply at r-forge.r-project.org  Tue Aug 11 09:40:00 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Tue, 11 Aug 2015 09:40:00 +0200 (CEST)
Subject: [Returnanalytics-commits] r3936 - pkg/Dowd/R
Message-ID: <20150811074000.EE20B183C5C@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-11 09:40:00 +0200 (Tue, 11 Aug 2015)
New Revision: 3936

Added:
   pkg/Dowd/R/VarianceCovarianceES.R
Log:
Function VarianceCovarianceES.R added

Added: pkg/Dowd/R/VarianceCovarianceES.R
===================================================================
--- pkg/Dowd/R/VarianceCovarianceES.R	                        (rev 0)
+++ pkg/Dowd/R/VarianceCovarianceES.R	2015-08-11 07:40:00 UTC (rev 3936)
@@ -0,0 +1,103 @@
+#' @title Variance-covariance ES for normally distributed returns
+#'
+#' @description Estimates the variance-covariance ES of a
+#' portfolio assuming individual asset returns are normally distributed,
+#' for specified confidence level and holding period.
+#'
+#' @param vc.matrix Variance covariance matrix for returns
+#' @param mu Vector of expected position returns
+#' @param positions Vector of positions
+#' @param cl Confidence level and is scalar
+#' @param hp Holding period and is scalar
+#'
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
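+#'
+#' @note ES is approximated numerically below: the tail beyond \code{cl} is
+#' divided into n = 1000 equal-probability slices, the normal VaR is computed
+#' at each sliced confidence level, and the ES is taken as the average of
+#' these tail VaRs.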
+#'
+#' @author Dinesh Acharya
+#'
+#' @examples
+#'
+#' # Variance-covariance ES for randomly generated portfolio
+#' vc.matrix <- matrix(rnorm(16), 4, 4)
+#' mu <- rnorm(4)
+#' positions <- c(5, 2, 6, 10)
+#' cl <- .95
+#' hp <- 280
+#' VarianceCovarianceES(vc.matrix, mu, positions, cl, hp)
+#'
+#' @export
+VarianceCovarianceES <- function(vc.matrix, mu, positions, cl, hp){
+
+  # Check that cl is read as a row vector
+  cl <- as.matrix(cl)
+  if (dim(cl)[1] > dim(cl)[2]) {
+    cl <- t(cl)
+  }
+
+  # Check that hp is read as a column vector
+  hp <- as.matrix(hp)
+  if (dim(hp)[1] < dim(hp)[2]) {
+    hp <- t(hp)
+  }
+
+  # Check that positions vector is read as a scalar or row vector
+  positions <- as.matrix(positions)
+  if (dim(positions)[1] > dim(positions)[2]){
+    positions <- t(positions)
+  }
+
+  # Check that expected returns vector is read as a scalar or row vector
+  mu <- as.matrix(mu)
+  if (dim(mu)[1] > dim(mu)[2]){
+    mu <- t(mu)
+  }
+
+  # Check that dimensions are correct
+  if (max(dim(mu)) != max(dim(positions))){
+    stop("Positions vector and expected returns vector must have same size")
+  }
+  if (max(dim(vc.matrix)) != max(dim(positions))){
+    stop("Positions vector and variance-covariance matrix must have
+         compatible dimensions")
+  }
+
+  # Check that inputs obey sign and value restrictions
+  if (cl >= 1){
+    stop("Confidence level must be less than 1")
+  }
+  if (cl <= 0){
+    stop("Confidence level must be greater than 0");
+  }
+  if (hp <= 0){
+    stop("Holding period must be greater than 0");
+  }
+
+  # VaR and ES estimation
+  VaR <- matrix(0, length(cl), length(hp))
+  term <- matrix(0, length(cl), length(hp))
+  es <- matrix(0, length(cl), length(hp))
+  cl0 <- double(length(cl))
+  delta.cl <- double(length(cl))
+  for (i in 1:length(cl)) {
+    for (j in 1:length(hp)) {
+      VaR[i,j] <- - mu %*% t(positions) * hp[j] - qnorm(1-cl[i],0,1) *
+        sqrt(positions%*%vc.matrix%*%t(positions))*sqrt(hp[j]) # VaR
+      # ES Estimation
+      n <- 1000 # Number of slices into which tail is divided
+      cl0[i] <- cl[i] # Initial confidence level
+      term[i, j] <- VaR[i, j]
+      delta.cl[i] <- (1 - cl[i]) / n # Increment to confidence level as each
+      # slice is taken
+
+      for (k in 1:(n - 1)) {
+
+        cl[i] <- cl0[i] + k * delta.cl[i] # Revised cl
+        term[i, j] <- term[i, j] - mu %*% t(positions) * hp[j] -
+          (qnorm(1-cl[i],0,1)) * sqrt(positions%*%vc.matrix%*%t(positions))*sqrt(hp[j])
+      }
+      es[i, j] <- term[i, j]/n
+      cl[i] <- cl0[i] # Restore cl[i], modified in the slicing loop above
+
+    }
+  }
+  y <- t(es)
+  return(y)
+
+}

From noreply at r-forge.r-project.org  Tue Aug 11 09:40:45 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Tue, 11 Aug 2015 09:40:45 +0200 (CEST)
Subject: [Returnanalytics-commits] r3937 - pkg/Dowd/man
Message-ID: <20150811074045.E364C185F8B@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-11 09:40:45 +0200 (Tue, 11 Aug 2015)
New Revision: 3937

Added:
   pkg/Dowd/man/VarianceCovarianceES.Rd
Log:
Function VarianceCovarianceES.R added

Added: pkg/Dowd/man/VarianceCovarianceES.Rd
===================================================================
--- pkg/Dowd/man/VarianceCovarianceES.Rd	                        (rev 0)
+++ pkg/Dowd/man/VarianceCovarianceES.Rd	2015-08-11 07:40:45 UTC (rev 3937)
@@ -0,0 +1,40 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/VarianceCovarianceES.R
+\name{VarianceCovarianceES}
+\alias{VarianceCovarianceES}
+\title{Variance-covariance ES for normally distributed returns}
+\usage{
+VarianceCovarianceES(vc.matrix, mu, positions, cl, hp)
+}
+\arguments{
+\item{vc.matrix}{Variance covariance matrix for returns}

+\item{mu}{Vector of
expected position returns}

+\item{positions}{Vector of positions}

+\item{cl}{Confidence level and is scalar}

+\item{hp}{Holding period and is scalar}
+}
+\description{
+Estimates the variance-covariance ES of a
+portfolio assuming individual asset returns are normally distributed,
+for specified confidence level and holding period.
+}
+\examples{
+# Variance-covariance ES for randomly generated portfolio
+ vc.matrix <- matrix(rnorm(16), 4, 4)
+ mu <- rnorm(4)
+ positions <- c(5, 2, 6, 10)
+ cl <- .95
+ hp <- 280
+ VarianceCovarianceES(vc.matrix, mu, positions, cl, hp)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Tue Aug 11 09:41:13 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Tue, 11 Aug 2015 09:41:13 +0200 (CEST)
Subject: [Returnanalytics-commits] r3938 - pkg/Dowd/man
Message-ID: <20150811074113.1CE45185F8B@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-11 09:41:12 +0200 (Tue, 11 Aug 2015)
New Revision: 3938

Added:
   pkg/Dowd/man/VarianceCovarianceVaR.Rd
Log:
Function VarianceCovarianceVaR added

Added: pkg/Dowd/man/VarianceCovarianceVaR.Rd
===================================================================
--- pkg/Dowd/man/VarianceCovarianceVaR.Rd	                        (rev 0)
+++ pkg/Dowd/man/VarianceCovarianceVaR.Rd	2015-08-11 07:41:12 UTC (rev 3938)
@@ -0,0 +1,43 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/VarianceCovarianceVaR.R
+\name{VarianceCovarianceVaR}
+\alias{VarianceCovarianceVaR}
+\title{Variance-covariance VaR for normally distributed returns}
+\usage{
+VarianceCovarianceVaR(vc.matrix, mu, positions, cl, hp)
+}
+\arguments{
+\item{vc.matrix}{Assumed variance covariance matrix for returns}

+\item{mu}{Vector of expected position returns}

+\item{positions}{Vector of positions}

+\item{cl}{Confidence level and is scalar or vector}

+\item{hp}{Holding period and is scalar or vector}
+}
+\description{
+Estimates the variance-covariance VaR of a
+portfolio assuming individual asset returns are normally distributed,
+for specified confidence level and holding period.
+}
+\examples{
+# Variance-covariance VaR for randomly generated portfolio
+ vc.matrix <- matrix(rnorm(16),4,4)
+ mu <- rnorm(4)
+ positions <- c(5,2,6,10)
+ cl <- .95
+ hp <- 280
+ VarianceCovarianceVaR(vc.matrix, mu, positions, cl, hp)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}
+\seealso{
+AdjustedVarianceCovarianceVaR
+}
+

From noreply at r-forge.r-project.org  Tue Aug 11 09:41:31 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Tue, 11 Aug 2015 09:41:31 +0200 (CEST)
Subject: [Returnanalytics-commits] r3939 - pkg/Dowd/R
Message-ID: <20150811074131.62EE8185F8B@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-11 09:41:31 +0200 (Tue, 11 Aug 2015)
New Revision: 3939

Added:
   pkg/Dowd/R/VarianceCovarianceVaR.R
Log:
Function VarianceCovarianceVaR added

Added: pkg/Dowd/R/VarianceCovarianceVaR.R
===================================================================
--- pkg/Dowd/R/VarianceCovarianceVaR.R	                        (rev 0)
+++ pkg/Dowd/R/VarianceCovarianceVaR.R	2015-08-11 07:41:31 UTC (rev 3939)
@@ -0,0 +1,84 @@
+#' @title Variance-covariance VaR for normally distributed returns
+#'
+#' @description Estimates the variance-covariance VaR of a
+#' portfolio assuming individual asset returns are normally distributed,
+#' for specified confidence level and holding period.
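+#'
+#' @details The VaR follows from the standard normal variance-covariance
+#' formula, which in code form (matching the implementation below) reads:
+#' \cr \cr
+#' \code{VaR = - mu \%*\% t(positions) * hp - qnorm(1 - cl, 0, 1) *}
+#' \code{sqrt(positions \%*\% vc.matrix \%*\% t(positions)) * sqrt(hp)}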
+#'
+#' @param vc.matrix Assumed variance covariance matrix for returns
+#' @param mu Vector of expected position returns
+#' @param positions Vector of positions
+#' @param cl Confidence level and is scalar or vector
+#' @param hp Holding period and is scalar or vector
+#'
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#'
+#' @examples
+#'
+#' # Variance-covariance VaR for randomly generated portfolio
+#' vc.matrix <- matrix(rnorm(16),4,4)
+#' mu <- rnorm(4)
+#' positions <- c(5,2,6,10)
+#' cl <- .95
+#' hp <- 280
+#' VarianceCovarianceVaR(vc.matrix, mu, positions, cl, hp)
+#'
+#' @seealso AdjustedVarianceCovarianceVaR
+#' @export
+VarianceCovarianceVaR <- function(vc.matrix, mu, positions, cl, hp){
+
+  # Check that confidence level is read as a row vector
+  cl <- as.matrix(cl)
+  if (dim(cl)[1] > dim(cl)[2]){
+    cl <- t(cl)
+  }
+
+  # Check that hp is read as a column vector
+  hp <- as.matrix(hp)
+  if (dim(hp)[1] < dim(hp)[2]){
+    hp <- t(hp)
+  }
+
+  # Check that positions vector is read as a scalar or row vector
+  positions <- as.matrix(positions)
+  if (dim(positions)[1] > dim(positions)[2]){
+    positions <- t(positions)
+  }
+
+  # Check that expected returns vector is read as a scalar or row vector
+  mu <- as.matrix(mu)
+  if (dim(mu)[1] > dim(mu)[2]){
+    mu <- t(mu)
+  }
+
+  # Check that dimensions are correct
+  if (max(dim(mu)) != max(dim(positions))){
+    stop("Positions vector and expected returns vector must have same size")
+  }
+  vc.matrix <- as.matrix(vc.matrix)
+  if (max(dim(vc.matrix)) != max(dim(positions))){
+    stop("Positions vector and variance-covariance matrix must have
+         compatible dimensions")
+  }
+
+  # Check that inputs obey sign and value restrictions
+  if (cl >= 1){
+    stop("Confidence level must be less than 1")
+  }
+  if (cl <= 0){
+    stop("Confidence level must be greater than 0");
+  }
+  if (hp <= 0){
+    stop("Holding period must be greater than 0");
+  }
+
+  # VaR estimation
+  VaR <- matrix(0, length(cl), length(hp))
+  for (i in 1:length(cl)) {
+    for (j in 1:length(hp)) {
+      VaR[i, j] <- - mu %*% t(positions) * hp[j] - qnorm(1-cl[i], 0, 1) * sqrt(positions %*% vc.matrix %*% t(positions)) * sqrt(hp[j])
+    }
+  }
+  y <- t(VaR)
+  return(y)
+}
\ No newline at end of file

From noreply at r-forge.r-project.org  Wed Aug 12 09:01:10 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Wed, 12 Aug 2015 09:01:10 +0200 (CEST)
Subject: [Returnanalytics-commits] r3940 - pkg/Dowd/R
Message-ID: <20150812070110.952531878D9@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-12 09:01:09 +0200 (Wed, 12 Aug 2015)
New Revision: 3940

Added:
   pkg/Dowd/R/FilterStrategyLogNormalVaR.R
Log:
Function FilterStrategyLogNormalVaR added.

Added: pkg/Dowd/R/FilterStrategyLogNormalVaR.R
===================================================================
--- pkg/Dowd/R/FilterStrategyLogNormalVaR.R	                        (rev 0)
+++ pkg/Dowd/R/FilterStrategyLogNormalVaR.R	2015-08-12 07:01:09 UTC (rev 3940)
@@ -0,0 +1,53 @@
+#' Log Normal VaR with filter strategy
+#'
+#' Generates Monte Carlo lognormal VaR with filter portfolio strategy
+#'
+#' @param mu Mean arithmetic return
+#' @param sigma Standard deviation of arithmetic return
+#' @param number.trials Number of trials used in the simulations
+#' @param alpha Participation parameter
+#' @param cl Confidence Level
+#' @param hp Holding Period
+#' @return Lognormal VaR
+#'
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
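+#'
+#' @note The filter strategy simulated below rebalances at each of 100 time
+#' steps along a lognormal price path, holding an equity proportion of
+#' 0.5 + alpha * (current price - initial price) / initial price, with the
+#' remainder in cash; the VaR is then read off the simulated terminal P/L
+#' with \code{HSVaR}.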
+#'
+#' @author Dinesh Acharya
+#' @examples
+#'
+#' # Computes Monte Carlo lognormal VaR with filter strategy
+#' FilterStrategyLogNormalVaR(0, .2, 100, 1.2, .95, 10)
+#'
+#' @export
FilterStrategyLogNormalVaR <- function(mu, sigma, number.trials, alpha, cl, hp){
+  N <- 100 # Number of increments, taken as 100
+  dt <- hp/N # Size of time-increment
+  nudt <- (mu - 0.5 * sigma ^ 2) * dt
+  sigmadt <- sigma * sqrt(dt)
+  stock.price <- 1 # Initial stock price; initial investment assumed to be 1
+  lnS <- log(stock.price)
+  M <- number.trials
+  # Stock price simulation process
+  lnSt <- matrix(0,M,N)
+  new.stock.price <- matrix(0,M,N)
+  equity.proportion <- matrix(0,M,N)
+  investment <- matrix(0,M,N)
+  for (i in 1:M) {
+    lnSt[i, 1] <- rnorm(1, lnS + nudt, sigmadt)
+    new.stock.price[i,1] <- exp(lnSt[i,1])
+    for (j in 2:N) {
+      lnSt[i, j] <- rnorm(1, lnSt[i, j-1] + nudt, sigmadt)
+      new.stock.price[i, j] <- exp(lnSt[i, j]) # New stock price
+      equity.proportion[i, j] <- .5 + alpha * (new.stock.price[i, j] -
+                                      stock.price)/stock.price
+      investment[i, j] <- equity.proportion[i, j] * new.stock.price[i, j] + (1 - equity.proportion[i, j])
+    }
+  }
+  # Profit/Loss calculation at the final time step
+  profit.or.loss <- double(M)
+  for (i in 1:M) {
+    profit.or.loss[i] <- investment[i, N] - stock.price
+  }
+  y <- HSVaR(profit.or.loss, cl)
+  return(y)
+}
\ No newline at end of file

From noreply at r-forge.r-project.org  Wed Aug 12 09:01:31 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Wed, 12 Aug 2015 09:01:31 +0200 (CEST)
Subject: [Returnanalytics-commits] r3941 - pkg/Dowd/man
Message-ID: <20150812070131.158201878D9@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-12 09:01:30 +0200 (Wed, 12 Aug 2015)
New Revision: 3941

Added:
   pkg/Dowd/man/FilterStrategyLogNormalVaR.Rd
Log:
Function FilterStrategyLogNormalVaR added.

Added: pkg/Dowd/man/FilterStrategyLogNormalVaR.Rd
===================================================================
--- pkg/Dowd/man/FilterStrategyLogNormalVaR.Rd	                        (rev 0)
+++ pkg/Dowd/man/FilterStrategyLogNormalVaR.Rd	2015-08-12 07:01:30 UTC (rev 3941)
@@ -0,0 +1,38 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/FilterStrategyLogNormalVaR.R
+\name{FilterStrategyLogNormalVaR}
+\alias{FilterStrategyLogNormalVaR}
+\title{Log Normal VaR with filter strategy}
+\usage{
+FilterStrategyLogNormalVaR(mu, sigma, number.trials, alpha, cl, hp)
+}
+\arguments{
+\item{mu}{Mean arithmetic return}

+\item{sigma}{Standard deviation of arithmetic return}

+\item{number.trials}{Number of trials used in the simulations}

+\item{alpha}{Participation parameter}

+\item{cl}{Confidence Level}

+\item{hp}{Holding Period}
+}
+\value{
+Lognormal VaR
+}
+\description{
+Generates Monte Carlo lognormal VaR with filter portfolio strategy
+}
+\examples{
+# Computes Monte Carlo lognormal VaR with filter strategy
+ FilterStrategyLogNormalVaR(0, .2, 100, 1.2, .95, 10)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Wed Aug 12 09:02:34 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Wed, 12 Aug 2015 09:02:34 +0200 (CEST)
Subject: [Returnanalytics-commits] r3942 - pkg/Dowd/R
Message-ID: <20150812070234.8EC241878D9@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-12 09:02:34 +0200 (Wed, 12 Aug 2015)
New Revision: 3942

Added:
   pkg/Dowd/R/StopLossLogNormalVaR.R
Log:
Function StopLossLogNormalVaR added.
Added: pkg/Dowd/R/StopLossLogNormalVaR.R
===================================================================
--- pkg/Dowd/R/StopLossLogNormalVaR.R	                        (rev 0)
+++ pkg/Dowd/R/StopLossLogNormalVaR.R	2015-08-12 07:02:34 UTC (rev 3942)
@@ -0,0 +1,52 @@
+#' Log Normal VaR with stop loss limit
+#'
+#' Generates Monte Carlo lognormal VaR with stop-loss limit
+#'
+#' @param mu Mean arithmetic return
+#' @param sigma Standard deviation of arithmetic return
+#' @param number.trials Number of trials used in the simulations
+#' @param loss.limit Stop Loss limit
+#' @param cl Confidence Level
+#' @param hp Holding Period
+#' @return Lognormal VaR
+#'
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#' @examples
+#'
+#' # Computes Monte Carlo lognormal VaR with a stop-loss limit
+#' StopLossLogNormalVaR(0, .2, 100, 1.2, .95, 10)
+#'
+#' @export
StopLossLogNormalVaR <- function(mu, sigma, number.trials, loss.limit, cl, hp){
+  N <- 100 # Number of increments, taken as 100
+  dt <- hp/N # Size of time-increment
+  nudt <- (mu - 0.5 * sigma ^ 2) * dt
+  sigmadt <- sigma * sqrt(dt)
+  stock.price <- 1 # Initial stock price; initial investment assumed to be 1
+  lnS <- log(stock.price)
+  M <- number.trials
+  L <- loss.limit
+  # Stock price simulation process
+  lnSt <- matrix(0,M,N)
+  new.stock.price <- matrix(0,M,N)
+  investment <- matrix(0,M,N)
+  for (i in 1:M) {
+    lnSt[i, 1] <- rnorm(1, lnS + nudt, sigmadt)
+    new.stock.price[i,1] <- exp(lnSt[i,1])
+    for (j in 2:N) {
+      lnSt[i, j] <- rnorm(1, lnSt[i, j-1] + nudt, sigmadt)
+      new.stock.price[i, j] <- exp(lnSt[i, j]) # New stock price
+      investment[i, j] <- max(new.stock.price[i, j], stock.price - L) # Stop-loss floor
+    }
+  }
+  # Profit/Loss calculation at the final time step
+  profit.or.loss <- double(M)
+  for (i in 1:M) {
+    profit.or.loss[i] <- investment[i, N] - stock.price
+  }
+  y <- HSVaR(profit.or.loss, cl)
+  return(y)
+}
\ No newline at end of file

From noreply at r-forge.r-project.org  Wed Aug 12 09:03:04 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Wed, 12 Aug 2015 09:03:04 +0200 (CEST)
Subject: [Returnanalytics-commits] r3943 - pkg/Dowd/man
Message-ID: <20150812070304.8B9ED1878D9@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-12 09:03:04 +0200 (Wed, 12 Aug 2015)
New Revision: 3943

Added:
   pkg/Dowd/man/StopLossLogNormalVaR.Rd
Log:
Function StopLossLogNormalVaR added.

Added: pkg/Dowd/man/StopLossLogNormalVaR.Rd
===================================================================
--- pkg/Dowd/man/StopLossLogNormalVaR.Rd	                        (rev 0)
+++ pkg/Dowd/man/StopLossLogNormalVaR.Rd	2015-08-12 07:03:04 UTC (rev 3943)
@@ -0,0 +1,38 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/StopLossLogNormalVaR.R
+\name{StopLossLogNormalVaR}
+\alias{StopLossLogNormalVaR}
+\title{Log Normal VaR with stop loss limit}
+\usage{
+StopLossLogNormalVaR(mu, sigma, number.trials, loss.limit, cl, hp)
+}
+\arguments{
+\item{mu}{Mean arithmetic return}

+\item{sigma}{Standard deviation of arithmetic return}

+\item{number.trials}{Number of trials used in the simulations}

+\item{loss.limit}{Stop Loss limit}

+\item{cl}{Confidence Level}

+\item{hp}{Holding Period}
+}
+\value{
+Lognormal VaR
+}
+\description{
+Generates Monte Carlo lognormal VaR with stop-loss limit
+}
+\examples{
+# Computes Monte Carlo lognormal VaR with a stop-loss limit
+ StopLossLogNormalVaR(0, .2, 100, 1.2, .95, 10)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}
+

From noreply at r-forge.r-project.org  Wed Aug 12 09:03:47 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Wed, 12 Aug 2015 09:03:47 +0200 (CEST)
Subject: [Returnanalytics-commits] r3944 - pkg/Dowd
Message-ID: <20150812070347.42F6D183E9F@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-12 09:03:47 +0200 (Wed, 12 Aug 2015)
New Revision: 3944

Modified:
   pkg/Dowd/NAMESPACE
Log:
Functions StopLossLogNormalVaR and FilterStrategyLogNormalVaR added.

Modified: pkg/Dowd/NAMESPACE
===================================================================
--- pkg/Dowd/NAMESPACE	2015-08-12 07:03:04 UTC (rev 3943)
+++ pkg/Dowd/NAMESPACE	2015-08-12 07:03:47 UTC (rev 3944)
@@ -30,6 +30,7 @@
 export(CornishFisherVaR)
 export(DBPensionVaR)
 export(DCPensionVaR)
+export(FilterStrategyLogNormalVaR)
 export(FrechetES)
 export(FrechetESPlot2DCl)
 export(FrechetVaR)
@@ -122,6 +123,7 @@
 export(ProductCopulaVaR)
 export(ShortBlackScholesCallVaR)
 export(ShortBlackScholesPutVaR)
+export(StopLossLogNormalVaR)
 export(TQQPlot)
 export(VarianceCovarianceES)
 export(VarianceCovarianceVaR)

From noreply at r-forge.r-project.org  Wed Aug 12 10:50:54 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Wed, 12 Aug 2015 10:50:54 +0200 (CEST)
Subject: [Returnanalytics-commits] r3945 - in pkg/Meucci: R demo man
Message-ID: <20150812085054.5A5831872DF@r-forge.r-project.org>

Author: xavierv
Date: 2015-08-12 10:50:54 +0200 (Wed, 12 Aug 2015)
New Revision: 3945

Modified:
   pkg/Meucci/R/DynamicPortfolioManagement.R
   pkg/Meucci/R/LognormalParameters2Statistics.R
   pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R
   pkg/Meucci/R/RandNormalInverseWishart.R
   pkg/Meucci/demo/00Index
   pkg/Meucci/demo/RobustBayesianCaseStudy.R
   pkg/Meucci/demo/S_AnalyzeLognormalCorrelation.R
   pkg/Meucci/demo/S_AnalyzeNormalCorrelation.R
   pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R
   pkg/Meucci/demo/S_AutocorrelatedProcess.R
   pkg/Meucci/demo/S_DynamicManagementCase2.R
   pkg/Meucci/man/LognormalParam2Statistics.Rd
   pkg/Meucci/man/PlotMarginalsNormalInverseWishart.Rd
   pkg/Meucci/man/RandNormalInverseWishart.Rd
Log:
Formatted demo scripts and their related functions up to S_B*

Modified: pkg/Meucci/R/DynamicPortfolioManagement.R
===================================================================
--- pkg/Meucci/R/DynamicPortfolioManagement.R	2015-08-12 07:03:47 UTC (rev 3944)
+++ pkg/Meucci/R/DynamicPortfolioManagement.R	2015-08-12 08:50:54 UTC (rev 3945)
@@ -298,8 +298,8 @@
   #set the monitoring times of interest
   t <- c(0, tau, t_view - k * tau)
   t <- sort(t)
-  t <- t[t>=0]
-  idx <- which(diff(t) < tau * 10 ^ -10)
+  t <- t[t >= 0]
+  idx <- which(diff(t) < tau * 10 ^ -10)
   t <- t(setdiff(1:length(t), idx))
   T_ <- length(t)

Modified: pkg/Meucci/R/LognormalParameters2Statistics.R
===================================================================
--- pkg/Meucci/R/LognormalParameters2Statistics.R	2015-08-12 07:03:47 UTC (rev 3944)
+++ pkg/Meucci/R/LognormalParameters2Statistics.R	2015-08-12 08:50:54 UTC (rev 3945)
@@ -1,33 +1,37 @@
-#' @title Compute expectation, covariance, standard deviation and correlation for a lognormal distribution.
+#' @title Compute expectation, covariance, standard deviation and correlation
+#' for a lognormal distribution.
 #'
-#' @description Compute expectation, covariance, standard deviation and correlation for a lognormal distribution, as described in
-#' A. Meucci "Risk and Asset Allocation", Springer, 2005.
+#' @description Compute expectation, covariance, standard deviation and +#' correlation for a lognormal distribution, as described in A. Meucci +#' "Risk and Asset Allocation", Springer, 2005. #' #' @param Mu : [vector] (N x 1) location parameter #' @param Sigma : [matrix] (N x N) scale parameter #' -#' +#' #' @return Exp : [vector] (N x 1) expectation #' @return Cov : [matrix] (N x N) covariance #' @return Std : [vector] (N x 1) standard deviation #' @return Corr : [matrix] (N x N) correlation #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 85 - Correlation in lognormal markets". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 85 - Correlation in lognormal markets". #' #' See Meucci's script for "LognormalParam2Statistics.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -LognormalParam2Statistics = function( Mu, Sigma ) +LognormalParam2Statistics <- function(Mu, Sigma) { - Exp = exp( Mu + (1/2) * diag( Sigma ) ); - Cov = exp( Mu + (1/2) * diag( Sigma ) ) %*% t( exp( Mu + (1/2) * diag( Sigma ) ) ) * ( exp( Sigma ) - 1 ); - Std = sqrt( diag( Cov ) ); - Corr = diag( 1 / Std ) %*% Cov %*% diag( 1 / Std ); + Exp <- exp(Mu + (1 / 2) * diag(Sigma)) + Cov <- exp(Mu + (1 / 2) * diag(Sigma)) %*% t(exp(Mu + (1 / 2) * + diag(Sigma))) * (exp(Sigma) - 1) + Std <- sqrt(diag(Cov)) + Corr <- diag(1 / Std) %*% Cov %*% diag(1 / Std) - return( list( Exp = Exp, Covariance = Cov, Standard_Deviation = Std, Correlation = Corr ) ); -} \ No newline at end of file + return(list(Exp = Exp, Covariance = Cov, Standard_Deviation = Std, + Correlation = Corr)); +} Modified: pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R =================================================================== --- pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 2015-08-12 07:03:47 UTC (rev 3944) +++ pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 2015-08-12 08:50:54 UTC (rev 3945) @@ -12,67 +12,70 @@ #' @note Numerically and analytically the marginal pdf of #' - the first entry of the random vector Mu #' - the (1,1)-entry of the random matrix inv(Sigma) -#' when Mu and Sigma are jointly normal-inverse-Wishart: Mu ~ St(Mu_0,Sigma/T_0) -#' inv(Sigma) ~ W(Nu_0,inv(Sigma_0)/Nu_0) +#' when Mu and Sigma are jointly normal-inverse-Wishart: +#' Mu ~ St(Mu_0,Sigma/T_0) +#' inv(Sigma) ~ W(Nu_0,inv(Sigma_0)/Nu_0) #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}. 
#' #' See Meucci's script for "PlotMarginalsNormalInverseWishart.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -PlotMarginalsNormalInverseWishart = function(Mu_Simul, InvSigma_Simul, Mu_0, T_0, Sigma_0, Nu_0, Legend) -{ - NumSimulations = nrow( Mu_Simul ); - NumBins = round( 10 * log( NumSimulations )); +PlotMarginalsNormalInverseWishart <- function(Mu_Simul, InvSigma_Simul, Mu_0, + T_0, Sigma_0, Nu_0, Legend) { + NumSimulations <- nrow(Mu_Simul) + NumBins <- round(10 * log(NumSimulations)) - dev.new(); + dev.new() - ################################################################################################################# + ############################################################################ ### Mu # plot empirical pdf (histogram) - par( mfrow = c(2,1) ); - h = hist(Mu_Simul[ , 1 ], NumBins, plot= FALSE); - D = h$mids[ 2 ] - h$mids[ 1 ]; - n= h$counts /( D * NumSimulations ); - plot( h$mids, n, type = "h", main = bquote(paste( .(Legend)," ", mu)) ); - #barplot( n ); + par(mfrow = c(2,1)) + h <- hist(Mu_Simul[, 1], NumBins, plot= FALSE) + D <- h$mids[2] - h$mids[1] + n <- h$counts / (D * NumSimulations) + plot(h$mids, n, type = "h", main = bquote(paste(.(Legend)," ", mu))) + #barplot(n) # superimpose analytical expectation - points( Mu_0[ 1 ], 0, pch = 21, bg = "red" ); - + points(Mu_0[1], 0, pch = 21, bg = "red") + # superimpose analytical pdf - x_lo = min(Mu_Simul[ ,1 ]); - x_hi = max(Mu_Simul[ ,1 ]); - x_grid = seq( x_lo, x_hi, (x_hi-x_lo)/100 ); - m = Mu_0[ 1 ]; - s = sqrt(Sigma_0[ 1, 1] / T_0 ); - f = 1 / s * dt( (x_grid - m )/s, Nu_0 ); - lines(x_grid, f ,col = "red" ); + x_lo <- min(Mu_Simul[,1]) + x_hi <- max(Mu_Simul[,1]) + x_grid <- seq(x_lo, x_hi, (x_hi-x_lo) / 100) + m <- Mu_0[1] + s <- sqrt(Sigma_0[1, 1] / T_0) + f <- 1 / s * dt((x_grid - m)/s, Nu_0) + lines(x_grid, f,col = "red") - ################################################################################################################# + ############################################################################ ### Sigma # plot empirical pdf (histogram) - h = hist(InvSigma_Simul[ ,1 ], NumBins, plot= FALSE ); - D = h$mids[ 2 ] - h$mids[ 1 ]; - n= h$counts /( D * NumSimulations ); - plot( h$mids, n, type = "h", main = bquote(paste( .(Legend), " inv(Sigma)")) ); - + h <- hist(InvSigma_Simul[,1], NumBins, plot= FALSE) + D <- h$mids[2] - h$mids[1] + n <- h$counts /(D * NumSimulations) + plot(h$mids, n, type = "h", main = bquote(paste(.(Legend), + "inv(Sigma)"))) + # superimpose analytical expectation - InvSigma_0=solve(Sigma_0); - points(InvSigma_0[ 1, 1 ],0, pch = 21, bg = "red" ); + InvSigma_0 <- solve(Sigma_0) + points(InvSigma_0[1, 1], 0, pch = 21, bg = "red") # superimpose analytical pdf - x_lo = min(InvSigma_Simul[ ,1 ]); - x_hi = max(InvSigma_Simul[ ,1 ]); - x_grid = seq( x_lo, x_hi, (x_hi-x_lo)/100 ); - sigma_square = InvSigma_0[ 1, 1] / Nu_0; - A = Nu_0 / 2; - B = 2 * sigma_square; - f = dgamma(x_grid, shape = A, scale = B); - lines(x_grid, f, col = "red" ); -} \ No newline at end of file + x_lo <- min(InvSigma_Simul[,1]) + x_hi <- max(InvSigma_Simul[,1]) + x_grid <- seq(x_lo, x_hi, (x_hi - x_lo) / 100) + sigma_square <- InvSigma_0[1, 1] / Nu_0 + A <- Nu_0 / 2 + B <- 2 * sigma_square + f <- dgamma(x_grid, shape = A, scale = B) + lines(x_grid, f, col = "red") +} Modified: pkg/Meucci/R/RandNormalInverseWishart.R =================================================================== --- 
pkg/Meucci/R/RandNormalInverseWishart.R	2015-08-12 07:03:47 UTC (rev 3944)
+++ pkg/Meucci/R/RandNormalInverseWishart.R	2015-08-12 08:50:54 UTC (rev 3945)
@@ -1,7 +1,9 @@
-#' @title Generates a multivariate i.i.d. sample of lenght J from the normal-inverse-Wishart distribution.
+#' @title Generates a multivariate i.i.d. sample of length J from the
+#' normal-inverse-Wishart distribution.
 #'
-#' @description Generates a multivariate i.i.d. sample of lenght J from the normal-inverse-Wishart distribution, as described in
-#' A. Meucci "Risk and Asset Allocation", Springer, 2005.
+#' @description Generates a multivariate i.i.d. sample of length J from the
+#' normal-inverse-Wishart distribution, as described in A. Meucci "Risk and
+#' Asset Allocation", Springer, 2005.
 #'
 #' @param Mu_0 [vector] location parameter.
 #' @param T_0 [scalar] number of observations.
@@ -9,49 +11,50 @@
 #' @param nu_0 [scalar] degrees of freedom.
 #' @param J [scalar] number of simulations to compute.
 #'
-#' @return Mu [vector] location parameter from the normal-inverse-Wishart distribution.
-#' @return Sigma [matrix] dispersion parameter from the normal-inverse-Wishart distribution.
-#' @return InvSigma [matrix] inverse of the dispersion parameter from the normal-inverse-Wishart distribution.
+#' @return Mu [vector] location parameter from the
+#' normal-inverse-Wishart distribution.
+#' @return Sigma [matrix] dispersion parameter from the
+#' normal-inverse-Wishart distribution.
+#' @return InvSigma [matrix] inverse of the dispersion parameter from the
+#' normal-inverse-Wishart distribution.
 #'
 #' @note
-#' \deqn{\mu\| \Sigma \sim N(\mu_{0}, \frac{\Sigma}{T_{0}}) }{Mu|Sigma ~ N(Mu_0,Sigma/T_0)}
-#' \deqn{\Sigma^{-1} \sim W(\nu_{0},\frac{\Sigma_{0}^{-1}}{\nu_{0}})}{inv(Sigma) ~ W(Nu_0,inv(Sigma_0)/Nu_0)}
+#' \deqn{\mu | \Sigma \sim N(\mu_{0}, \frac{\Sigma}{T_{0}})}
+#' {Mu|Sigma ~ N(Mu_0,Sigma/T_0)}
+#' \deqn{\Sigma^{-1} \sim W(\nu_{0},\frac{\Sigma_{0}^{-1}}{\nu_{0}})}
+#' {inv(Sigma) ~ W(Nu_0,inv(Sigma_0)/Nu_0)}
 #'
 #' @references
-#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}.
-#' See Meucci's script for "RandNormalInverseWishart.m"
+#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
+#' \url{http://symmys.com/node/170}. See Meucci's script for
+#' "RandNormalInverseWishart.m"
 #'
-#' @author Xavier Valls \email{flamejat@@gmail.com}
+#' @author Xavier Valls \email{xaviervallspla@@gmail.com}
 #' @export

-RandNormalInverseWishart = function(Mu_0, T_0, Sigma_0, nu_0, J)
-{
-  N = length( Mu_0 );
-  VecIndex = NULL;
-  for( n in 1 : N )
-  {
-    VecIndex[ n ] = cbind( VecIndex, ( n-1 ) * N +( n:N ) ); ##ok
+RandNormalInverseWishart <- function(Mu_0, T_0, Sigma_0, nu_0, J) {
+  N <- length(Mu_0)
+  VecIndex <- NULL
+  for (n in 1 : N) {
+    VecIndex <- c(VecIndex, (n - 1) * N + (n:N))  # upper-triangular indices
  }
-  invSigma_0 = solve(Sigma_0) %*% diag( 1, dim( Sigma_0 ));
-  Phi = invSigma_0 / nu_0;
+  invSigma_0 <- solve(Sigma_0) %*% diag(1, dim(Sigma_0))
+  Phi <- invSigma_0 / nu_0

-  Mu = NULL;
-  Sigma = NULL;
-  InvSigma = NULL;
+  Mu <- NULL
+  Sigma <- NULL
+  InvSigma <- NULL

-  for( j in 1 : J )
-  {
-    Inv_Sigma = rwishart( df = nu_0, Sigma = Phi );
-    InvSigma = rbind( InvSigma, Inv_Sigma[ VecIndex ] );
-
-    S = solve(Inv_Sigma) %*% diag( 1, dim( Inv_Sigma ) );
-    Sigma = rbind( Sigma, S[VecIndex] );
-
-    M = rmvnorm( nrow(Mu_0) * nrow(S/T_0), Mu_0, S/T_0);
-    Mu = rbind( Mu, M );
+  for (j in 1 : J) {
+    Inv_Sigma <- rwishart(df = nu_0, Sigma = Phi)
+    InvSigma <- rbind(InvSigma, Inv_Sigma[VecIndex])
+
+    S <- solve(Inv_Sigma) %*% diag(1, dim(Inv_Sigma))
+    Sigma <- rbind(Sigma, S[VecIndex])
+
+    M <- rmvnorm(nrow(Mu_0) * nrow(S / T_0), Mu_0, S / T_0)
+    Mu <- rbind(Mu, M)
   }
-
-  return( list( Mu = Mu, Sigma = Sigma , InvSigma = InvSigma ) )
+  return(list(Mu = Mu, Sigma = Sigma, InvSigma = InvSigma))
 }
-

Modified: pkg/Meucci/demo/00Index
===================================================================
--- pkg/Meucci/demo/00Index	2015-08-12 07:03:47 UTC (rev 3944)
+++ pkg/Meucci/demo/00Index	2015-08-12 08:50:54 UTC (rev 3945)
@@ -1,107 +1,234 @@
-AnalyticalvsNumerical compares the numerical and the analytical solution of entropy-pooling
-ButterflyTrading performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci
-DetectOutliersviaMVE detects outliers in two-asset and multi-asset case
-FullyFlexibleBayesNets uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management
-FullFlexProbs uses Entropy Pooling to compute Fully Flexible Probabilities for historical scenarios
-FullyIntegratedLiquidityAndMarketRisk computes the liquidity-risk and funding-risk adjusted P&L distribution
-HermiteGrid_CaseStudy estimates the prior of a hedge fund return and processes extreme views on CVaR according to Entropy Pooling
-HermiteGrid_CVaR_Recursion illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling
-HermiteGrid_demo compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views
-InvariantProjection projects summary statistics to arbitrary horizons under i.i.d. assumption
-MeanDiversificationFrontier computes the mean-diversification efficient frontier
-Prior2Posterior compares the numerical and the analytical solution of entropy-pooling
-RankingInformation performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci
-RobustBayesianAllocation replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M
-S_AnalyzeLognormalCorrelation considers a bivariate lognormal market and display the correlation and the condition number of the covariance matrix
-S_AnalyzeNormalCorrelation considers a bivariate normal market and display the correlation and the condition number of the covariance matrix
-S_AnalyzeNormalInverseWishart familiarizes the users with multivariate Bayesian estimation.
-S_AutocorrelatedProcess simulates a Ornstein-Uhlenbeck AR(1) process
-S_BivariateSample generates draws from a bivariate distribution with different marginals
-S_BlackLittermanBasic describes to basic market-based Black-Litterman approach
-S_BondProjectionPricingNormal projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon
-S_BondProjectionPricingStudentT projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon (Student's T assumption)
+AnalyticalvsNumerical compares the numerical and the analytical
+ solution of entropy-pooling
+ButterflyTrading performs the butterfly-trading case study
+ for the Entropy-Pooling approach by Attilio Meucci
+DetectOutliersviaMVE detects outliers in two-asset and
+ multi-asset case
+FullyFlexibleBayesNets uses Entropy Pooling to compute Fully
+ Flexible Bayesian networks for risk management
+FullFlexProbs uses Entropy Pooling to compute Fully
+ Flexible Probabilities for historical scenarios
+FullyIntegratedLiquidityAndMarketRisk computes the liquidity-risk and
+ funding-risk adjusted P&L distribution
+HermiteGrid_CaseStudy estimates the prior of a hedge fund return
+ and processes extreme views on CVaR according to Entropy Pooling
+HermiteGrid_CVaR_Recursion illustrates the discrete Newton recursion
+ to process views on CVaR according to Entropy Pooling
+HermiteGrid_demo compares the performance of plain Monte
+ Carlo versus grid in applying Entropy Pooling to process extreme views
+InvariantProjection projects summary statistics to arbitrary
+ horizons under i.i.d. assumption
+MeanDiversificationFrontier computes the mean-diversification efficient
+ frontier
+Prior2Posterior compares the numerical and the analytical
+ solution of entropy-pooling
+RankingInformation performs ranking allocation using the
+ Entropy-Pooling approach by Attilio Meucci
+RobustBayesianAllocation replicates the example from Meucci's
+ MATLAB script S_SimulationsCaseStudy.M
+S_AnalyzeLognormalCorrelation considers a bivariate lognormal market
+ and displays the correlation and the condition number of the covariance matrix
+S_AnalyzeNormalCorrelation considers a bivariate normal market and
+ displays the correlation and the condition number of the covariance matrix
+S_AnalyzeNormalInverseWishart familiarizes the user with multivariate
+ Bayesian estimation.
+S_AutocorrelatedProcess simulates an Ornstein-Uhlenbeck AR(1)
+ process
+S_BivariateSample generates draws from a bivariate
+ distribution with different marginals
+S_BlackLittermanBasic describes the basic market-based
+ Black-Litterman approach
+S_BondProjectionPricingNormal projects the distribution of the market
+ invariants for the bond markets from the estimation interval to the investment
+ horizon
+S_BondProjectionPricingStudentT projects the distribution of the market
+ invariants for the bond markets from the estimation interval to the investment
+ horizon (Student's T assumption)
 S_BuyNHold illustrates the buy & hold dynamic strategy
-S_CPPI illustrates the CPPI (constant proportion portfolio insurance) dynamic strategy
-S_CallsProjectionPricing projects the distribution of the market invariants for the derivatives market and computes the distribution of prices at the investment horizon
-S_CheckDiagonalization verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the transition matrix of an OU process
-S_CornishFisher compares the Cornish-Fisher estimate of the VaR with the true analytical VaR under the lognormal assumptions
-S_CorrelationPriorUniform shows how a jointly uniform prior on the correlations implies that the marginal distribution of each correlation is peaked around zero
-S_CovarianceEvolution represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid
-S_CrossSectionConstrainedIndustries fits a cross-sectional linear factor model creating industry factors, where the industry factors are constrained to be uncorrelated with the market
-S_CrossSectionIndustries fits a cross-sectional linear factor model creating industry factors
-S_DerivativesInvariants performs the quest for invariance in the derivatives market
-S_DeterministicEvolution animates the evolution of the determinstic component of an OU process
-S_DisplayLognormalCopulaPdf displays the pdf of the copula of a lognormal distribution
-S_DisplayNormalCopulaCdf displays the cdf of the copula of a normal distribution
-S_DisplayNormalCopulaPdf displays the pdf of the copula of a normal distribution
-S_DisplayStudentTCopulaPdf displays the pdf of the copula of a Student t distribution
-S_ESContributionFactors computes the expected shortfall and the contributions to ES from each factor in simulations
-S_ESContributionsStudentT computes the expected shortfall and the contributions to ES from each security
-S_EigenvalueDispersion displays the sample eigenvalues dispersion phenomenon
-S_EllipticalNDim decomposes the N-variate normal distribution into its radial and uniform components to generate an elliptical distribution
+S_CPPI illustrates the CPPI (constant proportion
+ portfolio insurance) dynamic strategy
+S_CallsProjectionPricing projects the distribution of the market
+ invariants for the derivatives market and computes the distribution of prices
+ at the investment horizon
+S_CheckDiagonalization verifies the correctness of the
+ eigenvalue-eigenvector representation in terms of real matrices for the
+ transition matrix of an OU process
+S_CornishFisher compares the Cornish-Fisher estimate of
+ the VaR with the true analytical VaR under the lognormal assumptions
+S_CorrelationPriorUniform shows how a jointly uniform prior on the
+ correlations implies that the marginal distribution of each correlation is
+ peaked around zero
+S_CovarianceEvolution represents the evolution of the covariance
+ of an OU process in terms of the
+ dispersion ellipsoid
+S_CrossSectionConstrainedIndustries fits a cross-sectional linear factor model
+ creating industry factors, where the industry factors are constrained to be
+ uncorrelated with the market
+S_CrossSectionIndustries fits a cross-sectional linear factor model
+ creating industry factors
+S_DerivativesInvariants performs the quest for invariance in the
+ derivatives market
+S_DeterministicEvolution animates the evolution of the deterministic
+ component of an OU process
+S_DisplayLognormalCopulaPdf displays the pdf of the copula of a
+ lognormal distribution
+S_DisplayNormalCopulaCdf displays the cdf of the copula of a normal
+ distribution
+S_DisplayNormalCopulaPdf displays the pdf of the copula of a normal
+ distribution
+S_DisplayStudentTCopulaPdf displays the pdf of the copula of a Student
+ t distribution
+S_ESContributionFactors computes the expected shortfall and the
+ contributions to ES from each factor in simulations
+S_ESContributionsStudentT computes the expected shortfall and the
+ contributions to ES from each security
+S_EigenvalueDispersion displays the sample eigenvalues dispersion
+ phenomenon
+S_EllipticalNDim decomposes the N-variate normal
+ distribution into its radial and uniform components to generate an elliptical
+ distribution
 S_EntropyView illustrates the Entropy Pooling approach
-S_EquitiesInvariants performs the quest for invariance in the stock market
-S_EquityProjectionPricing projects the distribution of the market invariants for the stock market from the estimation interval (normal assumption) to the investment horizon. Then it computes the distribution of prices at the investment horizon analytically.
-S_EstimateExpectedValueEvaluation script familiarizes the user with the evaluation of an estimator replicability, loss, error, bias and inefficiency
-S_EstimateMomentsComboEvaluation familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency
-S_EstimateQuantileEvaluation familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency
-S_Estimator familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency
+S_EquitiesInvariants performs the quest for invariance in the
+ stock market
+S_EquityProjectionPricing projects the distribution of the market
+ invariants for the stock market from the estimation interval (normal
+ assumption) to the investment horizon. Then it computes the distribution of
+ prices at the investment horizon analytically.
+S_EstimateExpectedValueEvaluation familiarizes the user with the
+ evaluation of an estimator: replicability, loss, error, bias and inefficiency
+S_EstimateMomentsComboEvaluation familiarizes the user with the evaluation
+ of an estimator: replicability, loss, error, bias and inefficiency
+S_EstimateQuantileEvaluation familiarizes the user with the evaluation
+ of an estimator: replicability, loss, error, bias and inefficiency
+S_Estimator familiarizes the user with the evaluation
+ of an estimator: replicability, loss, error, bias and inefficiency
 S_EvaluationGeneric determines the optimal allocation
-S_ExactMeanAndCovariance generate draws from a multivariate normal with matching mean and covariance
-S_ExpectationMaximizationHighYield implements the Expectation-Maximization (EM) algoritm, which estimates the parameters of a multivariate normal distribution when some observations are randomly missing
-S_ExtremeValueTheory computes the quantile (VaR) analytically, in simulations and using the extreme value theory approximation
-S_FactorAnalysisNotOk illustrates the hidden factor analysis puzzle
-S_FactorResidualCorrelation illustrates exogenous loadings and endogenous factors the true analytical VaR under the lognormal assumptions from the estimation interval to the investment horizon
-S_FitProjectRates fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution
-S_FitSwapToStudentT demonstrates the recursive ML estimation of the location and scatter parameters of a multivariate Student t distribution
-S_FixedIncomeInvariants performs the quest for invariance in the fixed income market
+S_ExactMeanAndCovariance generates draws from a multivariate normal
+ with matching mean and covariance
+S_ExpectationMaximizationHighYield implements the Expectation-Maximization
+ (EM) algorithm, which estimates the parameters of a multivariate normal
+ distribution when some observations are randomly missing
+S_ExtremeValueTheory computes the quantile (VaR) analytically,
+ in simulations and using the extreme value theory approximation
+S_FactorAnalysisNotOk illustrates the hidden factor analysis
+ puzzle
+S_FactorResidualCorrelation illustrates exogenous loadings and
+ endogenous factors
+S_FitProjectRates fits the swap rates dynamics to a
+ multivariate Ornstein-Uhlenbeck process and computes and plots the estimated
+ future distribution
+S_FitSwapToStudentT demonstrates the recursive ML estimation of
+ the location and scatter parameters of a multivariate Student t distribution
+S_FixedIncomeInvariants performs the quest for invariance in the
+ fixed income market
 S_FullCodependence illustrates the concept of co-dependence
-S_FxCopulaMarginal displays the empirical copula of a set of market variables
+S_FxCopulaMarginal displays the empirical copula of a set of
+ market variables
 S_GenerateMixtureSample generates draws from a univariate mixture
-S_HedgeOptions compares hedging based on Black-Scholes deltas with Factors on Demand hedging
-S_HorizonEffect studies horizon effect on explicit factors / implicit loadings linear model
-S_InvestorsObjective familiarizes the users with the objectives of different investors in a highly non-normal bi-variate market of securities
+S_HedgeOptions compares hedging based on Black-Scholes
+ deltas with Factors on Demand hedging
+S_HorizonEffect studies horizon effect on explicit factors /
+ implicit loadings linear model
+S_InvestorsObjective familiarizes the user with the objectives
+ of different investors in a highly non-normal bi-variate market of securities
 S_JumpDiffusionMerton simulates a jump-diffusion process
-S_LinVsLogReturn project a distribution in the future according to the i.i.d.-implied square-root rule
+S_LinVsLogReturn projects a distribution in the future
+ according to the i.i.d.-implied square-root rule
 S_LognormalSample simulates univariate lognormal variables
-S_MarkovChainMonteCarlo illustrates the Metropolis-Hastings algorithm
-S_MaxMinVariance dispays location-dispersion ellipsoid and statistic
-S_MaximumLikelihood performs ML under a non-standard parametric set of distributions
-S_MeanVarianceBenchmark projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and translates this distribution into the returns distribution
-S_MeanVarianceCalls computes the mean-variance frontier of a set of options
-S_MeanVarianceHorizon projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance optimization in terms of returns and relative portfolio weights.
-S_MeanVarianceOptimization projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance optimization.
-S_MultiVarSqrRootRule illustrates the multivariate square root rule-of-thumb
-S_NonAnalytical generates draws for the sum of random variable
+S_MarkovChainMonteCarlo illustrates the Metropolis-Hastings
+ algorithm
+S_MaxMinVariance displays location-dispersion ellipsoid and
+ statistic
+S_MaximumLikelihood performs ML under a non-standard parametric
+ set of distributions
+S_MeanVarianceBenchmark projects the distribution of the market
+ invariants for the bond and stock markets from the estimation interval to the
+ investment horizon. Then it computes the distribution of prices at the
+ investment horizon and translates this distribution into the returns
+ distribution
+S_MeanVarianceCalls computes the mean-variance frontier of a
+ set of options
+S_MeanVarianceHorizon projects the distribution of the market
+ invariants for the bond and stock markets from the estimation interval to the
+ investment horizon. Then it computes the distribution of prices at the
+ investment horizon and performs the two-step mean-variance optimization in
+ terms of returns and relative portfolio weights.
+S_MeanVarianceOptimization projects the distribution of the market
+ invariants for the bond and stock markets from the estimation interval to the
+ investment horizon. Then it computes the distribution of prices at the
+ investment horizon and performs the two-step mean-variance optimization.
+S_MultiVarSqrRootRule illustrates the multivariate square root
+ rule-of-thumb
+S_NonAnalytical generates draws for the sum of random
+ variables
 S_NormalSample simulates univariate normal variables
-S_OrderStatisticsPdfLognormal script shows that the pdf of the r-th order statistics of a lognormal random variable
-S_OrderStatisticsPdfStudentT script shows that the pdf of the r-th order statistics of a tudent t random variable
-S_PasturMarchenko illustrate the Marchenko-Pastur limit of runifom matrix theory
-S_ProjectNPriceMvGarch fits a multivariate GARCH model and projects the distribution of the compounded returns from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon.
-S_ProjectSummaryStatistics projects summary statistics to arbitrary horizons
-S_PureResidualBonds models the joint distribution of the yet-to-be realized key rates of the government curve
+S_OrderStatisticsPdfLognormal shows the pdf of the r-th order
+ statistic of a lognormal random variable
+S_OrderStatisticsPdfStudentT shows the pdf of the r-th order
+ statistic of a Student t random variable
+S_PasturMarchenko illustrates the Marchenko-Pastur limit of
+ random matrix theory
+S_ProjectNPriceMvGarch fits a multivariate GARCH model and
+ projects the distribution of the compounded returns from the estimation
+ interval to the investment horizon. Then it computes the distribution of prices
+ at the investment horizon.
+S_ProjectSummaryStatistics projects summary statistics to arbitrary
+ horizons
+S_PureResidualBonds models the joint distribution of the
+ yet-to-be realized key rates of the government curve
 S_ResidualAnalysisTheory performs the analysis of residuals
 S_SelectionHeuristics computes the r-square of selected factors
-S_SemiCircular illustrate the semi-circular law of random matrix theory [TRUNCATED]

To get the complete diff run:
    svnlook diff /svnroot/returnanalytics -r 3945

From noreply at r-forge.r-project.org  Wed Aug 12 11:33:02 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Wed, 12 Aug 2015 11:33:02 +0200 (CEST)
Subject: [Returnanalytics-commits] r3946 - in pkg/Meucci: R demo man
Message-ID: <20150812093302.61BBF187A60@r-forge.r-project.org>

Author: xavierv
Date: 2015-08-12 11:33:01 +0200 (Wed, 12 Aug 2015)
New Revision: 3946

Modified: pkg/Meucci/R/BlackLittermanFormula.R pkg/Meucci/R/BlackScholesCallPrice.R pkg/Meucci/R/CentralAndStandardizedStatistics.R pkg/Meucci/R/ConvertChangeInYield2Price.R pkg/Meucci/R/CovertCompoundedReturns2Price.R pkg/Meucci/R/DoubleDecay.R pkg/Meucci/R/EfficientFrontierPrices.R pkg/Meucci/R/EfficientFrontierReturns.R pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R pkg/Meucci/R/Fit2Moms.R pkg/Meucci/R/FitExpectationMaximization.R pkg/Meucci/R/FitMultivariateGarch.R pkg/Meucci/R/FitOrnsteinUhlenbeck.R pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R pkg/Meucci/R/InterExtrapolate.R pkg/Meucci/R/LeastInfoKernel.R pkg/Meucci/R/Log2Lin.R pkg/Meucci/R/LognormalCopulaPdf.R pkg/Meucci/R/LognormalMoments2Parameters.R pkg/Meucci/R/MaxRsqCS.R pkg/Meucci/R/MaxRsqTS.R pkg/Meucci/R/MleRecursionForStudentT.R pkg/Meucci/R/MvnRnd.R pkg/Meucci/R/NormalCopulaPdf.R pkg/Meucci/R/PerformIidAnalysis.R pkg/Meucci/R/PlotCompositionEfficientFrontier.R pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R pkg/Meucci/R/ProjectionStudentT.R pkg/Meucci/R/QuantileMixture.R pkg/Meucci/R/SimulateJumpDiffusionMerton.R pkg/Meucci/R/StudentTCopulaPdf.R pkg/Meucci/R/TwoDimEllipsoid.R
pkg/Meucci/R/pHistPriorPosterior.R pkg/Meucci/demo/ButterflyTrading.R pkg/Meucci/demo/FullFlexProbs.R pkg/Meucci/demo/S_BivariateSample.R pkg/Meucci/demo/S_BlackLittermanBasic.R pkg/Meucci/demo/S_BondProjectionPricingNormal.R pkg/Meucci/demo/S_BondProjectionPricingStudentT.R pkg/Meucci/demo/S_BuyNHold.R pkg/Meucci/demo/S_CPPI.R pkg/Meucci/demo/S_CallsProjectionPricing.R pkg/Meucci/demo/S_CornishFisher.R pkg/Meucci/demo/S_CorrelationPriorUniform.R pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R pkg/Meucci/demo/S_CrossSectionIndustries.R pkg/Meucci/demo/S_DerivativesInvariants.R pkg/Meucci/demo/S_DisplayLognormalCopulaPdf.R pkg/Meucci/demo/S_DisplayNormalCopulaCdf.R pkg/Meucci/demo/S_DisplayNormalCopulaPdf.R pkg/Meucci/demo/S_DisplayStudentTCopulaPdf.R pkg/Meucci/demo/S_ESContributionFactors.R pkg/Meucci/demo/S_ESContributionsStudentT.R pkg/Meucci/demo/S_EigenvalueDispersion.R pkg/Meucci/demo/S_EllipticalNDim.R pkg/Meucci/demo/S_EntropyView.R pkg/Meucci/demo/S_EquitiesInvariants.R pkg/Meucci/demo/S_EquityProjectionPricing.R pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R pkg/Meucci/demo/S_EstimateQuantileEvaluation.R pkg/Meucci/demo/S_Estimator.R pkg/Meucci/demo/S_EvaluationGeneric.R pkg/Meucci/demo/S_ExactMeanAndCovariance.R pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R pkg/Meucci/demo/S_ExtremeValueTheory.R pkg/Meucci/demo/S_FactorAnalysisNotOk.R pkg/Meucci/demo/S_FactorResidualCorrelation.R pkg/Meucci/demo/S_FitSwapToStudentT.R pkg/Meucci/demo/S_FixedIncomeInvariants.R pkg/Meucci/demo/S_FullCodependence.R pkg/Meucci/demo/S_FxCopulaMarginal.R pkg/Meucci/demo/S_GenerateMixtureSample.R pkg/Meucci/demo/S_HedgeOptions.R pkg/Meucci/demo/S_HorizonEffect.R pkg/Meucci/demo/S_InvestorsObjective.R pkg/Meucci/demo/S_JumpDiffusionMerton.R pkg/Meucci/demo/S_LinVsLogReturn.R pkg/Meucci/demo/S_LognormalSample.R pkg/Meucci/demo/S_MarkovChainMonteCarlo.R pkg/Meucci/demo/S_MaxMinVariance.R pkg/Meucci/demo/S_MaximumLikelihood.R pkg/Meucci/demo/S_MeanVarianceBenchmark.R pkg/Meucci/demo/S_MeanVarianceCalls.R pkg/Meucci/demo/S_MeanVarianceHorizon.R pkg/Meucci/demo/S_MeanVarianceOptimization.R pkg/Meucci/demo/S_MultiVarSqrRootRule.R pkg/Meucci/demo/S_NonAnalytical.R pkg/Meucci/demo/S_NormalSample.R pkg/Meucci/demo/S_OrderStatisticsPdfLognormal.R pkg/Meucci/demo/S_OrderStatisticsPdfStudentT.R pkg/Meucci/demo/S_ProjectNPriceMvGarch.R pkg/Meucci/demo/S_ProjectSummaryStatistics.R pkg/Meucci/demo/S_PureResidualBonds.R pkg/Meucci/demo/S_ResidualAnalysisTheory.R pkg/Meucci/demo/S_SelectionHeuristics.R pkg/Meucci/demo/S_StatArbSwaps.R pkg/Meucci/demo/S_StudentTSample.R pkg/Meucci/demo/S_SwapPca2Dim.R pkg/Meucci/demo/S_TStatApprox.R pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R pkg/Meucci/demo/S_TimeSeriesIndustries.R pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R pkg/Meucci/demo/S_Toeplitz.R pkg/Meucci/demo/S_UtilityMax.R pkg/Meucci/demo/S_VaRContributionsUniform.R pkg/Meucci/demo/S_VolatilityClustering.R pkg/Meucci/demo/S_Wishart.R pkg/Meucci/demo/S_WishartCorrelation.R pkg/Meucci/demo/S_WishartLocationDispersion.R pkg/Meucci/man/BlackLittermanFormula.Rd pkg/Meucci/man/BlackScholesCallPrice.Rd pkg/Meucci/man/CentralAndStandardizedStatistics.Rd pkg/Meucci/man/ConvertChangeInYield2Price.Rd pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd pkg/Meucci/man/DoubleDecay.Rd pkg/Meucci/man/EfficientFrontierPrices.Rd pkg/Meucci/man/EfficientFrontierReturns.Rd pkg/Meucci/man/EfficientFrontierReturnsBenchmark.Rd pkg/Meucci/man/Fit2Moms.Rd 
pkg/Meucci/man/FitExpectationMaximization.Rd pkg/Meucci/man/FitMultivariateGarch.Rd pkg/Meucci/man/FitOrnsteinUhlenbeck.Rd pkg/Meucci/man/GenerateUniformDrawsOnUnitSphere.Rd pkg/Meucci/man/InterExtrapolate.Rd pkg/Meucci/man/LeastInfoKernel.Rd pkg/Meucci/man/Log2Lin.Rd pkg/Meucci/man/LognormalCopulaPdf.Rd pkg/Meucci/man/LognormalMoments2Parameters.Rd pkg/Meucci/man/MaxRsqCS.Rd pkg/Meucci/man/MaxRsqTS.Rd pkg/Meucci/man/MleRecursionForStudentT.Rd pkg/Meucci/man/MvnRnd.Rd pkg/Meucci/man/NormalCopulaPdf.Rd pkg/Meucci/man/PerformIidAnalysis.Rd pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd pkg/Meucci/man/PlotVolVsCompositionEfficientFrontier.Rd pkg/Meucci/man/ProjectionStudentT.Rd pkg/Meucci/man/QuantileMixture.Rd pkg/Meucci/man/SimulateJumpDiffusionMerton.Rd pkg/Meucci/man/StudentTCopulaPdf.Rd pkg/Meucci/man/TwoDimEllipsoid.Rd pkg/Meucci/man/garch1f4.Rd pkg/Meucci/man/garch2f8.Rd pkg/Meucci/man/pHistPriorPosterior.Rd Log: Replacement of email and formatted demoscripts and relating functions up to S_BlackLittermanBasic Modified: pkg/Meucci/R/BlackLittermanFormula.R =================================================================== --- pkg/Meucci/R/BlackLittermanFormula.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/BlackLittermanFormula.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -1,7 +1,9 @@ -#' @title Computes the Black-Litterman formula for the moments of the posterior normal. +#' @title Computes the Black-Litterman formula for the moments of the posterior +#' normal. #' -#' @description This function computes the Black-Litterman formula for the moments of the posterior normal, as described in -#' A. Meucci, "Risk and Asset Allocation", Springer, 2005. +#' @description This function computes the Black-Litterman formula for the +#' moments of the posterior normal, as described in A. Meucci, "Risk and Asset +#' Allocation", Springer, 2005. #' #' @param Mu [vector] (N x 1) prior expected values. #' @param Sigma [matrix] (N x N) prior covariance matrix. @@ -13,18 +15,17 @@ #' @return BLSigma [matrix] (N x N) posterior covariance matrix. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}. 
See Meucci's script for +#' "BlackLittermanFormula.m" #' -#' See Meucci's script for "BlackLittermanFormula.m" -#' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -BlackLittermanFormula = function( Mu, Sigma, P, v, Omega) -{ - BLMu = Mu + Sigma %*% t( P ) %*% ( solve( P %*% Sigma %*% t( P ) + Omega ) %*% ( v - P %*% Mu ) ); - BLSigma = Sigma - Sigma %*% t( P ) %*% ( solve( P %*% Sigma %*% t( P ) + Omega ) %*% ( P %*% Sigma ) ); - - return( list( BLMu = BLMu , BLSigma = BLSigma ) ); - -} \ No newline at end of file +BlackLittermanFormula <- function(Mu, Sigma, P, v, Omega) { + BLMu <- Mu + Sigma %*% t(P) %*% (solve(P %*% Sigma %*% t(P) + Omega) + %*% (v - P %*% Mu)) + BLSigma <- Sigma - Sigma %*% t(P) %*% (solve(P %*% Sigma %*% t(P) + Omega) + %*% (P %*% Sigma)) + return(list(BLMu = BLMu, BLSigma = BLSigma)) +} Modified: pkg/Meucci/R/BlackScholesCallPrice.R =================================================================== --- pkg/Meucci/R/BlackScholesCallPrice.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/BlackScholesCallPrice.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -22,7 +22,7 @@ #' #' See Meucci's script for "BlackScholesCallPrice.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export BlackScholesCallPrice = function( spot, K, r, vol, T ) Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R =================================================================== --- pkg/Meucci/R/CentralAndStandardizedStatistics.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/CentralAndStandardizedStatistics.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -22,7 +22,7 @@ #' #' A. Meucci - "Annualization and general projection of skweness, kurtosis, and all summary statistics", #' GARP Risk Professional August 2010, 55-56. \url{http://symmys.com/node/136}. -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export CentralAndStandardizedStatistics = function( X, N ) Modified: pkg/Meucci/R/ConvertChangeInYield2Price.R =================================================================== --- pkg/Meucci/R/ConvertChangeInYield2Price.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/ConvertChangeInYield2Price.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -17,7 +17,7 @@ #' #' A. Meucci - "Risk and Asset Allocation"-Springer (2005). See (6.77)-(6.79). #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export ConvertChangeInYield2Price = function( Exp_DY, Cov_DY, Times2Mat, CurrentPrices ) Modified: pkg/Meucci/R/CovertCompoundedReturns2Price.R =================================================================== --- pkg/Meucci/R/CovertCompoundedReturns2Price.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/CovertCompoundedReturns2Price.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -16,7 +16,7 @@ #' #' A. Meucci - "Risk and Asset Allocation"-Springer (2005). See (6.77)-(6.79). 
#' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export ConvertCompoundedReturns2Price = function(Exp_Comp_Rets, Cov_Comp_Rets, Starting_Prices) Modified: pkg/Meucci/R/DoubleDecay.R =================================================================== --- pkg/Meucci/R/DoubleDecay.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/DoubleDecay.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -14,7 +14,7 @@ #' \url{http://www.symmys.com/node/150} #' See Meucci script for "DoubleDecay.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export DoubleDecay = function( X, lmd_c, lmd_s) Modified: pkg/Meucci/R/EfficientFrontierPrices.R =================================================================== --- pkg/Meucci/R/EfficientFrontierPrices.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/EfficientFrontierPrices.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -18,7 +18,7 @@ #' #' See Meucci's script for "EfficientFrontierReturns.m". #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export EfficientFrontierPrices = function( NumPortf, Covariance, ExpectedValues, Current_Prices, Budget ) Modified: pkg/Meucci/R/EfficientFrontierReturns.R =================================================================== --- pkg/Meucci/R/EfficientFrontierReturns.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/EfficientFrontierReturns.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -17,7 +17,7 @@ #' #' See Meucci's script for "EfficientFrontierReturns.m". #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export EfficientFrontierReturns = function(NumPortf, Covariance, ExpectedValues, Constraints = NULL) Modified: pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R =================================================================== --- pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -18,7 +18,7 @@ #' #' See Meucci's script for "EfficientFrontierReturnsBenchmark.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export EfficientFrontierReturnsBenchmark = function(NumPortf, Covariance, ExpectedValues, Benchmark, Constraints = NULL) Modified: pkg/Meucci/R/Fit2Moms.R =================================================================== --- pkg/Meucci/R/Fit2Moms.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/Fit2Moms.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -13,7 +13,7 @@ #' \url{http://www.symmys.com/node/150} #' See Meucci script for "S_MainFullFlexProbs.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export Fit2Moms = function( X, m, S) Modified: pkg/Meucci/R/FitExpectationMaximization.R =================================================================== --- pkg/Meucci/R/FitExpectationMaximization.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/FitExpectationMaximization.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -20,7 +20,7 @@ #' #' Bilmes, J. A.- "A Gentle Tutorial of the EM Algorithm and its Application to Parameter Estimation for Gaussian Mixture #' and Hidden Markov Models", 1998. 
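#'
#' A hedged usage sketch (hypothetical data; assumes missing observations are
#' coded as NA and that mvtnorm is available):
#' \dontrun{
#' X <- mvtnorm::rmvnorm(1000, sigma = diag(2))  # complete bivariate sample
#' X[sample(length(X), 50)] <- NA                # knock out 50 entries
#' fit <- FitExpectationMaximization(X)          # EM estimates of the moments
#' }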
-#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export FitExpectationMaximization = function(X) Modified: pkg/Meucci/R/FitMultivariateGarch.R =================================================================== --- pkg/Meucci/R/FitMultivariateGarch.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/FitMultivariateGarch.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -19,7 +19,7 @@ #' #' See Meucci's script for "FitMultivariateGarch.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export FitMultivariateGarch = function( returns, demean = 1, eps = 0, df = 500 ) @@ -135,7 +135,7 @@ #' #' See Meucci's script for "FitMultivariateGarch.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export @@ -401,7 +401,7 @@ #' #' See Meucci's script for "FitMultivariateGarch.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export garch2f8 = function( y, c1, a1, b1, y1, h1, c2, a2, b2, y2, h2, df ) @@ -696,7 +696,7 @@ # # See Meucci's script for "FitMultivariateGarch.m" # -# @author Xavier Valls \email{flamejat@@gmail.com} +# @author Xavier Valls \email{xaviervallspla@@gmail.com} minfro = function( A ) { Modified: pkg/Meucci/R/FitOrnsteinUhlenbeck.R =================================================================== --- pkg/Meucci/R/FitOrnsteinUhlenbeck.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/FitOrnsteinUhlenbeck.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -19,7 +19,7 @@ #' #' See Meucci's script for "FitOrnsteinUhlenbeck.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export FitOrnsteinUhlenbeck = function( Y, tau ) Modified: pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R =================================================================== --- pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -18,7 +18,7 @@ #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
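#'
#' A sketch of the standard construction (an illustration; the function body
#' may differ): normalize i.i.d. Gaussian rows to unit length,
#' \dontrun{
#' Z <- matrix(rnorm(J * N), J, N)
#' U <- Z / sqrt(rowSums(Z ^ 2))  # J uniform draws on the unit sphere in R^N
#' }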
#' See Meucci's script for "GenerateUniformDrawsOnUnitSphere.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export GenerateUniformDrawsOnUnitSphere = function(J, N) Modified: pkg/Meucci/R/InterExtrapolate.R =================================================================== --- pkg/Meucci/R/InterExtrapolate.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/InterExtrapolate.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -24,7 +24,7 @@ #' #' See Meucci's script for "InterExtrapolate.R" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export # examples (MATLAB) Modified: pkg/Meucci/R/LeastInfoKernel.R =================================================================== --- pkg/Meucci/R/LeastInfoKernel.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/LeastInfoKernel.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -14,7 +14,7 @@ #' \url{http://www.symmys.com/node/150} #' See Meucci script for "LeastInfoKernel.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export LeastInfoKernel = function( Y, y, h2 ) Modified: pkg/Meucci/R/Log2Lin.R =================================================================== --- pkg/Meucci/R/Log2Lin.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/Log2Lin.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -14,7 +14,7 @@ #' #' See Meucci's script for "Log2Lin.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export Log2Lin = function( Mu, Sigma ) Modified: pkg/Meucci/R/LognormalCopulaPdf.R =================================================================== --- pkg/Meucci/R/LognormalCopulaPdf.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/LognormalCopulaPdf.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -15,7 +15,7 @@ #' #' See Meucci's script for "LognormalCopulaPdf.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export LognormalCopulaPdf = function( u, Mu, Sigma ) Modified: pkg/Meucci/R/LognormalMoments2Parameters.R =================================================================== --- pkg/Meucci/R/LognormalMoments2Parameters.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/LognormalMoments2Parameters.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -20,7 +20,7 @@ #' #' See Meucci's script for "LognormalMoments2Parameters.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export #determines $\mu$ and $\sigma^2$ from $\Expect\{X\}$ and $\Var\{X\}$, and uses it to determine $\mu$ Modified: pkg/Meucci/R/MaxRsqCS.R =================================================================== --- pkg/Meucci/R/MaxRsqCS.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/MaxRsqCS.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -26,7 +26,7 @@ #' #' See Meucci's script for "MaxRsqCS.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export MaxRsqCS = function(X, B, W, A = NULL, D = NULL, Aeq = NULL, Deq, lb = NULL, ub = NULL) Modified: pkg/Meucci/R/MaxRsqTS.R =================================================================== --- pkg/Meucci/R/MaxRsqTS.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/MaxRsqTS.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -21,7 +21,7 @@ #' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "MaxRsqTS.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export MaxRsqTS = function(X, F, W, A = NULL, D = NULL, Aeq = NULL, Deq, lb = NULL, ub = NULL) Modified: pkg/Meucci/R/MleRecursionForStudentT.R =================================================================== --- pkg/Meucci/R/MleRecursionForStudentT.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/MleRecursionForStudentT.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -22,7 +22,7 @@ #' #' See Meucci's script for "MleRecursionForStudentT.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export MleRecursionForStudentT = function(x, Nu, Tolerance = 10^(-10) ) Modified: pkg/Meucci/R/MvnRnd.R =================================================================== --- pkg/Meucci/R/MvnRnd.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/MvnRnd.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -15,7 +15,7 @@ #' #' See Meucci's script for "MvnRnd.m". #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export MvnRnd = function( M, S, J ) Modified: pkg/Meucci/R/NormalCopulaPdf.R =================================================================== --- pkg/Meucci/R/NormalCopulaPdf.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/NormalCopulaPdf.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -15,7 +15,7 @@ #' #' See Meucci's script for "NormalCopulaPdf.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export NormalCopulaPdf = function( u, Mu, Sigma ) Modified: pkg/Meucci/R/PerformIidAnalysis.R =================================================================== --- pkg/Meucci/R/PerformIidAnalysis.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/PerformIidAnalysis.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -20,7 +20,7 @@ #' #' See Meucci's script for "PerformIidAnalysis.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export PerformIidAnalysis = function( Dates = dim( Data)[1], Data, Str = "") Modified: pkg/Meucci/R/PlotCompositionEfficientFrontier.R =================================================================== --- pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -6,28 +6,27 @@ #' @param Portfolios : [matrix] (M x N) M portfolios of size N (weights) #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}. 
See Meucci's script for +#' "PlotCompositionEfficientFrontier.m" #' -#' See Meucci's script for "PlotCompositionEfficientFrontier.m" -#' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -PlotCompositionEfficientFrontier = function( Portfolios ) -{ - dev.new(); +PlotCompositionEfficientFrontier <- function (Portfolios) { + dev.new() - xx = dim( Portfolios )[ 1 ]; - N = dim( Portfolios )[ 2 ]; - Data = t( apply( Portfolios, 1, cumsum ) ); + xx <- dim(Portfolios)[1] + N <- dim(Portfolios)[2] + Data <- t(apply(Portfolios, 1, cumsum)) - plot( c(2000, 2000), xlim= c( 1, xx ), ylim = c( 0, max(Data) ), xlab = " Portfolio # risk propensity", ylab = "Portfolio composition" ); - - for( n in 1 : N ) - { - x = rbind( 1, matrix(1 : xx), xx ); - y = rbind( 0, matrix( Data[ , N-n+1 ] ), 0 ); - polygon( x, y, col = rgb( 0.9 - mod(n,3)*0.2, 0.9 - mod(n,3)*0.2, 0.9 - mod(n,3)*0.2) ); - } + plot(c(2000, 2000), xlim = c(1, xx), ylim = c(0, max(Data)), + xlab = " Portfolio # risk propensity", ylab = "Portfolio composition") -} \ No newline at end of file + for(n in 1:N) { + x <- rbind(1, matrix(1:xx), xx) + y <- rbind(0, matrix(Data[, N - n + 1]), 0) + polygon(x, y, col = rgb(0.9 - mod(n, 3) * 0.2, 0.9 - mod(n, 3) * 0.2, + 0.9 - mod(n , 3) * 0.2)) + } +} Modified: pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R =================================================================== --- pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -8,7 +8,7 @@ #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "PlotVolVsCompositionEfficientFrontier.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export PlotVolVsCompositionEfficientFrontier = function( Portfolios, vol ) Modified: pkg/Meucci/R/ProjectionStudentT.R =================================================================== --- pkg/Meucci/R/ProjectionStudentT.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/ProjectionStudentT.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -18,7 +18,7 @@ #' #' See Meucci's script for "ProjectionStudentT.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export ProjectionStudentT = function(nu, m, s, T) Modified: pkg/Meucci/R/QuantileMixture.R =================================================================== --- pkg/Meucci/R/QuantileMixture.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/QuantileMixture.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -19,7 +19,7 @@ #' #'See Meucci's script for "QuantileMixture.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export QuantileMixture = function( p, a, m_Y, s_Y, m_Z, s_Z ) Modified: pkg/Meucci/R/SimulateJumpDiffusionMerton.R =================================================================== --- pkg/Meucci/R/SimulateJumpDiffusionMerton.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/SimulateJumpDiffusionMerton.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -22,7 +22,7 @@ #' Merton, R. C., 1976. "Option pricing when underlying stocks are discontinuous". Journal of Financial #' Economics 3, 125-144. 
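#'
#' A one-step sketch of the simulated dynamics (a hedged reading of the
#' arguments: drift m, diffusion s, jump intensity l, normal jump sizes with
#' location a and dispersion D; see the function body for the exact scheme):
#' \dontrun{
#' dtau <- ts[2] - ts[1]
#' dX <- m * dtau + s * sqrt(dtau) * rnorm(1) +
#'       sum(rnorm(rpois(1, l * dtau), a, D))
#' }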
#' -#'@author Xavier Valls \email{flamejat@@gmail.com} +#'@author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export SimulateJumpDiffusionMerton = function( m, s, l, a, D, ts, J ) Modified: pkg/Meucci/R/StudentTCopulaPdf.R =================================================================== --- pkg/Meucci/R/StudentTCopulaPdf.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/StudentTCopulaPdf.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -17,7 +17,7 @@ #' #' See Meucci's script for "StudentTCopulaPdf.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export StudentTCopulaPdf = function( u, nu, Mu, Sigma ) Modified: pkg/Meucci/R/TwoDimEllipsoid.R =================================================================== --- pkg/Meucci/R/TwoDimEllipsoid.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/TwoDimEllipsoid.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -18,7 +18,7 @@ #' #' See Meucci's script for "TwoDimEllipsoid.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export Modified: pkg/Meucci/R/pHistPriorPosterior.R =================================================================== --- pkg/Meucci/R/pHistPriorPosterior.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/R/pHistPriorPosterior.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -12,7 +12,7 @@ #' #' See Meucci's script for "pHistPriorPosterior.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export pHistPriorPosterior = function( X, p, p_) Modified: pkg/Meucci/demo/ButterflyTrading.R =================================================================== --- pkg/Meucci/demo/ButterflyTrading.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/demo/ButterflyTrading.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -12,7 +12,7 @@ #' "ButterflyTrading/S_MAIN.m" #' #' -#' @author Xavier Valls \email{flamejat@@gmail.com} and Ram Ahluwalia +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} and Ram Ahluwalia #' \email{ram@@wingedfootcapital.com} ################################################################################ Modified: pkg/Meucci/demo/FullFlexProbs.R =================================================================== --- pkg/Meucci/demo/FullFlexProbs.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/demo/FullFlexProbs.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -17,7 +17,7 @@ #' \url{http://www.symmys.com/node/150} #' See Meucci script for "CallPrice.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} CallPrice <- function(P, K, r, t, s) { d_1 <- log(P / K) + (r + s * s / 2) * t @@ -42,7 +42,7 @@ #' \url{http://www.symmys.com/node/150}, #' See Meucci script for "DoubleDecay.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{xaviervallspla@@gmail.com} ########################################################################## # risk drivers scenarios Modified: pkg/Meucci/demo/S_BivariateSample.R =================================================================== --- pkg/Meucci/demo/S_BivariateSample.R 2015-08-12 08:50:54 UTC (rev 3945) +++ pkg/Meucci/demo/S_BivariateSample.R 2015-08-12 09:33:01 UTC (rev 3946) @@ -1,123 +1,128 @@ -#' This script generates draws from a bivariate distribution with different marginals, -#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. 
+#' This script generates draws from a bivariate distribution with different
+#' marginals, as described in A. Meucci, "Risk and Asset Allocation", Springer,
+#' 2005, Chapter 2.
 #'
 #' @references
-#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170},
-#' "E 38 - Normal copula and given marginals".
+#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
+#' \url{http://symmys.com/node/170}, "E 38 - Normal copula and given marginals".
 #'
 #' See Meucci's script for "S_BivariateSample.m"
 #'
-#' @author Xavier Valls \email{flamejat@@gmail.com}
+#' @author Xavier Valls \email{xaviervallspla@@gmail.com}

-if ( !require( "latticeExtra" ) ) stop("latticeExtra package installation required for this script")
+if (!require("latticeExtra"))
+  stop("latticeExtra package installation required for this script")

-###################################################################################################################
+################################################################################
 ### input parameters
-nSim = 10000;
+nSim <- 10000

 # input for bivariate normal distribution
-NormCorr = -0.8;
-NormStDev = rbind( 1, 3 ); # NOTE: this input plays no role in the final output
-NormExpVal = rbind( -2, 5 ); # NOTE: this input plays no role in the final output
+NormCorr <- -0.8
+NormStDev <- rbind(1, 3) # NOTE: this input plays no role in the final output
+NormExpVal <- rbind(-2, 5) # NOTE: this input plays no role in the final output

 # input for first marginal
-nu_1 = 9;
-sigmasq_1 = 2;
+nu_1 <- 9
+sigmasq_1 <- 2

-mu_2 = 0;
-sigmasq_2 = 0.04;
+mu_2 <- 0
+sigmasq_2 <- 0.04

 # input for second marginal
-nu_2 = 7;
+nu_2 <- 7

-###################################################################################################################
+################################################################################
 ### Generate draws from a bivariate normal distribution
-NormCorrMatrix = rbind( c( 1, NormCorr ), c( NormCorr, 1 ));
-NormCovMatrix = diag( c( NormStDev ) ) %*% NormCorrMatrix %*% diag( c( NormStDev) );
+NormCorrMatrix <- rbind(c(1, NormCorr), c(NormCorr, 1))
+NormCovMatrix <- diag(c(NormStDev)) %*% NormCorrMatrix %*% diag(c(NormStDev))

-Z = rmvnorm( nSim, NormExpVal, NormCovMatrix );
+Z <- rmvnorm(nSim, NormExpVal, NormCovMatrix)

-Z_1 = Z[, 1];
-Z_2 = Z[, 2];
+Z_1 <- Z[, 1]
+Z_2 <- Z[, 2]

 # display marginals: as expected, they are normal
-dev.new();
-NumBins = round(10 * log(nSim));
-par( mfrow = c( 2, 1) );
-hist( Z_1, NumBins, xlab = "normal 1", ylab = "" );
-hist( Z_2, NumBins, xlab = "normal 2", ylab = "" );
+dev.new()
+NumBins <- round(10 * log(nSim))
+par(mfrow = c(2, 1))
+hist(Z_1, NumBins, xlab = "normal 1", ylab = "")
+hist(Z_2, NumBins, xlab = "normal 2", ylab = "")

-dev.new();
-plot( Z_1, Z_2, type = "p", xlab = "normal 1", ylab = "normal 2" );
+dev.new()
+plot(Z_1, Z_2, type = "p", xlab = "normal 1", ylab = "normal 2")

 # 3d histograms
-NumBins2D = round(sqrt(100 * log(nSim)));
-Z_3 = table( cut (Z_1, NumBins2D ), cut ( Z_2, NumBins2D));
-dev.new();
-cloud( Z_3, panel.3d.cloud = panel.3dbars, scales = list( arrows = FALSE, just = "right" ),
-       xlab = "normal 1", ylab = "normal 2", zlab="", main = "pdf normal" );
+NumBins2D <- round(sqrt(100 * log(nSim)))
+Z_3 <- table(cut(Z_1, NumBins2D), cut(Z_2, NumBins2D))
+dev.new()
+cloud(Z_3, panel.3d.cloud = panel.3dbars, scales = list(arrows = FALSE,
+      just = "right"), xlab = "normal 1", ylab = "normal 2", zlab = "",
+      main = "pdf normal")
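# A quick sanity check of the copula construction used in the next section
# (an illustration; u_check is a hypothetical name): feeding each coordinate
# through its own marginal cdf yields grades that are uniform on [0, 1]
# whatever NormCorr is.
u_check <- pnorm(Z[, 1], NormExpVal[1], NormStDev[1])
range(u_check)  # always within [0, 1]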
-###################################################################################################################
+################################################################################
 ### Generate draws from the copula
-U_1 = pnorm( Z[ , 1 ], NormExpVal[ 1 ], NormStDev[ 1 ]); # grade 1
-U_2 = pnorm( Z[ , 2 ], NormExpVal[ 2 ], NormStDev[ 2 ]); # grade 2
-U = c( U_1, U_2 ); # joint realizations from the required copula
+U_1 <- pnorm(Z[, 1], NormExpVal[1], NormStDev[1]) # grade 1
+U_2 <- pnorm(Z[, 2], NormExpVal[2], NormStDev[2]) # grade 2
+U <- cbind(U_1, U_2) # joint realizations from the required copula

 # plot copula
-NumBins = round(10 * log(nSim));
-dev.new();
-par( mfrow = c( 2, 1) );
-hist( U_1, NumBins, xlab = "grade 1", ylab = "", main = "" );
-hist( U_2, NumBins, xlab = "grade 2", ylab = "", main = "" );
+NumBins <- round(10 * log(nSim))
+dev.new()
+par(mfrow = c(2, 1))
+hist(U_1, NumBins, xlab = "grade 1", ylab = "", main = "")
+hist(U_2, NumBins, xlab = "grade 2", ylab = "", main = "")

 # joint sample
-dev.new();
-plot(U_1, U_2, xlab="grade 1", ylab="grade 2" );
+dev.new()
+plot(U_1, U_2, xlab = "grade 1", ylab = "grade 2")

 # 3d histogram
-NumBins2D = round(sqrt(100 * log(nSim)));
-dev.new();
-U_3 = table( cut (U_1, NumBins2D ), cut ( U_2, NumBins2D ));
-cloud( U_3, panel.3d.cloud = panel.3dbars, scales = list( arrows = FALSE, just = "right" ),
-       xlab = "grade 1", ylab = "grade 2", zlab="", main = "pdf copula" );
+NumBins2D <- round(sqrt(100 * log(nSim)))
+dev.new()
+U_3 <- table(cut(U_1, NumBins2D), cut(U_2, NumBins2D))
+cloud(U_3, panel.3d.cloud = panel.3dbars, scales = list(arrows = FALSE,
+      just = "right"), xlab = "grade 1", ylab = "grade 2", zlab = "",
+      main = "pdf copula")

-###################################################################################################################
+################################################################################
 ### Generate draws from the joint distribution
-a = nu_1 / 2;
-b = 2 * sigmasq_1;
-X_1 = qgamma( U_1, a, b );
+a <- nu_1 / 2
+b <- 2 * sigmasq_1
+X_1 <- qgamma(U_1, a, b)

-sigma_2 = sqrt( sigmasq_2 );
-X_2 = qlnorm( U_2, mu_2, sigma_2 );
+sigma_2 <- sqrt(sigmasq_2)
+X_2 <- qlnorm(U_2, mu_2, sigma_2)

-X = C(X_1, X_2); # joint realizations from the required distribution
+X <- cbind(X_1, X_2) # joint realizations from the required distribution

-###################################################################################################################
+################################################################################
 ### Plot joint distribution
-# marginals: as expected, the histograms (pdf's) do NOT change as NormCorr varies
+# marginals: as expected, the histograms (pdf) do NOT change as NormCorr varies

-NumBins = round(10 * log(nSim));
+NumBins <- round(10 * log(nSim))

-dev.new();
-par( mfrow = c( 2, 1) );
+dev.new()
+par(mfrow = c(2, 1))

 # gamma distribution
-hist( X_1, NumBins, xlab = "gamma", ylab = "", main = "" );
+hist(X_1, NumBins, xlab = "gamma", ylab = "", main = "")

 # lognormal distribution
-hist( X_2, NumBins, xlab = "lognormal", ylab = "", main = "" );
+hist(X_2, NumBins, xlab = "lognormal", ylab = "", main = "")

 # joint sample
-dev.new();
-plot(X_1, X_2, xlab="gamma", ylab="lognormal" );
+dev.new()
+plot(X_1, X_2, xlab = "gamma", ylab = "lognormal")

 # 3d histogram
-NumBins2D = round(sqrt(100 * log(nSim)));
-dev.new();
-X_3 = table( cut (X_1, NumBins2D ), cut ( X_2, NumBins2D ));
-cloud( X_3, panel.3d.cloud = panel.3dbars, scales = list( arrows = FALSE, just = "right" ),
-       xlab = "gamma", ylab = "lognormal", zlab="", main = "pdf joint distribution" );
\ No newline at end of file
+NumBins2D <- round(sqrt(100 * log(nSim)))
+dev.new()
+X_3 <- table(cut(X_1, NumBins2D), cut(X_2, NumBins2D))
+cloud(X_3, panel.3d.cloud = panel.3dbars, scales = list(arrows = FALSE,
+      just = "right"), xlab = "gamma", ylab = "lognormal", zlab = "",
+      main = "pdf joint distribution")

Modified: pkg/Meucci/demo/S_BlackLittermanBasic.R
===================================================================
--- pkg/Meucci/demo/S_BlackLittermanBasic.R	2015-08-12 08:50:54 UTC (rev 3945)
+++ pkg/Meucci/demo/S_BlackLittermanBasic.R	2015-08-12 09:33:01 UTC (rev 3946)
@@ -1,36 +1,39 @@
-#' This script describes to basic market-based Black-Litterman approach in particular:
-#' - full confidence = conditional
-#' - no confidence = reference model
+#' This script describes the basic market-based Black-Litterman approach, in
+#' particular:
+#' - full confidence = conditional
+#' - no confidence = reference model
#' Described in A. Meucci, "Risk and Asset Allocation",
#' Springer, 2005, Chapter 9.
#'
#' @references
-#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170},
-#' "E 303 - Black-Litterman and beyond II".
+#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
+#' \url{http://symmys.com/node/170}, "E 303 - Black-Litterman and beyond II".
#'
-#' See Meucci's script for "S_BlackLittermanBasic.m" and "E 302 - Black-Litterman and beyond I"
+#' See Meucci's script for "S_BlackLittermanBasic.m" and
+#' "E 302 - Black-Litterman and beyond I"
#'
-#' @author Xavier Valls \email{flamejat@@gmail.com}
+#' @author Xavier Valls \email{xaviervallspla@@gmail.com}

-##################################################################################################################
+################################################################################
### Load inputs
-data("covNRets");
+data("covNRets")

-##################################################################################################################
+################################################################################
### Compute efficient frontier
-NumPortf = 40; # number of MV-efficient portfolios
-L2L = Log2Lin( covNRets$Mu, covNRets$Sigma ); [TRUNCATED]

To get the complete diff run:
    svnlook diff /svnroot/returnanalytics -r 3946

From noreply at r-forge.r-project.org  Thu Aug 13 08:56:04 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Thu, 13 Aug 2015 08:56:04 +0200 (CEST)
Subject: [Returnanalytics-commits] r3947 - in pkg/Meucci: R demo man
Message-ID: <20150813065604.83DB7186B45@r-forge.r-project.org>

Author: xavierv
Date: 2015-08-13 08:56:03 +0200 (Thu, 13 Aug 2015)
New Revision: 3947

Modified:
   pkg/Meucci/R/RobustBayesianAllocation.R
   pkg/Meucci/demo/S_BondProjectionPricingNormal.R
   pkg/Meucci/demo/S_BondProjectionPricingStudentT.R
   pkg/Meucci/demo/S_BuyNHold.R
   pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd
Log:
fixed R check errors and formatted demo scripts until S_C*

Modified: pkg/Meucci/R/RobustBayesianAllocation.R
===================================================================
--- pkg/Meucci/R/RobustBayesianAllocation.R	2015-08-12 09:33:01 UTC (rev 3946)
+++ pkg/Meucci/R/RobustBayesianAllocation.R	2015-08-13 06:56:03 UTC (rev 3947)
@@ -135,8 +135,8 @@
#'	efficient frontier
#'	weights: the weights of each portfolio along
#'	the Bayesian efficient frontier
-#'
-#' \deqn{ w_{rB}^{(i)} = argmax_{w \in C, w' \Sigma_{1} w \leq
+#' @note
+#' \deqn{ w_{rB}^{(i)} = argmax_{w \in C, w' \Sigma_{1} w \leq
#' \gamma_{\Sigma}^{(i)} } \big\{w' \mu^{1} - \gamma _{\mu}
#' \sqrt{w' \Sigma_{1} w} \big\},
#'
@@ -144,7 +144,7 @@
#' \frac{v_{1}}{v_{1} - 2} }
#'
#' \gamma_{\Sigma}^{(i)} \equiv \frac{v^{(i)}}{ \frac{ \nu_{1}}{\nu_{1}+N+1} +
-#' \sqrt{ \frac{2\nu_{1}^{2}q_{\Sigma}^{2}}{ (\nu_{1}+N+1)^{3} } } } }
+#' \sqrt{ \frac{2\nu_{1}^{2}q_{\Sigma}^{2}}{ (\nu_{1}+N+1)^{3} } } } }
#' @references
#' A. Meucci - Robust Bayesian Allocation - See formula (19) - (21)
#' \url{ http://papers.ssrn.com/sol3/papers.cfm?abstract_id=681553 }

Modified: pkg/Meucci/demo/S_BondProjectionPricingNormal.R
===================================================================
--- pkg/Meucci/demo/S_BondProjectionPricingNormal.R	2015-08-12 09:33:01 UTC (rev 3946)
+++ pkg/Meucci/demo/S_BondProjectionPricingNormal.R	2015-08-13 06:56:03 UTC (rev 3947)
@@ -1,63 +1,69 @@
-#'This script projects the distribution of the market invariants for the bond markets
-#'(i.e. the changes in yield to maturity) from the estimation interval to the investment horizon
-#'Then it computes the distribution of prices at the investment horizon as described in A. Meucci,
-#'"Risk and Asset Allocation", Springer, 2005, Chapter 3.
+#' This script projects the distribution of the market invariants for the bond
+#' markets (i.e. the changes in yield to maturity) from the estimation interval
+#' to the investment horizon. Then it computes the distribution of prices at
+#' the investment horizon as described in A. Meucci, "Risk and Asset
+#' Allocation", Springer, 2005, Chapter 3.
#'
#' @references
-#' A.
Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, +#' "E 140 - Fixed-income market: projection of normal invariants" #' #' See Meucci's script for "S_BondProjectionPricingNormal.m" #' #' @author Xavier Valls \email{xaviervallspla@@gmail.com} -################################################################################################################## +################################################################################ ### Inputs -tau = 1/52; # time to horizon expressed in years -tau_tilde = 1/52; # estimation period expressed in years +tau <- 1/52 # time to horizon expressed in years +tau_tilde <- 1/52 # estimation period expressed in years -FlatCurve = 0.04; -TimesToMat = c( 1, 5, 10, 52, 520 ) / 52; # time to maturity of selected bonds expressed in years +FlatCurve <- 0.04 +# time to maturity of selected bonds expressed in years +TimesToMat <- c(1, 5, 10, 52, 520) / 52 # parameters of the distribution of the changes in yield to maturity -u_minus_tau = TimesToMat - tau; -mus = 0 * u_minus_tau; -sigmas = ( 20 + 5 / 4 * u_minus_tau ) / 10000; +u_minus_tau <- TimesToMat - tau +mus <- 0 * u_minus_tau +sigmas <- (20 + 5 / 4 * u_minus_tau) / 10000 -nSim = 100000; +nSim <- 100000 -################################################################################################################## +################################################################################ ### Bond market projection to horizon and pricing -BondCurrent_Prices_Shifted = exp( -FlatCurve * u_minus_tau ); -BondCurrent_Prices = exp( -FlatCurve * TimesToMat ); +BondCurrent_Prices_Shifted <- exp(-FlatCurve * u_minus_tau) +BondCurrent_Prices <- exp(-FlatCurve * TimesToMat) # project bond market to horizon -N = length( TimesToMat ); # number of bonds -U = runif( nSim ); -BondMarket_Scenarios = matrix( 0, nSim, N ); -for( n in 1 : N ) -{ +N <- length(TimesToMat) # number of bonds +U <- runif(nSim) +BondMarket_Scenarios <- matrix(0, nSim, N) +for (n in 1 : N) { # generate co-dependent changes in yield-to-maturity - DY_Scenarios = qnorm( U, mus[ n ] * tau / tau_tilde, sigmas[ n ] * sqrt( tau / tau_tilde ) ); + DY_Scenarios <- qnorm(U, mus[n] * tau / tau_tilde, sigmas[n] * + sqrt(tau / tau_tilde)) - # compute the horizon prices, (3.81) in "Risk and Asset Allocation" - Springer - X = -u_minus_tau[ n ] * DY_Scenarios; - BondMarket_Scenarios[ , n ] = BondCurrent_Prices_Shifted[ n ] * exp( X ); + # compute the horizon prices, (3.81) in "Risk and Asset Allocation"-Springer + X <- -u_minus_tau[n] * DY_Scenarios + BondMarket_Scenarios[, n] <- BondCurrent_Prices_Shifted[n] * exp(X) } -################################################################################################################## +################################################################################ ### MV inputs - analytical -Exp_Hrzn_DY_Hat = mus * tau / tau_tilde; -SDev_Hrzn_DY_Hat = sigmas * sqrt( tau / tau_tilde ); -Corr_Hrzn_DY_Hat = matrix( 1, N, N ); # full co-dependence -Cov_Hrzn_DY_Hat = diag( SDev_Hrzn_DY_Hat ) %*% Corr_Hrzn_DY_Hat %*% diag( SDev_Hrzn_DY_Hat ); -Bond = ConvertChangeInYield2Price( Exp_Hrzn_DY_Hat, Cov_Hrzn_DY_Hat, u_minus_tau, BondCurrent_Prices_Shifted ); -print( Bond$Exp_Prices ); -print( Bond$Cov_Prices ); +Exp_Hrzn_DY_Hat <- mus * tau / tau_tilde +SDev_Hrzn_DY_Hat <- sigmas * sqrt(tau / tau_tilde) +Corr_Hrzn_DY_Hat <- matrix(1, N, N) # full co-dependence +Cov_Hrzn_DY_Hat <- diag(SDev_Hrzn_DY_Hat) %*% Corr_Hrzn_DY_Hat %*% + diag(SDev_Hrzn_DY_Hat) +Bond <- 
ConvertChangeInYield2Price(Exp_Hrzn_DY_Hat, Cov_Hrzn_DY_Hat, + u_minus_tau, BondCurrent_Prices_Shifted) +print(Bond$Exp_Prices) +print(Bond$Cov_Prices) -################################################################################################################## +################################################################################ ### MV inputs - numerical -BondExp_Prices = t( apply(BondMarket_Scenarios, 2, mean) ); -BondCov_Prices = cov( BondMarket_Scenarios ); -print( BondExp_Prices ); -print( BondCov_Prices ); +BondExp_Prices <- t(apply(BondMarket_Scenarios, 2, mean)) +BondCov_Prices <- cov(BondMarket_Scenarios) +print(BondExp_Prices) +print(BondCov_Prices) Modified: pkg/Meucci/demo/S_BondProjectionPricingStudentT.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingStudentT.R 2015-08-12 09:33:01 UTC (rev 3946) +++ pkg/Meucci/demo/S_BondProjectionPricingStudentT.R 2015-08-13 06:56:03 UTC (rev 3947) @@ -1,60 +1,64 @@ -#'This script projects the distribution of the market invariants for the bond markets -#'(i.e. the changes in yield to maturity) from the estimation interval (Student t assumption) -#'to the investment horizon. Then it computes the distribution of prices at the investment -#'horizon as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 3. +#'This script projects the distribution of the market invariants for the bond +#' markets (i.e. the changes in yield to maturity) from the estimation interval +#' (Student t assumption) to the investment horizon. Then it computes the +#' distribution of prices at the investment horizon as described in A. Meucci, +#' "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 141 - Fixed-income market: projection of Student t invariants". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 141 - Fixed-income market: projection of +#' Student t invariants". 
#'
#' See Meucci's script for "S_BondProjectionPricingStudentT.m"
#'
#' @author Xavier Valls \email{xaviervallspla@@gmail.com}

-##################################################################################################################
+################################################################################
### Inputs
-tau = 4/52; # time to horizon expressed in years
-tau_tilde = 1/52; # estimation period expressed in years
+tau <- 4/52 # time to horizon expressed in years
+tau_tilde <- 1/52 # estimation period expressed in years

-FlatCurve = 0.04;
-TimesToMat = c( 4, 5, 10, 52, 520 ) / 52; # time to maturity of selected bonds expressed in years
+FlatCurve <- 0.04
+# time to maturity of selected bonds expressed in years
+TimesToMat <- c(4, 5, 10, 52, 520) / 52

-# determine the parameters of the distribution of the invariants (changes in yield to maturity)
-Periods = tau / tau_tilde; # number of estimation periods until the investment horizon
-u_minus_tau = TimesToMat - tau;
+# determine the parameters of the distribution of the invariants
+# (changes in yield to maturity)
+# number of estimation periods until the investment horizon
+Periods <- tau / tau_tilde
+u_minus_tau <- TimesToMat - tau

-nu = 8;
-mus = 0 * u_minus_tau;
-sigmas = ( 20 + 5 / 4 * u_minus_tau ) / 10000;
-Num_Scenarios = 100000;
+nu <- 8
+mus <- 0 * u_minus_tau
+sigmas <- (20 + 5 / 4 * u_minus_tau) / 10000
+Num_Scenarios <- 100000

-##################################################################################################################
-### Projection and pricing
-BondCurrent_Prices_Shifted = exp(-FlatCurve * u_minus_tau);
-BondCurrent_Prices = exp(-FlatCurve * TimesToMat);
+################################################################################
+### Projection and pricing
+BondCurrent_Prices_Shifted <- exp(-FlatCurve * u_minus_tau)
+BondCurrent_Prices <- exp(-FlatCurve * TimesToMat)

# generate common source of randomness
-U = runif( Num_Scenarios );
+U <- runif(Num_Scenarios)

-N = length( TimesToMat ); # number of bonds
-par( mfrow = c( N,1 ));
-for( n in 1 : N )
-{
+N <- length(TimesToMat) # number of bonds
+par(mfrow = c(N, 1))
+for (n in 1 : N) {
  # project bond market to horizon
-  Projection = ProjectionStudentT( nu, mus[ n ], sigmas[ n ], Periods);
+  Projection <- ProjectionStudentT(nu, mus[n], sigmas[n], Periods)

  # generate co-dependent changes in yield-to-maturity
-  DY_Scenarios = interp1( Projection$F, Projection$x, U, method = "linear");
+  DY_Scenarios <- interp1(Projection$F, Projection$x, U, method = "linear")

-  # compute the horizon prices, (3.81) in "Risk and Asset Allocation" - Springer
-  X = -u_minus_tau[ n ] * DY_Scenarios;
-  Z = BondCurrent_Prices_Shifted[ n ] * exp(X);
-
+  # compute the horizon prices, (3.81) in "Risk and Asset Allocation"-Springer
+  X <- -u_minus_tau[n] * DY_Scenarios
+  Z <- BondCurrent_Prices_Shifted[n] * exp(X)

  # compute and plot linear returns
-  L = Z / BondCurrent_Prices[ n ] - 1;
+  L <- Z / BondCurrent_Prices[n] - 1
  #for n=1 histogram represents the only bar (not empty)
-  hist(L, round(10 * log(Num_Scenarios)), xlab = paste( "Linear returns for bond", n ), main = "" );
-
+  hist(L, round(10 * log(Num_Scenarios)),
+       xlab = paste("Linear returns for bond", n), main = "")
}

Modified: pkg/Meucci/demo/S_BuyNHold.R
===================================================================
--- pkg/Meucci/demo/S_BuyNHold.R	2015-08-12 09:33:01 UTC (rev 3946)
+++ pkg/Meucci/demo/S_BuyNHold.R	2015-08-13 06:56:03 UTC (rev 3947)
@@ -1,101 +1,112 @@
-#' This script
illustrates the buy & hold dynamic strategy, as described in A. Meucci,"Risk and Asset Allocation",
-#' Springer, 2005, Chapter 6.
+#' This script illustrates the buy & hold dynamic strategy, as described in
+#' A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 6.
#'
#' @references
-#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170},
-#' "E 261 - Buy and hold".
+#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
+#' \url{http://symmys.com/node/170}, "E 261 - Buy and hold".
#'
#' See Meucci's script for "S_BuyNHold.m"
#
#' @author Xavier Valls \email{xaviervallspla@@gmail.com}

-##################################################################################################################
+################################################################################
### Input parameters
-Initial_Investment = 1000;
-Time_Horizon = 6 / 12; # in years
-Time_Step = 1 / 252; # in years
+Initial_Investment <- 1000
+Time_Horizon <- 6 / 12 # in years
+Time_Step <- 1 / 252 # in years

-m = 0.2; # yearly expected return on the underlying
-s = 0.40; # yearly expected percentage volatility on the stock index
-r = 0.04; # risk-free (money market) interest rate
+m <- 0.2 # yearly expected return on the underlying
+s <- 0.40 # yearly expected percentage volatility on the stock index
+r <- 0.04 # risk-free (money market) interest rate

-NumSimul = 30000;
+NumSimul <- 30000

-##################################################################################################################
+################################################################################
# proportion of underlying you want to hold in the beginning, e.g.: 50
-Prct = 50 ;
+Prct <- 50

-##################################################################################################################
+################################################################################
#### Initialize values
-Underlying_Index = Initial_Investment; # value of the underlyting at starting time, normalzed to equal investment
-Start = Underlying_Index;
-Elapsed_Time = 0;
-Portfolio_Value = Initial_Investment;
+# value of the underlying at starting time, normalized to equal investment
+Underlying_Index <- Initial_Investment
+Start <- Underlying_Index
+Elapsed_Time <- 0
+Portfolio_Value <- Initial_Investment

-Underlying_in_Portfolio_Percent = Prct / 100;
+Underlying_in_Portfolio_Percent <- Prct / 100

-Underlyings_in_Portfolio = Portfolio_Value * Underlying_in_Portfolio_Percent;
-Cash_in_Portfolio = Portfolio_Value - Underlyings_in_Portfolio;
+Underlyings_in_Portfolio <- Portfolio_Value * Underlying_in_Portfolio_Percent
+Cash_in_Portfolio <- Portfolio_Value - Underlyings_in_Portfolio

-##################################################################################################################
+################################################################################
### Initialize parameters for the plot (no theory in this)
-Portfolio_Series = Portfolio_Value;
-Market_Series = Underlying_Index;
-Percentage_Series = Underlying_in_Portfolio_Percent;
+Portfolio_Series <- Portfolio_Value
+Market_Series <- Underlying_Index
+Percentage_Series <- Underlying_in_Portfolio_Percent

-# asset evolution and portfolio rebalancing
-while( Elapsed_Time < (Time_Horizon - 10^(-5)) ) # add this term to avoid errors
-{
+## asset evolution and portfolio rebalancing
+
+# add this term to avoid errors
+while (Elapsed_Time < (Time_Horizon - 10 ^ (-5))) {
+
  # time elapses...
-  Elapsed_Time = Elapsed_Time + Time_Step;
+  Elapsed_Time <- Elapsed_Time + Time_Step

  # ...asset prices evolve and portfolio takes on new value...
-  Multiplicator = exp( (m - s ^ 2 / 2) * Time_Step + s * sqrt( Time_Step ) * rnorm(NumSimul));
-  Underlying_Index = Underlying_Index * Multiplicator;
-  Underlyings_in_Portfolio = Underlyings_in_Portfolio * Multiplicator;
-  Cash_in_Portfolio = Cash_in_Portfolio * exp(r * Time_Step);
-  Portfolio_Value = Underlyings_in_Portfolio + Cash_in_Portfolio;
+  Multiplicator <- exp((m - s ^ 2 / 2) * Time_Step + s * sqrt(Time_Step) *
+                   rnorm(NumSimul))
+  Underlying_Index <- Underlying_Index * Multiplicator
+  Underlyings_in_Portfolio <- Underlyings_in_Portfolio * Multiplicator
+  Cash_in_Portfolio <- Cash_in_Portfolio * exp(r * Time_Step)
+  Portfolio_Value <- Underlyings_in_Portfolio + Cash_in_Portfolio

  # ...and we rebalance our portfolio
-  Underlying_in_Portfolio_Percent = Underlyings_in_Portfolio / Portfolio_Value;
+  Underlying_in_Portfolio_Percent <- Underlyings_in_Portfolio /
+                                     Portfolio_Value

  # store one path for the movie (no theory in this)
-  Portfolio_Series = cbind( Portfolio_Series, Portfolio_Value[ 1 ] ); ##ok<*AGROW>
-  Market_Series = cbind( Market_Series, Underlying_Index[ 1 ] );
-  Percentage_Series = cbind( Percentage_Series, Underlying_in_Portfolio_Percent[ 1 ] );
+  Portfolio_Series <- cbind(Portfolio_Series, Portfolio_Value[1])
+  Market_Series <- cbind(Market_Series, Underlying_Index[1])
+  Percentage_Series <- cbind(Percentage_Series,
+                             Underlying_in_Portfolio_Percent[1])
}

-##################################################################################################################
+################################################################################
### Play the movie for one path
-Time = seq( 0, Time_Horizon, Time_Step);
-y_max = max( cbind( Portfolio_Series, Market_Series) ) * 1.2;
-dev.new();
-par( mfrow = c(2,1))
-for( i in 1 : length(Time) )
+Time <- seq(0, Time_Horizon, Time_Step)
+y_max <- max(cbind(Portfolio_Series, Market_Series)) * 1.2
+dev.new()
+par(mfrow = c(2, 1))
+for (i in 1 : length(Time))
 {
-  plot( Time[ 1:i ], Portfolio_Series[ 1:i ], type ="l", lwd = 2.5, col = "blue", ylab = "value",
-        xlim = c(0, Time_Horizon), ylim = c(0, y_max), main = "investment (blue) vs underlying (red) value");
-  lines( Time[ 1:i ], Market_Series[ 1:i ], lwd = 2, col = "red" );
-  #axis( 1, [0, Time_Horizon, 0, y_max]);
+  plot(Time[1:i], Portfolio_Series[1:i], type = "l", lwd = 2.5, col = "blue",
+       ylab = "value", xlim = c(0, Time_Horizon), ylim = c(0, y_max),
+       main = "investment (blue) vs underlying (red) value")
+  lines(Time[1:i], Market_Series[1:i], lwd = 2, col = "red")

-  plot(Time[ 1:i ], Percentage_Series[ 1:i ], type = "h", col = "red", xlab = "time", ylab = "#",
-       xlim = c(0, Time_Horizon), ylim =c(0,1), main = "percentage of underlying in portfolio");
+  plot(Time[1:i], Percentage_Series[1:i], type = "h", col = "red",
+       xlab = "time", ylab = "#", xlim = c(0, Time_Horizon), ylim = c(0, 1),
+       main = "percentage of underlying in portfolio")
}

-##################################################################################################################
+################################################################################
### Plots
# plot the scatterplot
-dev.new();
+dev.new()

# marginals
-NumBins = round(10 * log(NumSimul));
-layout( matrix(c(1,2,2,2,1,2,2,2,1,2,2,2,0,3,3,3), 4, 4, byrow = TRUE));
-barplot( table( cut( Portfolio_Value, NumBins )), horiz=TRUE, yaxt="n")
+NumBins <- round(10 * log(NumSimul))
+layout(matrix(c(1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 0, 3, 3, 3), 4, 4,
+       byrow = TRUE))
+barplot(table(cut(Portfolio_Value, NumBins)), horiz = TRUE, yaxt = "n")

# joint scatter plot
-plot(Underlying_Index, Portfolio_Value, xlab = "underlying at horizon (~ buy & hold )", ylab = "investment at horizon" );
-so = sort( Underlying_Index );
-lines( so, so, col = "red" );
+plot(Underlying_Index, Portfolio_Value,
+     xlab = "underlying at horizon (~ buy & hold)",
+     ylab = "investment at horizon")

-barplot( table( cut( Underlying_Index, NumBins )), yaxt="n")
+so <- sort(Underlying_Index)
+lines(so, so, col = "red")
+
+barplot(table(cut(Underlying_Index, NumBins)), yaxt = "n")

Modified: pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd
===================================================================
--- pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd	2015-08-12 09:33:01 UTC (rev 3946)
+++ pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd	2015-08-13 06:56:03 UTC (rev 3947)
@@ -62,8 +62,15 @@
 efficient frontier
 weights: the weights of each portfolio along
 the Bayesian efficient frontier
-
-\deqn{ w_{rB}^{(i)} = argmax_{w \in C, w' \Sigma_{1} w \leq
+}
+\description{
+Construct a collection of portfolios along the Bayesian
+mean-variance efficient frontier where each portfolio is equally distanced in
+return space. The function also returns the most robust portfolio along the
+Bayesian efficient frontier
+}
+\note{
+\deqn{ w_{rB}^{(i)} = argmax_{w \in C, w' \Sigma_{1} w \leq
 \gamma_{\Sigma}^{(i)} } \big\{w' \mu^{1} - \gamma _{\mu}
 \sqrt{w' \Sigma_{1} w} \big\},

@@ -71,14 +78,8 @@
 \frac{v_{1}}{v_{1} - 2} }

 \gamma_{\Sigma}^{(i)} \equiv \frac{v^{(i)}}{ \frac{ \nu_{1}}{\nu_{1}+N+1} +
-\sqrt{ \frac{2\nu_{1}^{2}q_{\Sigma}^{2}}{ (\nu_{1}+N+1)^{3} } } } }
+\sqrt{ \frac{2\nu_{1}^{2}q_{\Sigma}^{2}}{ (\nu_{1}+N+1)^{3} } } } }
 }
-\description{
-Construct a collection of portfolios along the Bayesian
-mean-variance efficient frontier where each portfolio is equally distanced in
-return space. The function also returns the most robust portfolio along the
-Bayesian efficient frontier
-}
 \author{
 Ram Ahluwalia \email{ram at wingedfootcapital.com}
 }

From noreply at r-forge.r-project.org  Thu Aug 13 11:58:55 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Thu, 13 Aug 2015 11:58:55 +0200 (CEST)
Subject: [Returnanalytics-commits] r3948 - pkg/Dowd/man
Message-ID: <20150813095855.2139A185763@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-13 11:58:54 +0200 (Thu, 13 Aug 2015)
New Revision: 3948

Added:
   pkg/Dowd/man/NormalVaRHotspots.Rd
Log:
Function NormalVaRHotspots.R added.
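The commits that follow add Dowd's VaR hotspot decomposition: the incremental
VaR of position i is the portfolio VaR minus the VaR of the same portfolio
with position i zeroed out. A hedged standalone sketch of that logic under the
normal assumption (function names here are illustrative, not the package's;
risk is scaled by the portfolio standard deviation, as in Dowd's text):

# normal portfolio VaR over horizon hp at confidence cl
normal.var <- function(w, mu, vc, cl, hp) {
  as.numeric(-sum(mu * w) * hp -
             qnorm(1 - cl) * sqrt(w %*% vc %*% w) * sqrt(hp))
}

# hotspots: portfolio VaR minus VaR with each position removed in turn
ivar <- function(w, mu, vc, cl, hp) {
  full <- normal.var(w, mu, vc, cl, hp)
  sapply(seq_along(w), function(i) {
    x <- w
    x[i] <- 0                               # drop position i
    full - normal.var(x, mu, vc, cl, hp)    # incremental VaR of position i
  })
}

# e.g. two uncorrelated positions
ivar(c(5, 10), c(0.08, 0.05), diag(c(0.04, 0.09)), 0.95, 30)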
Added: pkg/Dowd/man/NormalVaRHotspots.Rd
===================================================================
--- pkg/Dowd/man/NormalVaRHotspots.Rd	                        (rev 0)
+++ pkg/Dowd/man/NormalVaRHotspots.Rd	2015-08-13 09:58:54 UTC (rev 3948)
@@ -0,0 +1,43 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/NormalVaRHotspots.R
+\name{NormalVaRHotspots}
+\alias{NormalVaRHotspots}
+\title{Hotspots for normal VaR}
+\usage{
+NormalVaRHotspots(vc.matrix, mu, positions, cl, hp)
+}
+\arguments{
+\item{vc.matrix}{Variance covariance matrix for returns}
+
+\item{mu}{Vector of expected position returns}
+
+\item{positions}{Vector of positions}
+
+\item{cl}{Confidence level, a scalar}
+
+\item{hp}{Holding period, a scalar}
+}
+\value{
+Hotspots for normal VaR
+}
+\description{
+Estimates the VaR hotspots (or vector of incremental VaRs) for
+a portfolio assuming individual asset returns are normally distributed, for
+specified confidence level and holding period.
+}
+\examples{
+# Hotspots for VaR for randomly generated portfolio
+   vc.matrix <- crossprod(matrix(rnorm(16), 4, 4)) # positive semi-definite
+   mu <- rnorm(4,.08,.04)
+   positions <- c(5,2,6,10)
+   cl <- .95
+   hp <- 280
+   NormalVaRHotspots(vc.matrix, mu, positions, cl, hp)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}

From noreply at r-forge.r-project.org  Thu Aug 13 11:59:09 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Thu, 13 Aug 2015 11:59:09 +0200 (CEST)
Subject: [Returnanalytics-commits] r3949 - pkg/Dowd/R
Message-ID: <20150813095909.8C166185763@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-13 11:59:09 +0200 (Thu, 13 Aug 2015)
New Revision: 3949

Added:
   pkg/Dowd/R/NormalVaRHotspots.R
Log:
Function NormalVaRHotspots.R added.

Added: pkg/Dowd/R/NormalVaRHotspots.R
===================================================================
--- pkg/Dowd/R/NormalVaRHotspots.R	                        (rev 0)
+++ pkg/Dowd/R/NormalVaRHotspots.R	2015-08-13 09:59:09 UTC (rev 3949)
@@ -0,0 +1,72 @@
+#' @title Hotspots for normal VaR
+#'
+#' @description Estimates the VaR hotspots (or vector of incremental VaRs) for
+#' a portfolio assuming individual asset returns are normally distributed, for
+#' specified confidence level and holding period.
+#'
+#' @param vc.matrix Variance covariance matrix for returns
+#' @param mu Vector of expected position returns
+#' @param positions Vector of positions
+#' @param cl Confidence level, a scalar
+#' @param hp Holding period, a scalar
+#' @return Hotspots for normal VaR
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#'
+#' @examples
+#'
+#'    # Hotspots for VaR for randomly generated portfolio
+#'    vc.matrix <- crossprod(matrix(rnorm(16), 4, 4)) # positive semi-definite
+#'    mu <- rnorm(4,.08,.04)
+#'    positions <- c(5,2,6,10)
+#'    cl <- .95
+#'    hp <- 280
+#'    NormalVaRHotspots(vc.matrix, mu, positions, cl, hp)
+#'
+#' @export
NormalVaRHotspots <- function(vc.matrix, mu, positions, cl, hp){
+
+  # Check that positions vector read as a scalar or row vector
+  positions <- as.matrix(positions)
+  if (dim(positions)[1] > dim(positions)[2]){
+    positions <- t(positions)
+  }
+
+  # Check that expected returns vector is read as a scalar or row vector
+  mu <- as.matrix(mu)
+  if (dim(mu)[1] > dim(mu)[2]){
+    mu <- t(mu)
+  }
+
+  # Check that dimensions are correct
+  if (max(dim(mu)) != max(dim(positions))){
+    stop("Positions vector and expected returns vector must have same size")
+  }
+  vc.matrix <- as.matrix(vc.matrix)
+  if (max(dim(vc.matrix)) != max(dim(positions))){
+    stop("Positions vector and variance-covariance matrix must have compatible dimensions")
+  }
+
+  # Check that inputs obey sign and value restrictions
+  if (cl >= 1){
+    stop("Confidence level must be less than 1")
+  }
+  if (cl <= 0){
+    stop("Confidence level must be greater than 0")
+  }
+  if (hp <= 0){
+    stop("Holding period must be greater than 0")
+  }
+
+  # VaR estimation: scale by the portfolio standard deviation, not the variance
+  VaR <- - mu %*% t(positions) * hp - qnorm(1 - cl, 0, 1) *
+    sqrt(positions %*% vc.matrix %*% t(positions)) * sqrt(hp) # VaR
+  iVaR <- double(length(positions))
+  for (i in 1:length(positions)){
+    x <- positions
+    x[i] <- 0
+    # incremental VaR: full-portfolio VaR minus VaR without position i
+    iVaR[i] <- VaR + mu %*% t(x) * hp + qnorm(1 - cl, 0, 1) *
+      sqrt(x %*% vc.matrix %*% t(x)) * sqrt(hp)
+  }
+  y <- iVaR
+  return(y)
+}
\ No newline at end of file

From noreply at r-forge.r-project.org  Thu Aug 13 11:59:29 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Thu, 13 Aug 2015 11:59:29 +0200 (CEST)
Subject: [Returnanalytics-commits] r3950 - pkg/Dowd/R
Message-ID: <20150813095929.832BA185763@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-13 11:59:29 +0200 (Thu, 13 Aug 2015)
New Revision: 3950

Added:
   pkg/Dowd/R/NormalESHotspots.R
Log:
Function NormalESHotspots.R added.

Added: pkg/Dowd/R/NormalESHotspots.R
===================================================================
--- pkg/Dowd/R/NormalESHotspots.R	                        (rev 0)
+++ pkg/Dowd/R/NormalESHotspots.R	2015-08-13 09:59:29 UTC (rev 3950)
@@ -0,0 +1,97 @@
+#' @title Hotspots for normal ES
+#'
+#' @description Estimates the ES hotspots (or vector of incremental ESs) for a
+#' portfolio assuming individual asset returns are normally distributed, for
+#' specified confidence level and holding period.
+#'
+#' @param vc.matrix Variance covariance matrix for returns
+#' @param mu Vector of expected position returns
+#' @param positions Vector of positions
+#' @param cl Confidence level, a scalar
+#' @param hp Holding period, a scalar
+#' @return Hotspots for normal ES
+#' @references Dowd, K. Measuring Market Risk, Wiley, 2007.
+#'
+#' @author Dinesh Acharya
+#'
+#' @examples
+#'
+#'    # Hotspots for ES for randomly generated portfolio
+#'    vc.matrix <- crossprod(matrix(rnorm(16), 4, 4)) # positive semi-definite
+#'    mu <- rnorm(4,.08,.04)
+#'    positions <- c(5,2,6,10)
+#'    cl <- .95
+#'    hp <- 280
+#'    NormalESHotspots(vc.matrix, mu, positions, cl, hp)
+#'
+#' @export
+NormalESHotspots <- function(vc.matrix, mu, positions, cl, hp){
+
+  # Check that positions vector read as a scalar or row vector
+  positions <- as.matrix(positions)
+  if (dim(positions)[1] > dim(positions)[2]){
+    positions <- t(positions)
+  }
+
+  # Check that expected returns vector is read as a scalar or row vector
+  mu <- as.matrix(mu)
+  if (dim(mu)[1] > dim(mu)[2]){
+    mu <- t(mu)
+  }
+
+  # Check that dimensions are correct
+  if (max(dim(mu)) != max(dim(positions))){
+    stop("Positions vector and expected returns vector must have same size")
+  }
+  if (max(dim(vc.matrix)) != max(dim(positions))){
+    stop("Positions vector and variance-covariance matrix must have compatible dimensions")
+  }
+
+  # Check that inputs obey sign and value restrictions
+  if (cl >= 1){
+    stop("Confidence level must be less than 1")
+  }
+  if (cl <= 0){
+    stop("Confidence level must be greater than 0")
+  }
+  if (hp <= 0){
+    stop("Holding period must be greater than 0")
+  }
+
+  # Portfolio VaR, scaled by the portfolio standard deviation
+  VaR <- - mu %*% t(positions) * hp - qnorm(1 - cl, 0, 1) *
+    sqrt(positions %*% vc.matrix %*% t(positions)) * sqrt(hp) # VaR
+  n <- 1000 # Number of slices into which tail is divided
+  cl0 <- cl # Initial confidence level
+  term <- VaR
+  delta.cl <- (1 - cl) / n # Increment to confidence level
+  for (k in 1:(n - 1)) {
+    cl <- cl0 + k * delta.cl # Revised cl
+    term <- term - mu %*% t(positions) * hp - qnorm(1 - cl, 0, 1) *
+      sqrt(positions %*% vc.matrix %*% t(positions)) * sqrt(hp)
+  }
+  portfolio.ES <- term / n

+  # ES on portfolio minus each position, and incremental ES
+  es <- double(length(positions))
+  ies <- double(length(positions))
+  for (j in 1:length(positions)) {
+    x <- positions
+    x[j] <- 0
+    # reset to the initial confidence level for each reduced portfolio
+    term.j <- - mu %*% t(x) * hp - qnorm(1 - cl0, 0, 1) *
+      sqrt(x %*% vc.matrix %*% t(x)) * sqrt(hp)
+
+    for (k in 1:(n - 1)){
+      cl <- cl0 + k * delta.cl # Revised cl
+      term.j <- term.j - mu %*% t(x) * hp - qnorm(1 - cl, 0, 1) *
+        sqrt(x %*% vc.matrix %*% t(x)) * sqrt(hp)
+    }
+    es[j] <- term.j / n # ES on portfolio minus position j
+    ies[j] <- portfolio.ES - es[j] # Incremental ES
+  }
+  return(ies)
+}

From noreply at r-forge.r-project.org  Thu Aug 13 12:01:40 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Thu, 13 Aug 2015 12:01:40 +0200 (CEST)
Subject: [Returnanalytics-commits] r3951 - pkg/Dowd/man
Message-ID: <20150813100140.3E77A18792C@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-13 12:01:39 +0200 (Thu, 13 Aug 2015)
New Revision: 3951

Added:
   pkg/Dowd/man/NormalESHotspots.Rd
Log:
Function NormalESHotspots.R added.
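NormalESHotspots.R above approximates ES the way Dowd's book does throughout:
slice the tail [cl, 1) into n equal confidence levels, compute the VaR at each
revised level, and average the slice VaRs. A compressed sketch of just that
averaging idea for a single normal position (our illustration, not the package
function):

# ES as the average of tail VaRs over n slices of [cl, 1)
normal.es <- function(mu, sigma, cl, hp, n = 1000) {
  cls <- cl + (0:(n - 1)) * (1 - cl) / n              # revised confidence levels
  mean(-mu * hp - qnorm(1 - cls) * sigma * sqrt(hp))  # average the slice VaRs
}

# agrees with the closed form -mu * hp + sigma * sqrt(hp) * dnorm(qnorm(cl)) / (1 - cl)
normal.es(0.001, 0.02, 0.95, 1)
-0.001 + 0.02 * dnorm(qnorm(0.95)) / 0.05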
Added: pkg/Dowd/man/NormalESHotspots.Rd
===================================================================
--- pkg/Dowd/man/NormalESHotspots.Rd	                        (rev 0)
+++ pkg/Dowd/man/NormalESHotspots.Rd	2015-08-13 10:01:39 UTC (rev 3951)
@@ -0,0 +1,45 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/NormalESHotspots.R
+\name{NormalESHotspots}
+\alias{NormalESHotspots}
+\title{Hotspots for normal ES}
+\usage{
+NormalESHotspots(vc.matrix, mu, positions, cl, hp)
+}
+\arguments{
+\item{vc.matrix}{Variance covariance matrix for returns}
+
+\item{mu}{Vector of expected position returns}
+
+\item{positions}{Vector of positions}
+
+\item{cl}{Confidence level, a scalar}
+
+\item{hp}{Holding period, a scalar}
+}
+\value{
+Hotspots for normal ES
+}
+\description{
+Estimates the ES hotspots (or vector of incremental ESs) for a
+portfolio assuming individual asset returns are normally distributed, for
+specified confidence level and holding period.
+}
+\examples{
+# Hotspots for ES for randomly generated portfolio
+   vc.matrix <- crossprod(matrix(rnorm(16), 4, 4)) # positive semi-definite
+   mu <- rnorm(4,.08,.04)
+   positions <- c(5,2,6,10)
+   cl <- .95
+   hp <- 280
+   NormalESHotspots(vc.matrix, mu, positions, cl, hp)
+}
+\author{
+Dinesh Acharya
+}
+\references{
+Dowd, K. Measuring Market Risk, Wiley, 2007.
+}

From noreply at r-forge.r-project.org  Thu Aug 13 12:02:27 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Thu, 13 Aug 2015 12:02:27 +0200 (CEST)
Subject: [Returnanalytics-commits] r3952 - pkg/Dowd
Message-ID: <20150813100228.0631918792C@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-13 12:02:27 +0200 (Thu, 13 Aug 2015)
New Revision: 3952

Modified:
   pkg/Dowd/NAMESPACE
Log:
NormalESHotspots and NormalVaRHotspots added.

Modified: pkg/Dowd/NAMESPACE
===================================================================
--- pkg/Dowd/NAMESPACE	2015-08-13 10:01:39 UTC (rev 3951)
+++ pkg/Dowd/NAMESPACE	2015-08-13 10:02:27 UTC (rev 3952)
@@ -101,6 +101,7 @@
 export(NormalESConfidenceInterval)
 export(NormalESDFPerc)
 export(NormalESFigure)
+export(NormalESHotspots)
 export(NormalESPlot2DCL)
 export(NormalESPlot2DHP)
 export(NormalESPlot3D)
@@ -110,6 +111,7 @@
 export(NormalVaRConfidenceInterval)
 export(NormalVaRDFPerc)
 export(NormalVaRFigure)
+export(NormalVaRHotspots)
 export(NormalVaRPlot2DCL)
 export(NormalVaRPlot2DHP)
 export(NormalVaRPlot3D)

From noreply at r-forge.r-project.org  Thu Aug 13 18:11:52 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Thu, 13 Aug 2015 18:11:52 +0200 (CEST)
Subject: [Returnanalytics-commits] r3953 - in pkg/Meucci: .
R demo man
Message-ID: <20150813161152.13C08183CE2@r-forge.r-project.org>

Author: xavierv
Date: 2015-08-13 18:11:51 +0200 (Thu, 13 Aug 2015)
New Revision: 3953

Modified:
   pkg/Meucci/NAMESPACE
   pkg/Meucci/R/MaxRsqCS.R
   pkg/Meucci/R/MaxRsqTS.R
   pkg/Meucci/R/MultivariateOUnCointegration.R
   pkg/Meucci/demo/S_CPPI.R
   pkg/Meucci/demo/S_CallsProjectionPricing.R
   pkg/Meucci/demo/S_CheckDiagonalization.R
   pkg/Meucci/demo/S_CornishFisher.R
   pkg/Meucci/demo/S_CorrelationPriorUniform.R
   pkg/Meucci/demo/S_CovarianceEvolution.R
   pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R
   pkg/Meucci/demo/S_CrossSectionIndustries.R
   pkg/Meucci/demo/S_EigenvalueDispersion.R
   pkg/Meucci/man/FitOU.Rd
   pkg/Meucci/man/MaxRsqCS.Rd
   pkg/Meucci/man/MaxRsqTS.Rd
   pkg/Meucci/man/OUstep.Rd
   pkg/Meucci/man/OUstepEuler.Rd
Log:
Formatted demo scripts and related functions up to S_D*

Modified: pkg/Meucci/NAMESPACE
===================================================================
--- pkg/Meucci/NAMESPACE	2015-08-13 10:02:27 UTC (rev 3952)
+++ pkg/Meucci/NAMESPACE	2015-08-13 16:11:51 UTC (rev 3953)
@@ -27,6 +27,7 @@
 export(Fit2Moms)
 export(FitExpectationMaximization)
 export(FitMultivariateGarch)
+export(FitOU)
 export(FitOrnsteinUhlenbeck)
 export(GenerateLogNormalDistribution)
 export(GenerateUniformDrawsOnUnitSphere)
@@ -47,6 +48,8 @@
 export(MvnRnd)
 export(NoisyObservations)
 export(NormalCopulaPdf)
+export(OUstep)
+export(OUstepEuler)
 export(PHist)
 export(PanicCopula)
 export(PartialConfidencePosterior)

Modified: pkg/Meucci/R/MaxRsqCS.R
===================================================================
--- pkg/Meucci/R/MaxRsqCS.R	2015-08-13 10:02:27 UTC (rev 3952)
+++ pkg/Meucci/R/MaxRsqCS.R	2015-08-13 16:11:51 UTC (rev 3953)
@@ -1,9 +1,9 @@
-#' @title Solve for G that maximises sample r-square of X*G'*B' with X under constraints A*G<=D
-#' and Aeq*G=Deq
+#' @title Solve for G that maximises sample r-square of X*G'*B' with X under
+#' constraints A*G<=D and Aeq*G=Deq
#'
-#' @description Solve for G that maximises sample r-square of X*G'*B' with X under constraints A*G<=D
-#' and Aeq*G=Deq (A,D, Aeq,Deq conformable matrices),as described in A. Meucci,
-#' "Risk and Asset Allocation", Springer, 2005.
+#' @description Solve for G that maximises sample r-square of X*G'*B' with X
+#' under constraints A*G<=D and Aeq*G=Deq (A,D, Aeq,Deq conformable matrices),
+#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005.
#'
#' @param X : [matrix] (T x N)
#' @param B : [matrix] (T x K)
@@ -17,105 +17,94 @@
#'
#' @return G : [matrix] (N x K)
#'
-#' @note
-#' Initial code by Tai-Ho Wang
+#' @note
+#' Initial code by Tai-Ho Wang
#'
#' @references
-#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}.
-#' Used in "E 123 - Cross-section factors: generalized cross-section industry factors".
-#'
+#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management"
+#' \url{http://symmys.com/node/170}. Used in "E 123 - Cross-section factors:
+#' generalized cross-section industry factors".
#' See Meucci's script for "MaxRsqCS.m" #' #' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -MaxRsqCS = function(X, B, W, A = NULL, D = NULL, Aeq = NULL, Deq, lb = NULL, ub = NULL) -{ - N = ncol(X); - K = ncol(B); +MaxRsqCS <- function(X, B, W, A = NULL, D = NULL, Aeq = NULL, Deq, lb = NULL, + ub = NULL) { + N <- ncol(X) + K <- ncol(B) # compute sample estimates - Sigma_X = (dim(X)[1]-1)/dim(X)[1] * cov(X); + Sigma_X <- (dim(X)[1] - 1) / dim(X)[1] * cov(X) - # restructure for feeding to quadprog - Phi = t(W) %*% W; + # restructure for feeding to quadprog + Phi <- t(W) %*% W - # restructure the linear term of the objective function - FirstDegree = matrix( Sigma_X %*% Phi %*% B, K * N, ); + # restructure the linear term of the objective function + FirstDegree <- matrix(Sigma_X %*% Phi %*% B, K * N, ) # restructure the quadratic term of the objective function - SecondDegree = Sigma_X; - - for( k in 1 : (N - 1) ) - { - SecondDegree = blkdiag(SecondDegree, Sigma_X); + SecondDegree <- Sigma_X + + for (k in 1 : (N - 1)) { + SecondDegree <- blkdiag(SecondDegree, Sigma_X) } - - SecondDegree = t( kron( sqrt(Phi) %*% B, diag( 1, N ) ) ) %*% SecondDegree %*% kron( sqrt( Phi ) %*% B, diag( 1, N ) ); - # restructure the equality constraints - if( !length(Aeq) ) - { - AEq = Aeq; - }else - { - AEq = blkdiag(Aeq); - for( k in 2 : K ) - { - AEq = blkdiag(AEq, Aeq); + SecondDegree <- t(kron(sqrt(Phi) %*% B, diag(1, N))) %*% SecondDegree %*% + kron(sqrt(Phi) %*% B, diag(1, N)) + + # restructure the equality constraints + if (!length(Aeq) ) { + AEq <- Aeq + } else { + AEq <- blkdiag(Aeq) + for (k in 2 : K) { + AEq <- blkdiag(AEq, Aeq) } } - Deq = matrix( Deq, , 1); + Deq <- matrix(Deq, , 1) - # resturcture the inequality constraints - if( length(A) ) - { - AA = NULL - for( k in 1 : N ) - { - AA = cbind( AA, kron(diag( 1, K ), A[ k ] ) ); ##ok + # resturcture the inequality constraints + if(length(A)) { + AA <- NULL + for (k in 1 : N) { + AA <- cbind(AA, kron(diag(1, K), A[k])) } - }else - { - AA = A; + } else { + AA <- A } - if( length(D)) - { - D = matrix( D, , 1 ); + if (length(D)) { + D <- matrix(D, , 1) } # restructure upper and lower bounds - if( length(lb) ) - { - lb = matrix( lb, K * N, 1 ); + if (length(lb)) { + lb <- matrix(lb, K * N, 1) } - if( length(ub) ) - { - ub = matrix( ub, K * N, 1 ); + if (length(ub)) { + ub <- matrix(ub, K * N, 1) } # initial guess - x0 = matrix( 1, K * N, 1 ); - if(length(AA)) - { - AA = ( AA + t(AA) ) / 2; # robustify - + x0 <- matrix(1, K * N, 1) + if(length(AA)) { + AA <- (AA + t(AA)) / 2 # robustify } - Amat = rbind( AEq, AA); - bvec = c( Deq, D ); - + Amat <- rbind(AEq, AA) + bvec <- c(Deq, D) + # solve the constrained generlized r-square problem by quadprog - #options = optimset('LargeScale', 'off', 'MaxIter', 2000, 'Display', 'none'); - + #options = optimset('LargeScale', 'off', 'MaxIter', 2000, 'Display', 'none') - b = ipop( c = matrix( FirstDegree ), H = SecondDegree, A = Amat, b = bvec, l = lb , u = ub , r = rep(0, length(bvec)) ) + b <- ipop(c = matrix(FirstDegree), H = SecondDegree, A = Amat, b = bvec, + l = lb, u = ub, r = rep(0, length(bvec))) # reshape for output - G = t( matrix( attributes(b)$primal, N, ) ); + G <- t(matrix(attributes(b)$primal, N, )) - return( G ); -} \ No newline at end of file + return(G) +} Modified: pkg/Meucci/R/MaxRsqTS.R =================================================================== --- pkg/Meucci/R/MaxRsqTS.R 2015-08-13 10:02:27 UTC (rev 3952) +++ pkg/Meucci/R/MaxRsqTS.R 2015-08-13 16:11:51 UTC (rev 3953) @@ 
-1,6 +1,6 @@ -#' Solve for B that maximises sample r-square of F'*B' with X under constraints A*G<=D -#' and Aeq*G=Deq (A,D, Aeq,Deq conformable matrices),as described in A. Meucci, -#' "Risk and Asset Allocation", Springer, 2005. +#' Solve for B that maximises sample r-square of F'*B' with X under constraints +#' A*G<=D and Aeq*G=Deq (A,D, Aeq,Deq conformable matrices),as described in +#' A. Meucci, "Risk and Asset Allocation", Springer, 2005. #' #' @param X : [matrix] (T x N) #' @param F : [matrix] (T x K) @@ -18,109 +18,98 @@ #' Initial MATLAB's code by Tai-Ho Wang. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "MaxRsqTS.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}. See Meucci's script for "MaxRsqTS.m" #' #' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -MaxRsqTS = function(X, F, W, A = NULL, D = NULL, Aeq = NULL, Deq, lb = NULL, ub = NULL) -{ +MaxRsqTS <- function(X, F, W, A = NULL, D = NULL, Aeq = NULL, Deq, lb = NULL, + ub = NULL) { - N = dim(X)[ 2 ]; - K = dim(F)[ 2 ]; - X = matrix(as.numeric(X), dim(X)) + N <- dim(X)[2] + K <- dim(F)[2] + X <- matrix(as.numeric(X), dim(X)) # compute sample estimates - # E_X = apply( X, 2, mean); - # E_F = apply( F, 2, mean); - XF = cbind( X, F ); - SigmaJoint_XF = (dim(XF)[1]-1)/dim(XF)[1] * cov(XF); - - Sigma_X = SigmaJoint_XF[ 1:N, 1:N ]; - Sigma_XF = SigmaJoint_XF[ 1:N, (N+1):(N+K) ]; - Sigma_F = SigmaJoint_XF[ (N+1):(N+K), (N+1):(N+K) ]; + # E_X = apply(X, 2, mean) + # E_F = apply(F, 2, mean) + XF <- cbind(X, F) + SigmaJoint_XF <- (dim(XF)[1] - 1) / dim(XF)[1] * cov(XF) + Sigma_X <- SigmaJoint_XF[1:N, 1:N] + Sigma_XF <- SigmaJoint_XF[1:N, (N + 1):(N + K)] + Sigma_F <- SigmaJoint_XF[(N + 1):(N + K), (N + 1):(N + K)] - # restructure for feeding to quadprog - Phi = t(W) %*% W; - trSigma_WX = sum( diag( Sigma_X %*% Phi ) ); - # restructure the linear term of the objective function - FirstDegree = ( -1 / trSigma_WX ) * matrix( t( Phi %*% Sigma_XF ), N*K, ); + # restructure for feeding to quadprog + Phi <- t(W) %*% W + trSigma_WX <- sum(diag(Sigma_X %*% Phi)) + # restructure the linear term of the objective function + FirstDegree <- (-1 / trSigma_WX) * matrix(t(Phi %*% Sigma_XF), N * K,) + # restructure the quadratic term of the objective function - SecondDegree = Sigma_F; - for( k in 1 : (N - 1) ) - { - SecondDegree = blkdiag( SecondDegree, Sigma_F ); + SecondDegree <- Sigma_F + for(k in 1 : (N - 1)) { + SecondDegree <- blkdiag(SecondDegree, Sigma_F) } - SecondDegree = ( 1 / trSigma_WX ) * t(kron( sqrt( Phi ), diag( 1, K ))) %*% SecondDegree %*% kron( sqrt(Phi), diag( 1, K ) ); + SecondDegree <- (1 / trSigma_WX) * t(kron(sqrt(Phi), diag(1, K))) %*% + SecondDegree %*% kron(sqrt(Phi), diag(1, K)) - # restructure the equality constraints - if( !length(Aeq) ) - { - AEq = Aeq; - }else - { - AEq = NULL; - for( k in 1 : N ) - { - AEq = cbind( AEq, kron( diag( 1, K ), Aeq[k] ) ); + # restructure the equality constraints + if(!length(Aeq) ) { + AEq <- Aeq + } else { + AEq <- NULL + for (k in 1 : N) { + AEq <- cbind(AEq, kron(diag(1, K), Aeq[k])) } } - Deq = matrix( Deq, , 1); + Deq <- matrix(Deq, , 1) - # resturcture the inequality constraints - if( length(A) ) - { - AA = NULL - for( k in 1 : N ) - { - AA = cbind( AA, kron(diag( 1, K ), A[ k ] ) ); ##ok + # resturcture the inequality constraints + if(length(A)) { + AA <- NULL + for (k in 1 : N) { + AA <- cbind(AA, 
kron(diag(1, K), A[k])) } - }else - { - AA = A; + } else { + AA <- A } - if( length(D)) - { - D = matrix( D, , 1 ); + if(length(D)) { + D <- matrix(D, , 1) } # restructure upper and lower bounds - if( length(lb) ) - { - lb = matrix( lb, K * N, 1 ); + if(length(lb)) { + lb <- matrix(lb, K * N, 1) } - if( length(ub) ) - { - ub = matrix( ub, K * N, 1 ); + if(length(ub)) { + ub <- matrix(ub, K * N, 1) } # initial guess - x0 = matrix( 1, K * N, 1 ); - if(length(AA)) - { - AA = ( AA + t(AA) ) / 2; # robustify - + x0 <- matrix(1, K * N, 1) + if(length(AA)) { + AA <- (AA + t(AA)) / 2 # robustify } - Amat = rbind( AEq, AA); - bvec = c( Deq, D ); + Amat <- rbind(AEq, AA) + bvec <- c(Deq, D ) # solve the constrained generlized r-square problem by quadprog - #options = optimset('LargeScale', 'off', 'MaxIter', 2000, 'Display', 'none'); - + #options = optimset('LargeScale', 'off', 'MaxIter', 2000, 'Display', 'none') - b = ipop( c = matrix( FirstDegree ), H = SecondDegree, A = Amat, b = bvec, l = lb , u = ub , r = rep(0, length(bvec)) ) - + b <- ipop(c = matrix(FirstDegree), H = SecondDegree, A = Amat, b = bvec, + l = lb, u = ub, r = rep(0, length(bvec))) + # reshape for output - G = matrix( attributes(b)$primal, N, ) ; + G <- matrix(attributes(b)$primal, N,) - return( G ); -} \ No newline at end of file + return(G) +} Modified: pkg/Meucci/R/MultivariateOUnCointegration.R =================================================================== --- pkg/Meucci/R/MultivariateOUnCointegration.R 2015-08-13 10:02:27 UTC (rev 3952) +++ pkg/Meucci/R/MultivariateOUnCointegration.R 2015-08-13 16:11:51 UTC (rev 3953) @@ -3,139 +3,158 @@ #' @param X_0 a matrix containing the starting value of each process #' @param t a numeric containing the timestep #' @param Mu a vector containing the unconditional expectation of the process -#' @param Th a transition matrix, i.e., a fully generic square matrix that steers the deterministic portion -#' of the evolution of the process +#' @param Th a transition matrix, i.e., a fully generic square matrix that +#' steers the deterministic portion of the evolution of the process #' @param Sig a square matrix that drives the dispersion of the process #' #' @return a list containing -#' @return X_t a vector containing the value of the process after the given timestep +#' @return X_t a vector containing the value of the process after the given +#' timestep #' @return Mu_t a vector containing the conditional expectation of the process #' @return Sig_t a matrix containing the covariance after the time step #' -#' \deqn{ X_{t+ \tau } = \big(I- e^{- \theta \tau } \big) \mu + e^{- \theta \tau } X_{t} + \epsilon _{t, \tau } } +#' \deqn{ X_{t+ \tau } = \big(I- e^{- \theta \tau } \big) \mu + +#' e^{- \theta \tau } X_{t} + \epsilon _{t, \tau } } #' @references -#' A. Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate Ornstein-Uhlenbeck" - Formula (2) -#' \url{http://ssrn.com/abstract=1404905} +#' A. 
Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate +#' Ornstein-Uhlenbeck" - Formula (2) \url{http://ssrn.com/abstract=1404905} +#' #' @author Manan Shah \email{mkshah@@cmu.edu} -OUstep = function( X_0 , t , Mu , Th , Sig ) -{ - NumSimul = nrow( X_0 ) - N = ncol( X_0 ) - +#' @export + +OUstep <- function(X_0, t, Mu, Th, Sig) { + NumSimul <- nrow(X_0) + N <- ncol(X_0) + # location - ExpM = expm( -Th * t ) - + ExpM <- expm(-Th * t) + # repmat = function(X,m,n) - R equivalent of repmat (matlab) - X = t( Mu - ExpM %*% Mu ) - mx = dim( X )[1] - nx = dim( X )[2] - Mu_t = matrix( t ( matrix( X , mx , nx*1 ) ), mx * NumSimul, nx * 1, byrow = T ) + X_0 %*% ExpM - + X <- t(Mu - ExpM %*% Mu) + mx <- dim(X)[1] + nx <- dim(X)[2] + Mu_t <- matrix(t (matrix(X, mx, nx*1)), mx * NumSimul, nx * 1, byrow = T) + + X_0 %*% ExpM # scatter - TsT = kronecker( Th , diag( N ) ) + kronecker( diag( N ) , Th ) - - VecSig = Sig - dim( VecSig ) = c( N^2 , 1 ) - VecSig_t = solve( TsT ) %*% ( diag( N^2 ) - expm( -TsT * t ) ) %*% VecSig - Sig_t = VecSig_t - dim( Sig_t ) = c( N , N ) - Sig_t = ( Sig_t + t( Sig_t ) ) / 2 - - Eps = mvrnorm( NumSimul, rep( 0 , N ), Sig_t ) - - X_t = Mu_t + Eps - Mu_t = t( colMeans( Mu_t ) ) - - return( list( X_t = X_t, Mu_t = Mu_t, Sig_t = Sig_t ) ) + TsT <- kronecker(Th, diag(N)) + kronecker(diag(N), Th) + + VecSig <- Sig + dim(VecSig) <- c(N ^ 2, 1) + VecSig_t <- solve(TsT) %*% (diag(N ^ 2) - expm(-TsT * t)) %*% VecSig + Sig_t <- VecSig_t + dim(Sig_t) <- c(N, N) + Sig_t <- (Sig_t + t(Sig_t)) / 2 + + Eps <- mvrnorm(NumSimul, rep(0, N), Sig_t) + + X_t <- Mu_t + Eps + Mu_t <- t(colMeans(Mu_t)) + + return(list(X_t = X_t, Mu_t = Mu_t, Sig_t = Sig_t)) } -#' Generate the next element based on Ornstein-Uhlenbeck process using antithetic concept and assuming that the -#' Brownian motion has Euler discretization +#' Generate the next element based on Ornstein-Uhlenbeck process using +#' antithetic concept and assuming that the Brownian motion has Euler +#' discretization #' #' @param X_0 a matrix containing the starting value of each process #' @param Dt a numeric containing the timestep #' @param Mu a vector containing the unconditional expectation of the process -#' @param Th a transition matrix, i.e., a fully generic square matrix that steers the deterministic portion -#' of the evolution of the process +#' @param Th a transition matrix, i.e., a fully generic square matrix that +#' steers the deterministic portion of the evolution of the process #' @param Sig a square matrix that drives the dispersion of the process #' #' @return a list containing -#' @return X_t a vector containing the value of the process after the given timestep +#' @return X_t a vector containing the value of the process after the given +#' timestep #' @return Mu_t a vector containing the conditional expectation of the process #' @return Sig_t a matrix containing the covariance after the time step #' -#' \deqn{ X_{t+ \tau } = \big(I- e^{- \theta \tau } \big) \mu + e^{- \theta \tau } X_{t} + \epsilon _{t, \tau } } +#' \deqn{ X_{t+ \tau } = \big(I- e^{- \theta \tau } \big) \mu + +#' e^{- \theta \tau } X_{t} + \epsilon _{t, \tau } } #' @references -#' A. Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate Ornstein-Uhlenbeck" - Formula (2) -#' \url{http://ssrn.com/abstract=1404905} +#' A. Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate +#' Ornstein-Uhlenbeck" - Formula (2). 
\url{http://ssrn.com/abstract=1404905} +#' #' @author Manan Shah \email{mkshah@@cmu.edu} -OUstepEuler = function( X_0 , Dt , Mu , Th , Sig ) -{ - NumSimul = nrow( X_0 ) - N = ncol( X_0 ) - +#' @export + +OUstepEuler <- function(X_0, Dt, Mu, Th, Sig){ + NumSimul <- nrow(X_0) + N <- ncol(X_0) + # location - ExpM = expm( as.matrix( -Th %*% Dt ) ) - - # repmat = function(X,m,n) - R equivalent of repmat (matlab) - X = t( Mu - ExpM %*% Mu ) - mx = dim( X )[1] - nx = dim( X )[2] - Mu_t = matrix( t ( matrix( X , mx , nx*1 ) ), mx * NumSimul, nx * 1, byrow = T ) + X_0 %*% ExpM - + ExpM <- expm(as.matrix(-Th %*% Dt)) + + # repmat <- function(X,m,n) - R equivalent of repmat (matlab) + X <- t(Mu - ExpM %*% Mu) + mx <- dim(X)[1] + nx <- dim(X)[2] + Mu_t <- matrix(t(matrix(X, mx, nx)), mx * NumSimul, nx, byrow = T) + + X_0 %*% ExpM + # scatter - Sig_t = Sig %*% Dt - Eps = mvrnorm( NumSimul / 2, rep( 0 , N ) , Sig_t ) - Eps = rbind( Eps, -Eps) - - X_t = Mu_t + Eps - Mu_t = t( colMeans( X_t ) ) - - return( list( X_t = X_t, Mu_t = Mu_t, Sig_t = Sig_t ) ) + Sig_t <- Sig %*% Dt + Eps <- mvrnorm(NumSimul / 2, rep(0, N), Sig_t) + Eps <- rbind(Eps, -Eps) + + X_t <- Mu_t + Eps + Mu_t <- t(colMeans(X_t)) + + return(list(X_t = X_t, Mu_t = Mu_t, Sig_t = Sig_t)) } -#' Fit the Ornstein-uhlenbeck process to model the behavior for different values of the timestep. +#' Fit the Ornstein-uhlenbeck process to model the behavior for different values +#' of the timestep. #' -#' @param Y a matrix containing the value of a process at various time steps. +#' @param Y a matrix containing the value of a process at various time +#' steps. #' @param tau a numeric containing the timestep #' #' @return a list containing #' @return Mu a vector containing the expectation of the process -#' @return Sig a matrix containing the covariance of the resulting fitted OU process -#' @return Th a transition matrix required for defining the fitted OU process +#' @return Sig a matrix containing the covariance of the resulting fitted OU +#' process +#' @return Th a transition matrix required for defining the fitted OU +#' process #' -#' \deqn{ x_{t+ \tau } = \big(I- e^{- \theta \tau } \big) \mu + e^{- \theta \tau } x_{t}, -#' vec \big( \Sigma _{ \tau } \big) \equiv \big( \Theta \oplus \Theta \big) ^{-1} \big(I- e^{( \Theta \oplus \Theta ) \tau } \big) vec \big( \Sigma \big) } +#' \deqn{ x_{t+ \tau } = \big(I- e^{- \theta \tau } \big) \mu + +#' e^{- \theta \tau } x_{t}, vec \big( \Sigma _{ \tau } \big) \equiv +#' \big(\Theta \oplus \Theta \big) ^{-1} \big(I - e^{(\Theta \oplus \Theta) +#' \tau } \big) vec \big( \Sigma \big) } +#' #' @references -#' A. Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate Ornstein-Uhlenbeck" - Formula (8),(9) -#' \url{http://ssrn.com/abstract=1404905} +#' A. Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate +#' Ornstein-Uhlenbeck" - Formula (8),(9). 
\url{http://ssrn.com/abstract=1404905} #' @author Manan Shah \email{mkshah@@cmu.edu} -FitOU = function ( Y, tau ) -{ +#' @export + +FitOU <- function(Y, tau) { library(expm) - T = nrow( Y ) - N = ncol( Y ) - - X = Y[ -1 , ] - F = cbind( rep( 1, T-1 ), Y [ 1:T-1 ,] ) - E_XF = t( X ) %*% F / T - E_FF = t( F ) %*% F / T - B = E_XF %*% solve( E_FF ) - - Th = -logm ( B [ , -1 ] ) / tau - Mu = solve( diag( N ) - B[ , -1 ] ) %*% B[ , 1 ] - - U = F %*% t( B ) - X - Sig_tau = cov( U ) - - N = length( Mu ) - TsT = kronecker( Th , diag( N ) ) + kronecker( diag( N ) , Th ) - - VecSig_tau = Sig_tau - dim( VecSig_tau ) = c( N^2 , 1 ) - VecSig = solve( diag( N^2 ) - expm( as.matrix( -TsT * tau ) ) ) %*% TsT %*% VecSig_tau - Sig = VecSig - dim( Sig ) = c( N , N ) - - return( list( Mu = Mu, Th = Th, Sig = Sig ) ) -} \ No newline at end of file + T <- nrow(Y) + N <- ncol(Y) + + X <- Y[-1, ] + F <- cbind(rep(1, T - 1), Y [1:T - 1, ]) + E_XF <- t(X) %*% F / T + E_FF <- t(F) %*% F / T + B <- E_XF %*% solve(E_FF) + + Th <- -logm (B [, -1]) / tau + Mu <- solve(diag(N) - B[, -1]) %*% B[, 1] + + U <- F %*% t(B) - X + Sig_tau <- cov(U) + + N <- length(Mu) + TsT <- kronecker(Th, diag(N)) + kronecker(diag(N), Th) + + VecSig_tau <- Sig_tau + dim(VecSig_tau) <- c(N ^ 2, 1) + VecSig <- solve(diag(N ^ 2) - expm(as.matrix(-TsT * tau))) %*% TsT %*% + VecSig_tau + Sig <- VecSig + dim(Sig) <- c(N, N) + + return(list(Mu = Mu, Th = Th, Sig = Sig)) +} Modified: pkg/Meucci/demo/S_CPPI.R =================================================================== --- pkg/Meucci/demo/S_CPPI.R 2015-08-13 10:02:27 UTC (rev 3952) +++ pkg/Meucci/demo/S_CPPI.R 2015-08-13 16:11:51 UTC (rev 3953) @@ -1,114 +1,125 @@ -#' This script illustrates the CPPI (constant proportion portfolio insurance) dynamic strategy, as described in -#' A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 6. +#' This script illustrates the CPPI (constant proportion portfolio insurance) +#' dynamic strategy, as described in A. Meucci,"Risk and Asset Allocation", +#' Springer, 2005, Chapter 6. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 264 - Constant proportion portfolio insurance". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 264 - Constant proportion portfolio +#' insurance". 
#' #' See Meucci's script for "S_CPPI.m" # #' @author Xavier Valls \email{xaviervallspla@@gmail.com} -################################################################################################################## +################################################################################ ### Input parameters -Initial_Investment = 1000; -Time_Horizon = 6 / 12; # in years -Time_Step = 1 / 252; # in years +Initial_Investment <- 1000 +Time_Horizon <- 6 / 12 # in years +Time_Step <- 1 / 252 # in years -m = 0.2; # yearly expected return on the underlying -s = 0.40; # yearly expected percentage volatility on the stock index -r = 0.04; # risk-free (money market) interest rate +m <- 0.2 # yearly expected return on the underlying +s <- 0.40 # yearly expected percentage volatility on the stock index +r <- 0.04 # risk-free (money market) interest rate -nSim = 30000; +nSim <- 30000 -################################################################################################################## +################################################################################ ### Setup # floor today (will evolve at the risk-free rate), e.g.: 950 -Floor = 980; +Floor <- 980 # leverage on the cushion between your money and the floor, e.g. 3 -Multiple_CPPI = 5; +Multiple_CPPI <- 5 -################################################################################################################## +################################################################################ ### Initialize values -Underlying_Index = Initial_Investment; # value of the underlyting at starting time, normalzed to equal investment -Start = Underlying_Index; -Elapsed_Time = 0; -Portfolio_Value = Initial_Investment; +# value of the underlying at starting time, normalized to equal investment +Underlying_Index <- Initial_Investment +Start <- Underlying_Index +Elapsed_Time <- 0 +Portfolio_Value <- Initial_Investment -Cushion = max( 0, Portfolio_Value - Floor ); -Underlyings_in_Portfolio = min(Portfolio_Value, max( 0, Multiple_CPPI * Cushion ) ); -#Cash_in_Portfolio = Portfolio_Value - Underlyings_in_Portfolio; -Underlying_in_Portfolio_Percent = Underlyings_in_Portfolio / Portfolio_Value; +Cushion <- max(0, Portfolio_Value - Floor) +Underlyings_in_Portfolio <- min(Portfolio_Value, max(0, + Multiple_CPPI * Cushion)) +#Cash_in_Portfolio <- Portfolio_Value - Underlyings_in_Portfolio +Underlying_in_Portfolio_Percent <- Underlyings_in_Portfolio / Portfolio_Value -Underlyings_in_Portfolio = Portfolio_Value * Underlying_in_Portfolio_Percent; -Cash_in_Portfolio = Portfolio_Value - Underlyings_in_Portfolio; +Underlyings_in_Portfolio <- Portfolio_Value * Underlying_in_Portfolio_Percent +Cash_in_Portfolio <- Portfolio_Value - Underlyings_in_Portfolio -################################################################################################################## +################################################################################ ### Initialize parameters for the plot (no theory in this) -Portfolio_Series = Portfolio_Value; -Market_Series = Underlying_Index; -Percentage_Series = Underlying_in_Portfolio_Percent; +Portfolio_Series <- Portfolio_Value +Market_Series <- Underlying_Index +Percentage_Series <- Underlying_in_Portfolio_Percent -################################################################################################################## +################################################################################ ### Asset evolution and portfolio rebalancing -while( Elapsed_Time < (Time_Horizon - 10^(-5)) ) # add this term to avoid errors -{ + +while (Elapsed_Time < (Time_Horizon - 10 ^ (-5)) ) { # time elapses... - Elapsed_Time = Elapsed_Time + Time_Step; - + Elapsed_Time <- Elapsed_Time + Time_Step + # ...asset prices evolve and portfolio takes on new value... - Multiplicator = exp( (m - s ^ 2 / 2) * Time_Step + s * sqrt( Time_Step ) * rnorm( NumSimul )); - Underlying_Index = Underlying_Index * Multiplicator; - Underlyings_in_Portfolio = Underlyings_in_Portfolio * Multiplicator; - Cash_in_Portfolio = Cash_in_Portfolio * exp(r * Time_Step); - Portfolio_Value = Underlyings_in_Portfolio + Cash_in_Portfolio; + Multiplicator <- exp((m - s ^ 2 / 2) * Time_Step + s * + sqrt(Time_Step) * rnorm(nSim)) + Underlying_Index <- Underlying_Index * Multiplicator + Underlyings_in_Portfolio <- Underlyings_in_Portfolio * Multiplicator + Cash_in_Portfolio <- Cash_in_Portfolio * exp(r * Time_Step) + Portfolio_Value <- Underlyings_in_Portfolio + Cash_in_Portfolio # ...and we rebalance our portfolio - Floor = Floor * exp( r * Time_Step ); - Cushion = pmax( 0, (Portfolio_Value - Floor) ); - Underlyings_in_Portfolio = pmin(Portfolio_Value, pmax( 0, Multiple_CPPI * Cushion) ); - Cash_in_Portfolio = Portfolio_Value - Underlyings_in_Portfolio; - Underlying_in_Portfolio_Percent = Underlyings_in_Portfolio / Portfolio_Value; - + Floor <- Floor * exp(r * Time_Step) + Cushion <- pmax(0, (Portfolio_Value - Floor)) + Underlyings_in_Portfolio <- pmin(Portfolio_Value, pmax(0, + Multiple_CPPI * Cushion)) + Cash_in_Portfolio <- Portfolio_Value - + Underlyings_in_Portfolio + Underlying_in_Portfolio_Percent <- Underlyings_in_Portfolio / + Portfolio_Value + # store one path for the movie (no theory in this) - Portfolio_Series = cbind( Portfolio_Series, Portfolio_Value[ 1 ] ); ##ok<*AGROW> - Market_Series = cbind( Market_Series, Underlying_Index[ 1 ] ); - Percentage_Series = cbind( Percentage_Series, Underlying_in_Portfolio_Percent[ 1 ] ); + Portfolio_Series <- cbind(Portfolio_Series, Portfolio_Value[1]) + Market_Series <- cbind(Market_Series, Underlying_Index[1]) + Percentage_Series <- cbind(Percentage_Series, + Underlying_in_Portfolio_Percent[1]) } -################################################################################################################## +################################################################################ ### Play the movie for one path -Time = seq( 0, Time_Horizon, Time_Step); -y_max = max( cbind( Portfolio_Series, Market_Series) ) * 1.2; -dev.new(); -par( mfrow = c(2,1)) -for( i in 1 : length(Time) ) -{ - plot( Time[ 1:i ], Portfolio_Series[ 1:i ], type ="l", lwd = 2.5, col = "blue", ylab = "value", - xlim = c(0, Time_Horizon), ylim = c(0, y_max), main = "investment (blue) vs underlying (red) value"); - lines( Time[ 1:i ], Market_Series[ 1:i ], lwd = 2, col = "red" ); - #axis( 1, [0, Time_Horizon, 0, y_max]); - - plot(Time[ 1:i ], Percentage_Series[ 1:i ], type = "h", col = "red", xlab = "time", ylab = "#", - xlim = c(0, Time_Horizon), ylim =c(0,1), main = "percentage of underlying in portfolio"); - +Time <- seq(0, Time_Horizon, Time_Step) +y_max <- max(cbind(Portfolio_Series, Market_Series)) * 1.2 +dev.new() +par(mfrow = c(2, 1)) +for (i in 1 : length(Time)) { + plot(Time[1:i], Portfolio_Series[1:i], type = "l", lwd = 2.5, + col = "blue", ylab = "value", xlim = c(0, Time_Horizon), + ylim = c(0, y_max), + main = "investment (blue) vs underlying (red) value") + lines(Time[1:i], Market_Series[1:i], lwd = 2, col = "red") + #axis(1, [0, Time_Horizon, 0, y_max]) + + plot(Time[1:i], Percentage_Series[1:i], type = "h", col = "red", + xlab = "time", ylab = "#", xlim = c(0, Time_Horizon), ylim = c(0, 1), + main = "percentage of underlying in portfolio") } - -################################################################################################################## +################################################################################ ### plot the scatterplot -dev.new(); +dev.new() # marginals -NumBins = round(10 * log(NumSimul)); -layout( matrix(c(1,2,2,2,1,2,2,2,1,2,2,2,0,3,3,3), 4, 4, byrow = TRUE)); -barplot( table( cut( Portfolio_Value, NumBins )), horiz=TRUE, yaxt="n"); +NumBins <- round(10 * log(nSim)) +layout(matrix(c(1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 0, 3, 3, 3), 4, 4, + byrow = TRUE)) +barplot(table(cut(Portfolio_Value, NumBins)), horiz = TRUE, yaxt = "n") # joint scatter plot -plot(Underlying_Index, Portfolio_Value, xlab = "underlying at horizon (~ buy & hold )", ylab = "investment at horizon" ); -so = sort( Underlying_Index ); -lines( so, so, col = "red" ); +plot(Underlying_Index, Portfolio_Value, + xlab = "underlying at horizon (~ buy & hold)", + ylab = "investment at horizon") +so <- sort(Underlying_Index) +lines(so, so, col = "red") -barplot( table( cut( Underlying_Index, NumBins )), yaxt="n"); - +barplot(table(cut(Underlying_Index, NumBins)), yaxt = "n") Modified: pkg/Meucci/demo/S_CallsProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_CallsProjectionPricing.R 2015-08-13 10:02:27 UTC (rev 3952) +++ pkg/Meucci/demo/S_CallsProjectionPricing.R 2015-08-13 16:11:51 UTC (rev 3953) @@ -1,104 +1,111 @@ -#'This script projects the distribution of the market invariants for the derivatives market -#'Then it computes the distribution of prices at the investment horizon as described in A. Meucci, -#'"Risk and Asset Allocation", Springer, 2005, Chapter 3. +#'This script projects the distribution of the market invariants for the +#' derivatives market +#'Then it computes the distribution of prices at the investment horizon as +#' described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Ch 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 143 - Derivatives market: projection of invariants". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 143 - Derivatives market: projection of +#' invariants". #' #' See Meucci's script for "S_CallsProjectionPricing.m" #' #' @author Xavier Valls \email{xaviervallspla@@gmail.com} -################################################################################################################## +################################################################################ ### Load data # load 'spot' for underlying and current vol surface, given by [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3953 From noreply at r-forge.r-project.org Thu Aug 13 18:24:09 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 13 Aug 2015 18:24:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3954 - pkg/Dowd Message-ID: <20150813162409.D30711879D3@r-forge.r-project.org> Author: dacharya Date: 2015-08-13 18:24:09 +0200 (Thu, 13 Aug 2015) New Revision: 3954 Modified: pkg/Dowd/NAMESPACE Log: Functions NormalQuantileStandardError and tQuantileStandardError added.
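The two functions announced in this revision implement the textbook asymptotic standard error of a sample quantile estimate, se(q_p) ~ sqrt(p(1 - p)/n)/freq, where freq is the probability mass of a bin of width bin.size centred on the quantile (for small bin.size this approximates f(q_p) * bin.size). A minimal usage sketch, assuming the package is installed and the functions exported as below; all numeric inputs are illustrative only:

library(Dowd)

# Standard error of the 99% quantile estimate from n = 500 observations
# of a N(0, 1) P/L distribution, with unit bin width
NormalQuantileStandardError(0.99, 500, 0, 1, 1)

# Same tail probability under a Student t with 5 degrees of freedom;
# the fatter tails should produce a larger tail-quantile standard error
tQuantileStandardError(0.99, 500, 0, 1, 5, 1)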
Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-13 16:11:51 UTC (rev 3953) +++ pkg/Dowd/NAMESPACE 2015-08-13 16:24:09 UTC (rev 3954) @@ -106,6 +106,7 @@ export(NormalESPlot2DHP) export(NormalESPlot3D) export(NormalQQPlot) +export(NormalQuantileStandardError) export(NormalSpectralRiskMeasure) export(NormalVaR) export(NormalVaRConfidenceInterval) @@ -135,6 +136,7 @@ export(tESPlot2DCL) export(tESPlot2DHP) export(tESPlot3D) +export(tQuantileStandardError) export(tVaR) export(tVaRDFPerc) export(tVaRESPlot2DCL) From noreply at r-forge.r-project.org Thu Aug 13 18:26:26 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 13 Aug 2015 18:26:26 +0200 (CEST) Subject: [Returnanalytics-commits] r3955 - pkg/Dowd/R Message-ID: <20150813162626.ED7F8186CF5@r-forge.r-project.org> Author: dacharya Date: 2015-08-13 18:26:26 +0200 (Thu, 13 Aug 2015) New Revision: 3955 Added: pkg/Dowd/R/tQuantileStandardError.R Log: Function tQuantileStandardError added. Added: pkg/Dowd/R/tQuantileStandardError.R =================================================================== --- pkg/Dowd/R/tQuantileStandardError.R (rev 0) +++ pkg/Dowd/R/tQuantileStandardError.R 2015-08-13 16:26:26 UTC (rev 3955) @@ -0,0 +1,42 @@ +#' Standard error of t quantile estimate +#' +#' Estimates standard error of t quantile estimate +#' +#' @param prob Tail probability. Can be a vector or scalar +#' @param n Sample size +#' @param mu Mean of the normal distribution +#' @param sigma Standard deviation of the distribution +#' @param df Number of degrees of freedom +#' @param bin.size Bin size. It is an optional parameter with default value 1 +#' @return Vector or scalar +#' depending on whether the probability is a vector +#' or scalar +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Estimates standard error of t quantile estimate +#' tQuantileStandardError(.8, 100, 0, .5, 5, 3) +#' +#' @export +tQuantileStandardError <- function(prob, n, mu, sigma, df, bin.size = 1){ + # Check that inputs obey sign and value restrictions + if (prob < 0 | prob > 1) { + stop("Probability must be nonnegative and no greater than 1") + } + if (n <= 0){ + stop("Sample size must be positive") + } + if (bin.size <= 0){ + stop("Bin size must be greater than 0") + } + # Determination of frequency + x <- mu + sigma * qt(prob, df) + z <- (x - mu)/sigma + freq <- pt((x + .5 * bin.size - mu) / sigma, df) - pt((x - 0.5 * bin.size - mu) / sigma, df) + # Standard error estimation + y <- sqrt(prob * (1 - prob) / (n * freq ^ 2)) # Standard Error + return(y) +} \ No newline at end of file From noreply at r-forge.r-project.org Thu Aug 13 18:27:12 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 13 Aug 2015 18:27:12 +0200 (CEST) Subject: [Returnanalytics-commits] r3956 - pkg/Dowd/man Message-ID: <20150813162712.DFB39186CF5@r-forge.r-project.org> Author: dacharya Date: 2015-08-13 18:27:12 +0200 (Thu, 13 Aug 2015) New Revision: 3956 Added: pkg/Dowd/man/tQuantileStandardError.Rd Log: Function tQuantileStandardError added.
Added: pkg/Dowd/man/tQuantileStandardError.Rd =================================================================== --- pkg/Dowd/man/tQuantileStandardError.Rd (rev 0) +++ pkg/Dowd/man/tQuantileStandardError.Rd 2015-08-13 16:27:12 UTC (rev 3956) @@ -0,0 +1,40 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/tQuantileStandardError.R +\name{tQuantileStandardError} +\alias{tQuantileStandardError} +\title{Standard error of t quantile estimate} +\usage{ +tQuantileStandardError(prob, n, mu, sigma, df, bin.size) +} +\arguments{ +\item{prob}{Tail probability. Can be a vector or scalar} + +\item{n}{Sample size} + +\item{mu}{Mean of the normal distribution} + +\item{sigma}{Standard deviation of the distribution} + +\item{df}{Number of degrees of freedom} + +\item{bin.size}{Bin size. It is optional parameter with default value 1} +} +\value{ +Vector or scalar +depending on whether the probability is a vector +or scalar +} +\description{ +Estimates standard error of t quantile estimate +} +\examples{ +# Estimates standard error of normal quantile estimate + tQuantileStandardError(.8, 100, 0, .5, 5, 3) +} +\author{ +Dinesh Acharya +} +\references{ +Dowd, K. Measuring Market Risk, Wiley, 2007. +} + From noreply at r-forge.r-project.org Thu Aug 13 18:35:44 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 13 Aug 2015 18:35:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3957 - pkg/Dowd/man Message-ID: <20150813163544.35DC0187AA6@r-forge.r-project.org> Author: dacharya Date: 2015-08-13 18:35:43 +0200 (Thu, 13 Aug 2015) New Revision: 3957 Added: pkg/Dowd/man/NormalQuantileStandardError.Rd Log: Function NormalQuantileStandardError added. Added: pkg/Dowd/man/NormalQuantileStandardError.Rd =================================================================== --- pkg/Dowd/man/NormalQuantileStandardError.Rd (rev 0) +++ pkg/Dowd/man/NormalQuantileStandardError.Rd 2015-08-13 16:35:43 UTC (rev 3957) @@ -0,0 +1,38 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/NormalQuantileStandardError.R +\name{NormalQuantileStandardError} +\alias{NormalQuantileStandardError} +\title{Standard error of normal quantile estimate} +\usage{ +NormalQuantileStandardError(prob, n, mu, sigma, bin.size) +} +\arguments{ +\item{prob}{Tail probability. Can be a vector or scalar} + +\item{n}{Sample size} + +\item{mu}{Mean of the normal distribution} + +\item{sigma}{Standard deviation of the distribution} + +\item{bin.size}{Bin size. It is optional parameter with default value 1} +} +\value{ +Vector or scalar +depending on whether the probability is a vector +or scalar +} +\description{ +Estimates standard error of normal quantile estimate +} +\examples{ +# Estimates standard error of normal quantile estimate + NormalQuantileStandardError(.8, 100, 0, .5, 3) +} +\author{ +Dinesh Acharya +} +\references{ +Dowd, K. Measuring Market Risk, Wiley, 2007. +} + From noreply at r-forge.r-project.org Thu Aug 13 18:36:09 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 13 Aug 2015 18:36:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3958 - pkg/Dowd/R Message-ID: <20150813163609.496D2187AA6@r-forge.r-project.org> Author: dacharya Date: 2015-08-13 18:36:08 +0200 (Thu, 13 Aug 2015) New Revision: 3958 Added: pkg/Dowd/R/NormalQuantileStandardError.R Log: Function NormalQuantileStandardError added. 
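The asymptotic formula behind the function added below can be checked by simulation: the spread of the sample quantile across many resamples should be close to sqrt(p(1 - p)/n)/f(q_p). A rough check in base R (seed, sample size and replication count are illustrative only):

set.seed(1)
p <- 0.95; n <- 1000

# Spread of the empirical 95% quantile over 5000 N(0, 1) samples of size n
q.hat <- replicate(5000, quantile(rnorm(n), p))
sd(q.hat)

# Asymptotic standard error: sqrt(p (1 - p) / n) / f(q_p)
sqrt(p * (1 - p) / n) / dnorm(qnorm(p))

The two printed values should agree to roughly two decimal places.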
Added: pkg/Dowd/R/NormalQuantileStandardError.R =================================================================== --- pkg/Dowd/R/NormalQuantileStandardError.R (rev 0) +++ pkg/Dowd/R/NormalQuantileStandardError.R 2015-08-13 16:36:08 UTC (rev 3958) @@ -0,0 +1,39 @@ +#' Standard error of normal quantile estimate +#' +#' Estimates standard error of normal quantile estimate +#' +#' @param prob Tail probability. Can be a vector or scalar +#' @param n Sample size +#' @param mu Mean of the normal distribution +#' @param sigma Standard deviation of the distribution +#' @param bin.size Bin size. It is optional parameter with default value 1 +#' @return Vector or scalar +#' depending on whether the probability is a vector +#' or scalar +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Estimates standard error of normal quantile estimate +#' NormalQuantileStandardError(.8, 100, 0, .5, 3) +#' +#' @export +NormalQuantileStandardError <- function(prob, n, mu, sigma, bin.size){ + # Check that inputs obey sign and value restrictions + if (prob < 0|prob>1) { + stop("Probability must be nonnegative and no greater than 1") + } + if (n <= 0){ + stop("Sample size must be positive") + } + if (bin.size <= 0){ + stop("Bin size must be greater than 0") + } + # Determination of frequency + x <- qnorm(prob, mu, sigma) + freq <- pnorm(x+.5*bin.size,mu,sigma) - pnorm(x - 0.5*bin.size, mu, sigma) + y <- sqrt(prob*(1 - prob)/(n*freq^2)) # Standard Error + return(y) +} \ No newline at end of file From noreply at r-forge.r-project.org Sun Aug 16 12:48:29 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 12:48:29 +0200 (CEST) Subject: [Returnanalytics-commits] r3959 - pkg/Dowd Message-ID: <20150816104829.6C622186517@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 12:48:28 +0200 (Sun, 16 Aug 2015) New Revision: 3959 Modified: pkg/Dowd/NAMESPACE Log: Function DefaultRiskyBondVaR added. 
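DefaultRiskyBondVaR, registered in the NAMESPACE below and committed in r3960/r3961, simulates a coupon bond that can default at the middle and at the end of the holding period (each time with probability p, paying the fraction recovery.rate on default), revalues it under a randomly drawn spot rate, and passes the simulated profit/loss to HSVaR. A hypothetical call, with all numeric inputs illustrative and a larger trial count than the documented example for a more stable estimate:

# 1% spot and risk-free rates, 10% coupon, sigma = 0.01, unit investment,
# 10% recovery rate, 20% default probability, 10000 trials,
# 100-day holding period, 95% confidence level
DefaultRiskyBondVaR(0.01, 0.01, 0.1, 0.01, 1, 0.1, 0.2, 10000, 100, 0.95)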
Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-13 16:36:08 UTC (rev 3958) +++ pkg/Dowd/NAMESPACE 2015-08-16 10:48:28 UTC (rev 3959) @@ -30,6 +30,7 @@ export(CornishFisherVaR) export(DBPensionVaR) export(DCPensionVaR) +export(DefaultRiskyBondVaR) export(FilterStrategyLogNormalVaR) export(FrechetES) export(FrechetESPlot2DCl) From noreply at r-forge.r-project.org Sun Aug 16 12:50:28 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 12:50:28 +0200 (CEST) Subject: [Returnanalytics-commits] r3960 - pkg/Dowd/man Message-ID: <20150816105028.C9693187648@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 12:50:28 +0200 (Sun, 16 Aug 2015) New Revision: 3960 Added: pkg/Dowd/man/DefaultRiskyBondVaR.Rd Log: Function DefaultRiskyBondVaR added Added: pkg/Dowd/man/DefaultRiskyBondVaR.Rd =================================================================== --- pkg/Dowd/man/DefaultRiskyBondVaR.Rd (rev 0) +++ pkg/Dowd/man/DefaultRiskyBondVaR.Rd 2015-08-16 10:50:28 UTC (rev 3960) @@ -0,0 +1,47 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/DefaultRiskyBondVaR.R +\name{DefaultRiskyBondVaR} +\alias{DefaultRiskyBondVaR} +\title{VaR for default risky bond portfolio} +\usage{ +DefaultRiskyBondVaR(r, rf, coupon, sigma, amount.invested, recovery.rate, p, + number.trials, hp, cl) +} +\arguments{ +\item{r}{Spot (interest) rate, assumed to be flat} + +\item{rf}{Risk-free rate} + +\item{coupon}{Coupon rate} + +\item{sigma}{Variance} + +\item{amount.invested}{Amount Invested} + +\item{recovery.rate}{Recovery rate} + +\item{p}{Probability of default} + +\item{number.trials}{Number of trials} + +\item{hp}{Holding period} + +\item{cl}{Confidence level} +} +\value{ +Monte Carlo VaR +} +\description{ +Generates Monte Carlo VaR for default risky bond portfolio in Chapter 6.4 +} +\examples{ +# VaR for default risky bond portfolio for given parameters + DefaultRiskyBondVaR(.01, .01, .1, .01, 1, .1, .2, 100, 100, .95) +} +\author{ +Dinesh Acharya +} +\references{ +Dowd, K. Measuring Market Risk, Wiley, 2007. +} + From noreply at r-forge.r-project.org Sun Aug 16 12:50:44 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 12:50:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3961 - pkg/Dowd/R Message-ID: <20150816105044.1C364187648@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 12:50:43 +0200 (Sun, 16 Aug 2015) New Revision: 3961 Added: pkg/Dowd/R/DefaultRiskyBondVaR.R Log: Function DefaultRiskyBondVaR added Added: pkg/Dowd/R/DefaultRiskyBondVaR.R =================================================================== --- pkg/Dowd/R/DefaultRiskyBondVaR.R (rev 0) +++ pkg/Dowd/R/DefaultRiskyBondVaR.R 2015-08-16 10:50:43 UTC (rev 3961) @@ -0,0 +1,58 @@ +#' VaR for default risky bond portfolio +#' +#' Generates Monte Carlo VaR for default risky bond portfolio in Chapter 6.4 +#' +#' @param r Spot (interest) rate, assumed to be flat +#' @param rf Risk-free rate +#' @param coupon Coupon rate +#' @param sigma Variance +#' @param amount.invested Amount Invested +#' @param recovery.rate Recovery rate +#' @param p Probability of default +#' @param number.trials Number of trials +#' @param hp Holding period +#' @param cl Confidence level +#' @return Monte Carlo VaR +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. 
+#' +#' @author Dinesh Acharya +#' @examples +#' +#' # VaR for default risky bond portfolio for given parameters +#' DefaultRiskyBondVaR(.01, .01, .1, .01, 1, .1, .2, 100, 100, .95) +#' +#' @export +DefaultRiskyBondVaR <- function(r, rf, coupon, sigma, amount.invested, recovery.rate, p, number.trials, hp, cl){ + M <- number.trials + delta <- recovery.rate + ann.hp <- hp/360 + # R equals spot rate prevailing at end of hp/2 with term equal to hp.2 + initial.bond.value <- coupon * ((1 + rf)/(1+r))^(ann.hp/2) + (1 + coupon)/((1+r)^ann.hp) + number.of.bonds <- amount.invested/initial.bond.value + z <- rnorm(M) + R <- double(M) + interim.bond.value <- double(M) + interim.default.state <- double(M) + terminal.bond.value <- double(M) + terminal.default.state <- double(M) + for (j in 1:M) { + R[j] <- exp(r + sigma * sqrt(hp/2)*z[j]) # Random realisation of spot rate with term hp/2 + interim.default.state[j] <- rbinom(1,1,p) # Determines whether default occurs at hp/2 + terminal.default.state[j] <- rbinom(1,1,p) # Determines whether default occurs at hp + if (interim.default.state[j] == 0) { + if (terminal.default.state[j] == 0) { + terminal.bond.value[j] <- coupon * (1 + rf) ^ (ann.hp / 2) + (1 + coupon) + } else { + terminal.bond.value[j] <- coupon * ((1 + rf) ^ (ann.hp / 2)) + delta * (1 + coupon) + } + } else { + interim.bond.value[j] <- delta * (coupon + ((1 + coupon) / (1+R[j])^(ann.hp/2))) + terminal.bond.value[j] <- ((1 + rf) ^ (ann.hp / 2)) * interim.bond.value[j] + } + } + profit.or.loss <- number.of.bonds * (terminal.bond.value - initial.bond.value) + # Convert to P/L + hist(-profit.or.loss) + HSVaR(profit.or.loss, cl) +} \ No newline at end of file From noreply at r-forge.r-project.org Sun Aug 16 14:30:16 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 14:30:16 +0200 (CEST) Subject: [Returnanalytics-commits] r3962 - pkg/Dowd Message-ID: <20150816123016.664FE187A33@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 14:30:15 +0200 (Sun, 16 Aug 2015) New Revision: 3962 Modified: pkg/Dowd/NAMESPACE Log: Function BoxCoxVaR added. Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-16 10:50:43 UTC (rev 3961) +++ pkg/Dowd/NAMESPACE 2015-08-16 12:30:15 UTC (rev 3962) @@ -21,6 +21,7 @@ export(BootstrapVaR) export(BootstrapVaRConfInterval) export(BootstrapVaRFigure) +export(BoxCoxVaR) export(CdfOfSumUsingGaussianCopula) export(CdfOfSumUsingGumbelCopula) export(CdfOfSumUsingProductCopula) @@ -147,3 +148,4 @@ export(tVaRPlot3D) import(MASS) import(bootstrap) +import(forecast) From noreply at r-forge.r-project.org Sun Aug 16 14:30:52 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 14:30:52 +0200 (CEST) Subject: [Returnanalytics-commits] r3963 - pkg/Dowd/R Message-ID: <20150816123053.0BCE6187A33@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 14:30:52 +0200 (Sun, 16 Aug 2015) New Revision: 3963 Added: pkg/Dowd/R/BoxCoxVaR.R Log: Function BoxCoxVaR added. 
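BoxCoxVaR, whose source follows, shifts losses so they are strictly positive, applies the Box-Cox transform y = (x^lambda - 1)/lambda (y = log(x) when lambda = 0) with lambda chosen by forecast::BoxCox.lambda, fits a normal to the transformed data, and maps the normal quantile back through the inverse transform x = (1 + lambda*y)^(1/lambda). The round trip can be checked in isolation; a small sketch assuming the forecast package is installed, with purely illustrative data and confidence level:

library(forecast)

set.seed(2)
loss <- abs(rnorm(500)) + 1                      # illustrative positive losses
lambda <- BoxCox.lambda(loss, method = "loglik") # lambda by profile likelihood
z <- BoxCox(loss, lambda)                        # transformed losses

# 95% quantile fitted in the transformed (near-normal) space, mapped back
(1 + lambda * (mean(z) + sd(z) * qnorm(0.95))) ^ (1 / lambda)

# InvBoxCox inverts BoxCox up to numerical error
max(abs(InvBoxCox(z, lambda) - loss))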
Added: pkg/Dowd/R/BoxCoxVaR.R =================================================================== --- pkg/Dowd/R/BoxCoxVaR.R (rev 0) +++ pkg/Dowd/R/BoxCoxVaR.R 2015-08-16 12:30:52 UTC (rev 3963) @@ -0,0 +1,75 @@ +#' Estimates VaR with Box-Cox transformation +#' +#' Function estimates the VaR of a portfolio assuming P and L data set transformed +#' using the BoxCox transformation to make it as near normal as possible, for +#' specified confidence level and holding period implied by data frequency. +#' +#' @param PandLdata Daily Profit/Loss data +#' @param cl Confidence Level. It can be a scalar or a vector. +#' @return Estimated Box-Cox VaR. Its dimension is same as that of cl +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' Hamilton, S. A. and Taylor, M. G. A Comparision of the Box-Cox +#' transformation method and nonparametric methods for estimating quantiles +#' in clinical data with repeated measures. J. Statist. Comput. Simul., vol. +#' 45, 1993, pp. 185 - 201. +#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Estimates Box-Cox VaR +#' a<-rnorm(100) +#' BoxCoxVaR(a,.95) +#' +#' @import forecast +#' +#' @export +BoxCoxVaR <- function(PandLdata, cl){ + # Check that inputs have correct dimensions + cl <- as.matrix(cl) + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + if (min(cl.row, cl.col) > 1) { + stop("Confidence level must be a scalar or a vector") + } + + if (cl.row > cl.col) { + cl <- t(cl) + } + + # Check that inputs obey sign and value restrictions + if (max(cl) >= 1){ + stop("Confidence level(s) must be less than 1") + } + if (min(cl) <= 0){ + stop("Confidence level(s) must be greater than 0") + } + # Transform data and obtain lambda + loss.data <- -PandLdata + loss.data <- loss.data - min(loss.data) + 1 + loss.data <- sort(loss.data) + lambda <- BoxCox.lambda(loss.data, method="loglik") + transdat <- BoxCox(loss.data, lambda) + + # Alternative method: + # for dependence only on MASS and not on forecast package (not working yet!) + # model <- lm(loss.data~1) + # boxcox <- boxcox(model,plotit=FALSE) + # lambda <- with(bc, x[which.max(y)]) + # box cox transformation + # if(lambda == 0) { + # transdat <- log(loss.data) + # } else { + # transdat <- (loss.data^lambda -1)/lambda + # } + + # Estimate mean and standard deviation of transformed data + mu <- mean(transdat) + sigma <- sd(transdat) + VaR <- double(length(cl)) + for(i in 1:length(cl)){ + VaR[i] <- (1 + lambda * (mu + sigma * qnorm(cl[i]))) ^ (1 / lambda) + min(-PandLdata) - 1 # i-th VaR + } + return(VaR) +} \ No newline at end of file From noreply at r-forge.r-project.org Sun Aug 16 14:31:05 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 14:31:05 +0200 (CEST) Subject: [Returnanalytics-commits] r3964 - pkg/Dowd/man Message-ID: <20150816123105.CB1B2187A33@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 14:31:05 +0200 (Sun, 16 Aug 2015) New Revision: 3964 Added: pkg/Dowd/man/BoxCoxVaR.Rd Log: Function BoxCoxVaR added. 
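The commented-out MASS-based alternative inside BoxCoxVaR.R above assigns the profile-likelihood object to boxcox but then reads from bc, which is one reason it is flagged as not working. A sketch of a version that runs (hypothetical, and not a drop-in replacement: MASS::boxcox profiles the likelihood of a linear model, so the lambda it returns need not match forecast's estimate):

library(MASS)

loss.data <- abs(rnorm(500)) + 1     # illustrative shifted positive losses
model <- lm(loss.data ~ 1)           # intercept-only linear model
bc <- boxcox(model, plotit = FALSE)  # profile log-likelihood over lambda grid
lambda <- bc$x[which.max(bc$y)]      # lambda maximising the likelihood

# Box-Cox transform with the fitted lambda
transdat <- if (lambda == 0) log(loss.data) else (loss.data ^ lambda - 1) / lambda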
Added: pkg/Dowd/man/BoxCoxVaR.Rd =================================================================== --- pkg/Dowd/man/BoxCoxVaR.Rd (rev 0) +++ pkg/Dowd/man/BoxCoxVaR.Rd 2015-08-16 12:31:05 UTC (rev 3964) @@ -0,0 +1,38 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/BoxCoxVaR.R +\name{BoxCoxVaR} +\alias{BoxCoxVaR} +\title{Estimates VaR with Box-Cox transformation} +\usage{ +BoxCoxVaR(PandLdata, cl) +} +\arguments{ +\item{PandLdata}{Daily Profit/Loss data} + +\item{cl}{Confidence Level. It can be a scalar or a vector.} +} +\value{ +Estimated Box-Cox VaR. Its dimension is same as that of cl +} +\description{ +Function estimates the VaR of a portfolio assuming P and L data set transformed +using the BoxCox transformation to make it as near normal as possible, for +specified confidence level and holding period implied by data frequency. +} +\examples{ +# Estimates Box-Cox VaR + a<-rnorm(100) + BoxCoxVaR(a,.95) +} +\author{ +Dinesh Acharya +} +\references{ +Dowd, K. Measuring Market Risk, Wiley, 2007. + +Hamilton, S. A. and Taylor, M. G. A Comparision of the Box-Cox +transformation method and nonparametric methods for estimating quantiles +in clinical data with repeated measures. J. Statist. Comput. Simul., vol. +45, 1993, pp. 185 - 201. +} + From noreply at r-forge.r-project.org Sun Aug 16 14:31:32 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 14:31:32 +0200 (CEST) Subject: [Returnanalytics-commits] r3965 - pkg/Dowd Message-ID: <20150816123132.0BFF8187A33@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 14:31:31 +0200 (Sun, 16 Aug 2015) New Revision: 3965 Modified: pkg/Dowd/DESCRIPTION Log: Dependence on forecast added. Modified: pkg/Dowd/DESCRIPTION =================================================================== --- pkg/Dowd/DESCRIPTION 2015-08-16 12:31:05 UTC (rev 3964) +++ pkg/Dowd/DESCRIPTION 2015-08-16 12:31:31 UTC (rev 3965) @@ -9,7 +9,8 @@ Kevin Dowd's book Measuring Market Risk. 
Depends: R (>= 3.0.0), bootstrap, - MASS + MASS, + forecast Suggests: PerformanceAnalytics, testthat License: GPL From noreply at r-forge.r-project.org Sun Aug 16 14:54:07 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 14:54:07 +0200 (CEST) Subject: [Returnanalytics-commits] r3966 - pkg/Dowd Message-ID: <20150816125407.714F718599B@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 14:54:07 +0200 (Sun, 16 Aug 2015) New Revision: 3966 Modified: pkg/Dowd/NAMESPACE Log: Function BoxCoxES.R added Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-16 12:31:31 UTC (rev 3965) +++ pkg/Dowd/NAMESPACE 2015-08-16 12:54:07 UTC (rev 3966) @@ -21,6 +21,7 @@ export(BootstrapVaR) export(BootstrapVaRConfInterval) export(BootstrapVaRFigure) +export(BoxCoxES) export(BoxCoxVaR) export(CdfOfSumUsingGaussianCopula) export(CdfOfSumUsingGumbelCopula) From noreply at r-forge.r-project.org Sun Aug 16 14:54:34 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 16 Aug 2015 14:54:34 +0200 (CEST) Subject: [Returnanalytics-commits] r3967 - pkg/Dowd/R Message-ID: <20150816125434.53B6118599B@r-forge.r-project.org> Author: dacharya Date: 2015-08-16 14:54:33 +0200 (Sun, 16 Aug 2015) New Revision: 3967 Added: pkg/Dowd/R/BoxCoxES.R Log: Function BoxCoxES.R added Added: pkg/Dowd/R/BoxCoxES.R =================================================================== --- pkg/Dowd/R/BoxCoxES.R (rev 0) +++ pkg/Dowd/R/BoxCoxES.R 2015-08-16 12:54:33 UTC (rev 3967) @@ -0,0 +1,54 @@ +#' Estimates ES with Box-Cox transformation +#' +#' Function estimates the ES of a portfolio assuming P and L data set transformed +#' using the BoxCox transformation to make it as near normal as possible, for +#' specified confidence level and holding period implied by data frequency. +#' +#' @param loss.data Daily Profit/Loss data +#' @param cl Confidence Level. It can be a scalar or a vector. +#' @return Estimated Box-Cox ES. Its dimension is same as that of cl +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' Hamilton, S. A. and Taylor, M. G. A Comparision of the Box-Cox +#' transformation method and nonparametric methods for estimating quantiles +#' in clinical data with repeated measures. J. Statist. Comput. Simul., vol. +#' 45, 1993, pp. 185 - 201. 
+#' +#' @author Dinesh Acharya +#' @examples +#' +#' # Estimates Box-Cox VaR +#' a<-rnorm(200) +#' BoxCoxES(a,.95) +#' +#' @import forecast +#' +#' @export +BoxCoxES <- function(loss.data, cl){ + # Check that inputs have correct dimensions + cl <- as.matrix(cl) + cl.row <- dim(cl)[1] + cl.col <- dim(cl)[2] + if (min(cl.row, cl.col) > 1) { + stop("Confidence level must be a scalar or a vector") + } + + if (cl.row > cl.col) { + cl <- t(cl) + } + + # Check that inputs obey sign and value restrictions + if (max(cl) >= 1){ + stop("Confidence level(s) must be less than 1") + } + if (min(cl) <= 0){ + stop("Confidence level(s) must be greater than 0") + } + # ES Estimation + VaR <- BoxCoxVaR(loss.data, cl) # HS VaR + k <- which(VaR Author: dacharya Date: 2015-08-16 14:54:50 +0200 (Sun, 16 Aug 2015) New Revision: 3968 Added: pkg/Dowd/man/BoxCoxES.Rd Log: Function BoxCoxES.R added Added: pkg/Dowd/man/BoxCoxES.Rd =================================================================== --- pkg/Dowd/man/BoxCoxES.Rd (rev 0) +++ pkg/Dowd/man/BoxCoxES.Rd 2015-08-16 12:54:50 UTC (rev 3968) @@ -0,0 +1,38 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/BoxCoxES.R +\name{BoxCoxES} +\alias{BoxCoxES} +\title{Estimates ES with Box-Cox transformation} +\usage{ +BoxCoxES(loss.data, cl) +} +\arguments{ +\item{loss.data}{Daily Profit/Loss data} + +\item{cl}{Confidence Level. It can be a scalar or a vector.} +} +\value{ +Estimated Box-Cox ES. Its dimension is same as that of cl +} +\description{ +Function estimates the ES of a portfolio assuming P and L data set transformed +using the BoxCox transformation to make it as near normal as possible, for +specified confidence level and holding period implied by data frequency. +} +\examples{ +# Estimates Box-Cox VaR + a<-rnorm(200) + BoxCoxES(a,.95) +} +\author{ +Dinesh Acharya +} +\references{ +Dowd, K. Measuring Market Risk, Wiley, 2007. + +Hamilton, S. A. and Taylor, M. G. A Comparision of the Box-Cox +transformation method and nonparametric methods for estimating quantiles +in clinical data with repeated measures. J. Statist. Comput. Simul., vol. +45, 1993, pp. 185 - 201. +} + From noreply at r-forge.r-project.org Thu Aug 20 00:32:34 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 00:32:34 +0200 (CEST) Subject: [Returnanalytics-commits] r3969 - pkg/Dowd/R Message-ID: <20150819223234.4028D187A25@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 00:32:33 +0200 (Thu, 20 Aug 2015) New Revision: 3969 Modified: pkg/Dowd/R/AdjustedNormalESHotspots.R Log: Error message to be displayed changed. Modified: pkg/Dowd/R/AdjustedNormalESHotspots.R =================================================================== --- pkg/Dowd/R/AdjustedNormalESHotspots.R 2015-08-16 12:54:50 UTC (rev 3968) +++ pkg/Dowd/R/AdjustedNormalESHotspots.R 2015-08-19 22:32:33 UTC (rev 3969) @@ -1,114 +1,114 @@ -#' @title Hotspots for ES adjusted by Cornish-Fisher correction -#' -#' @description Estimates the ES hotspots (or vector of incremental ESs) for a -#' portfolio with portfolio return adjusted for non-normality by Cornish-Fisher -#' corerction, for specified confidence level and holding period. 
-#' -#' @param vc.matrix Variance covariance matrix for returns -#' @param mu Vector of expected position returns -#' @param skew Return skew -#' @param kurtosis Return kurtosis -#' @param positions Vector of positions -#' @param cl Confidence level and is scalar -#' @param hp Holding period and is scalar -#' -#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. -#' -#' @author Dinesh Acharya -#' -#' @examples -#' -#' # Hotspots for ES for randomly generated portfolio -#' vc.matrix <- matrix(rnorm(16),4,4) -#' mu <- rnorm(4) -#' skew <- .5 -#' kurtosis <- 1.2 -#' positions <- c(5,2,6,10) -#' cl <- .95 -#' hp <- 280 -#' AdjustedNormalESHotspots(vc.matrix, mu, skew, kurtosis, positions, cl, hp) -#' -#' @export -AdjustedNormalESHotspots <- function(vc.matrix, mu, skew, kurtosis, positions, - cl, hp){ - - # Check that positions vector read as a scalar or row vector - positions <- as.matrix(positions) - if (dim(positions)[1] > dim(positions)[2]){ - positions <- t(positions) - } - - # Check that expected returns vector is read as a scalar or row vector - mu <- as.matrix(mu) - if (dim(mu)[1] > dim(mu)[2]){ - mu <- t(mu) - } - - # Check that dimensions are correct - if (max(dim(mu)) != max(dim(positions))){ - stop("Positions vector and expected returns vector must have same size") - } - if (max(dim(vc.matrix)) != max(dim(positions))){ - stop("Positions vector and expected returns vector must have same size") - } - - # Check that inputs obey sign and value restrictions - if (cl >= 1){ - stop("Confidence level must be less than 1") - } - if (cl <= 0){ - stop("Confidence level must be greater than 0"); - } - if (hp <= 0){ - stop("Holding period must be greater than 0"); - } - - # VaR and ES estimation - # Begin with portfolio ES - z <- qnorm(1 - cl, 0 ,1) - sigma <- positions %*% vc.matrix %*% t(positions)/(sum(positions)^2) # Initial - # standard deviation of portfolio returns - adjustment <- (1 / 6) * (z ^ 2 - 1) * skew + (1 / 24) * (z ^ 3 - 3 * z) * - (kurtosis - 3) - (1 / 36) * (2 * z ^ 3 - 5 * z) * skew ^ 2 - VaR <- - mu %*% t(positions) * hp - (z + adjustment) * sigma * - (sum(positions)^2) * sqrt(hp) # Initial VaR - n <- 1000 # Number of slives into which tail is divided - cl0 <- cl # Initial confidence level - term <- VaR - delta.cl <- (1 - cl) / n # Increment to confidence level - for (k in 1:(n - 1)) { - cl <- cl0 + k * delta.cl # Revised cl - z <- qnorm(1 - cl, 0, 1) - adjustment=(1 / 6) * (z ^ 2 - 1) * skew + (1 / 24) * (z ^ 3 - 3 * z) * - (kurtosis - 3) - (1 / 36) * (2 * z ^ 3 - 5 * z) * skew ^ 2 - term <- term - mu %*% t(positions) * hp - (z + adjustment) * sigma * - (sum(positions)^2) * sqrt(hp) - } - portfolio.ES <- term/n - - # Portfolio ES - es <- double(length(positions)) - ies <- double(length(positions)) - for (j in 1:length(positions)) { - x <- positions - x[j] <- 0 - sigma <- x %*% vc.matrix %*% t(x) / (sum(x)^2) - term[j] <- - mu %*% t(x) * hp - qnorm(1-cl, 0, 1) * x %*% - vc.matrix %*% t(x) * sqrt(hp) - - for (k in 1:(n - 1)){ - cl <- cl0 + k * delta.cl # Revised cl - z <- qnorm(1-cl, 0, 1) - adjustment=(1 / 6) * (z ^ 2 - 1) * skew + (1 / 24) * (z ^ 3 - 3 * z) * - (kurtosis - 3) - (1 / 36) * (2 * z ^ 3 - 5 * z) * skew ^ 2 - term[j] <- term[j] - mu %*% t(positions) * hp - (z + adjustment) * - sigma * (sum(positions)^2) * sqrt(hp) - } - es[j] <- term[j]/n # ES on portfolio minus position j - ies [j] <- portfolio.ES - es[j] # Incremental ES - - } - y <- ies - return(ies) - -} +#' @title Hotspots for ES adjusted by Cornish-Fisher correction +#' +#' @description Estimates 
the ES hotspots (or vector of incremental ESs) for a +#' portfolio with portfolio return adjusted for non-normality by Cornish-Fisher +#' correction, for specified confidence level and holding period. +#' +#' @param vc.matrix Variance covariance matrix for returns +#' @param mu Vector of expected position returns +#' @param skew Return skew +#' @param kurtosis Return kurtosis +#' @param positions Vector of positions +#' @param cl Confidence level and is scalar +#' @param hp Holding period and is scalar +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' @author Dinesh Acharya +#' +#' @examples +#' +#' # Hotspots for ES for randomly generated portfolio +#' vc.matrix <- matrix(rnorm(16),4,4) +#' mu <- rnorm(4) +#' skew <- .5 +#' kurtosis <- 1.2 +#' positions <- c(5,2,6,10) +#' cl <- .95 +#' hp <- 280 +#' AdjustedNormalESHotspots(vc.matrix, mu, skew, kurtosis, positions, cl, hp) +#' +#' @export +AdjustedNormalESHotspots <- function(vc.matrix, mu, skew, kurtosis, positions, + cl, hp){ + + # Check that positions vector is read as a scalar or row vector + positions <- as.matrix(positions) + if (dim(positions)[1] > dim(positions)[2]){ + positions <- t(positions) + } + + # Check that expected returns vector is read as a scalar or row vector + mu <- as.matrix(mu) + if (dim(mu)[1] > dim(mu)[2]){ + mu <- t(mu) + } + + # Check that dimensions are correct + if (max(dim(mu)) != max(dim(positions))){ + stop("Positions vector and expected returns vector must have same size.") + } + if (max(dim(vc.matrix)) != max(dim(positions))){ + stop("Positions vector and variance-covariance matrix must have compatible dimensions.") + } + + # Check that inputs obey sign and value restrictions + if (cl >= 1){ + stop("Confidence level must be less than 1") + } + if (cl <= 0){ + stop("Confidence level must be greater than 0") + } + if (hp <= 0){ + stop("Holding period must be greater than 0") + } + + # VaR and ES estimation + # Begin with portfolio ES + z <- qnorm(1 - cl, 0, 1) + sigma <- positions %*% vc.matrix %*% t(positions)/(sum(positions)^2) # Initial + # standard deviation of portfolio returns + adjustment <- (1 / 6) * (z ^ 2 - 1) * skew + (1 / 24) * (z ^ 3 - 3 * z) * + (kurtosis - 3) - (1 / 36) * (2 * z ^ 3 - 5 * z) * skew ^ 2 + VaR <- - mu %*% t(positions) * hp - (z + adjustment) * sigma * + (sum(positions)^2) * sqrt(hp) # Initial VaR + n <- 1000 # Number of slices into which tail is divided + cl0 <- cl # Initial confidence level + term <- VaR + delta.cl <- (1 - cl) / n # Increment to confidence level + for (k in 1:(n - 1)) { + cl <- cl0 + k * delta.cl # Revised cl + z <- qnorm(1 - cl, 0, 1) + adjustment <- (1 / 6) * (z ^ 2 - 1) * skew + (1 / 24) * (z ^ 3 - 3 * z) * + (kurtosis - 3) - (1 / 36) * (2 * z ^ 3 - 5 * z) * skew ^ 2 + term <- term - mu %*% t(positions) * hp - (z + adjustment) * sigma * + (sum(positions)^2) * sqrt(hp) + } + portfolio.ES <- term/n + + # ES on reduced portfolios and incremental ES + es <- double(length(positions)) + ies <- double(length(positions)) + for (j in 1:length(positions)) { + x <- positions + x[j] <- 0 + sigma <- x %*% vc.matrix %*% t(x) / (sum(x)^2) + term[j] <- - mu %*% t(x) * hp - qnorm(1-cl, 0, 1) * x %*% + vc.matrix %*% t(x) * sqrt(hp) + + for (k in 1:(n - 1)){ + cl <- cl0 + k * delta.cl # Revised cl + z <- qnorm(1-cl, 0, 1) + adjustment <- (1 / 6) * (z ^ 2 - 1) * skew + (1 / 24) * (z ^ 3 - 3 * z) * + (kurtosis - 3) - (1 / 36) * (2 * z ^ 3 - 5 * z) * skew ^ 2 + term[j] <- term[j] - mu %*% t(positions) * hp - (z + adjustment) * + sigma * (sum(positions)^2) * sqrt(hp) + } + es[j] <- term[j]/n # 
ES on portfolio minus position j + ies [j] <- portfolio.ES - es[j] # Incremental ES + + } + y <- ies + return(ies) + +} From noreply at r-forge.r-project.org Thu Aug 20 00:32:58 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 00:32:58 +0200 (CEST) Subject: [Returnanalytics-commits] r3970 - pkg/Dowd/R Message-ID: <20150819223258.B7161187A25@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 00:32:58 +0200 (Thu, 20 Aug 2015) New Revision: 3970 Modified: pkg/Dowd/R/AdjustedNormalVaRHotspots.R Log: Error message to be displayed changed. Modified: pkg/Dowd/R/AdjustedNormalVaRHotspots.R =================================================================== --- pkg/Dowd/R/AdjustedNormalVaRHotspots.R 2015-08-19 22:32:33 UTC (rev 3969) +++ pkg/Dowd/R/AdjustedNormalVaRHotspots.R 2015-08-19 22:32:58 UTC (rev 3970) @@ -1,85 +1,85 @@ -#' @title Hotspots for VaR adjusted by Cornish-Fisher correction -#' -#' @description Estimates the VaR hotspots (or vector of incremental VaRs) for a -#' portfolio with portfolio return adjusted for non-normality by Cornish-Fisher -#' corerction, for specified confidence level and holding period. -#' -#' @param vc.matrix Variance covariance matrix for returns -#' @param mu Vector of expected position returns -#' @param skew Return skew -#' @param kurtosis Return kurtosis -#' @param positions Vector of positions -#' @param cl Confidence level and is scalar -#' @param hp Holding period and is scalar -#' -#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. -#' -#' @author Dinesh Acharya -#' -#' @examples -#' -#' # Hotspots for ES for randomly generated portfolio -#' vc.matrix <- matrix(rnorm(16),4,4) -#' mu <- rnorm(4) -#' skew <- .5 -#' kurtosis <- 1.2 -#' positions <- c(5,2,6,10) -#' cl <- .95 -#' hp <- 280 -#' AdjustedNormalVaRHotspots(vc.matrix, mu, skew, kurtosis, positions, cl, hp) -#' -#' @export -AdjustedNormalVaRHotspots <- function(vc.matrix, mu, skew, kurtosis, positions, cl, hp){ - - # Check that positions vector read as a scalar or row vector - positions <- as.matrix(positions) - if (dim(positions)[1] > dim(positions)[2]){ - positions <- t(positions) - } - - # Check that expected returns vector is read as a scalar or row vector - mu <- as.matrix(mu) - if (dim(mu)[1] > dim(mu)[2]){ - mu <- t(mu) - } - - # Check that dimensions are correct - if (max(dim(mu)) != max(dim(positions))){ - stop("Positions vector and expected returns vector must have same size") - } - vc.matrix <- as.matrix(vc.matrix) - if (max(dim(vc.matrix)) != max(dim(positions))){ - stop("Positions vector and expected returns vector must have same size") - } - - # Check that inputs obey sign and value restrictions - if (cl >= 1){ - stop("Confidence level must be less than 1") - } - if (cl <= 0){ - stop("Confidence level must be greater than 0"); - } - if (hp <= 0){ - stop("Holding period must be greater than 0"); - } - - # VaR and ES estimation - z <- qnorm(1 - cl, 0 ,1) - sigma <- positions %*% vc.matrix %*% t(positions)/(sum(positions)^2) # Initial standard deviation of portfolio returns - adjustment <- (1 / 6) * (z ^ 2 - 1) * skew + (1 / 24) * (z ^ 3 - 3 * z) * (kurtosis - 3) - (1 / 36) * (2 * z ^ 3 - 5 * z) * skew ^ 2 - VaR <- - mu %*% t(positions) * hp - (z + adjustment) * sigma * (sum(positions)^2) * sqrt(hp) - - # VaR - x <- double(length(positions)) - sigma <- double(length(positions)) - iVaR <- double(length(positions)) - for (i in 1:length(positions)){ - x <- positions - x[i] <- 0 - sigma[i] <- x %*% vc.matrix %*% 
t(x)/sum(x)^2 # standard deviation of portfolio returns - iVaR[i] <- VaR + mu %*% t(x) %*% hp + (z + adjustment) * sigma[i] * (sum(x))^2 * sqrt(hp) # Incremental VaR - } - y <- iVaR - return(y) - +#' @title Hotspots for VaR adjusted by Cornish-Fisher correction +#' +#' @description Estimates the VaR hotspots (or vector of incremental VaRs) for a +#' portfolio with portfolio return adjusted for non-normality by Cornish-Fisher +#' corerction, for specified confidence level and holding period. +#' +#' @param vc.matrix Variance covariance matrix for returns +#' @param mu Vector of expected position returns +#' @param skew Return skew +#' @param kurtosis Return kurtosis +#' @param positions Vector of positions +#' @param cl Confidence level and is scalar +#' @param hp Holding period and is scalar +#' +#' @references Dowd, K. Measuring Market Risk, Wiley, 2007. +#' +#' @author Dinesh Acharya +#' +#' @examples +#' +#' # Hotspots for ES for randomly generated portfolio +#' vc.matrix <- matrix(rnorm(16),4,4) +#' mu <- rnorm(4) +#' skew <- .5 +#' kurtosis <- 1.2 +#' positions <- c(5,2,6,10) +#' cl <- .95 +#' hp <- 280 +#' AdjustedNormalVaRHotspots(vc.matrix, mu, skew, kurtosis, positions, cl, hp) +#' +#' @export +AdjustedNormalVaRHotspots <- function(vc.matrix, mu, skew, kurtosis, positions, cl, hp){ + + # Check that positions vector read as a scalar or row vector + positions <- as.matrix(positions) + if (dim(positions)[1] > dim(positions)[2]){ + positions <- t(positions) + } + + # Check that expected returns vector is read as a scalar or row vector + mu <- as.matrix(mu) + if (dim(mu)[1] > dim(mu)[2]){ + mu <- t(mu) + } + + # Check that dimensions are correct + if (max(dim(mu)) != max(dim(positions))){ + stop("Positions vector and expected returns vector must have same size") + } + vc.matrix <- as.matrix(vc.matrix) + if (max(dim(vc.matrix)) != max(dim(positions))){ + stop("Positions vector and variance-covariance matrix must have compatible dimensions.") + } + + # Check that inputs obey sign and value restrictions + if (cl >= 1){ + stop("Confidence level must be less than 1") + } + if (cl <= 0){ + stop("Confidence level must be greater than 0"); + } + if (hp <= 0){ + stop("Holding period must be greater than 0"); + } + + # VaR and ES estimation + z <- qnorm(1 - cl, 0 ,1) + sigma <- positions %*% vc.matrix %*% t(positions)/(sum(positions)^2) # Initial standard deviation of portfolio returns + adjustment <- (1 / 6) * (z ^ 2 - 1) * skew + (1 / 24) * (z ^ 3 - 3 * z) * (kurtosis - 3) - (1 / 36) * (2 * z ^ 3 - 5 * z) * skew ^ 2 + VaR <- - mu %*% t(positions) * hp - (z + adjustment) * sigma * (sum(positions)^2) * sqrt(hp) + + # VaR + x <- double(length(positions)) + sigma <- double(length(positions)) + iVaR <- double(length(positions)) + for (i in 1:length(positions)){ + x <- positions + x[i] <- 0 + sigma[i] <- x %*% vc.matrix %*% t(x)/sum(x)^2 # standard deviation of portfolio returns + iVaR[i] <- VaR + mu %*% t(x) %*% hp + (z + adjustment) * sigma[i] * (sum(x))^2 * sqrt(hp) # Incremental VaR + } + y <- iVaR + return(y) + } \ No newline at end of file From noreply at r-forge.r-project.org Thu Aug 20 00:34:51 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 00:34:51 +0200 (CEST) Subject: [Returnanalytics-commits] r3971 - pkg/Dowd/R Message-ID: <20150819223451.39140187B2B@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 00:34:50 +0200 (Thu, 20 Aug 2015) New Revision: 3971 Modified: pkg/Dowd/R/NormalVaRHotspots.R Log: Correction of Example and 
error message to be displayed changed. Modified: pkg/Dowd/R/NormalVaRHotspots.R =================================================================== --- pkg/Dowd/R/NormalVaRHotspots.R 2015-08-19 22:32:58 UTC (rev 3970) +++ pkg/Dowd/R/NormalVaRHotspots.R 2015-08-19 22:34:50 UTC (rev 3971) @@ -45,7 +45,7 @@ } vc.matrix <- as.matrix(vc.matrix) if (max(dim(vc.matrix)) != max(dim(positions))){ - stop("Positions vector and expected returns vector must have same size") + stop("Positions vector and variance-covariance matrix must have compatible dimensions") } # Check that inputs obey sign and value restrictions From noreply at r-forge.r-project.org Thu Aug 20 00:36:00 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 00:36:00 +0200 (CEST) Subject: [Returnanalytics-commits] r3972 - pkg/Dowd/man Message-ID: <20150819223600.45088187B41@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 00:35:59 +0200 (Thu, 20 Aug 2015) New Revision: 3972 Modified: pkg/Dowd/man/NormalESHotspots.Rd Log: Correction of example. Modified: pkg/Dowd/man/NormalESHotspots.Rd =================================================================== --- pkg/Dowd/man/NormalESHotspots.Rd 2015-08-19 22:34:50 UTC (rev 3971) +++ pkg/Dowd/man/NormalESHotspots.Rd 2015-08-19 22:35:59 UTC (rev 3972) @@ -4,7 +4,7 @@ \alias{NormalESHotspots} \title{Hotspots for normal ES} \usage{ -NormalESHotspots(vc.matrix, mu, skew, kurtosis, positions, cl, hp) +NormalESHotspots(vc.matrix, mu, positions, cl, hp) } \arguments{ \item{vc.matrix}{Variance covariance matrix for returns} @@ -29,12 +29,10 @@ # Hotspots for ES for randomly generated portfolio vc.matrix <- matrix(rnorm(16),4,4) mu <- rnorm(4,.08,.04) - skew <- .5 - kurtosis <- 1.2 positions <- c(5,2,6,10) cl <- .95 hp <- 280 - AdjustedNormalESHotspots(vc.matrix, mu, skew, kurtosis, positions, cl, hp) + NormalESHotspots(vc.matrix, mu, positions, cl, hp) } \author{ Dinesh Acharya From noreply at r-forge.r-project.org Thu Aug 20 00:36:36 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 00:36:36 +0200 (CEST) Subject: [Returnanalytics-commits] r3973 - pkg/Dowd/R Message-ID: <20150819223636.37E0F187B43@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 00:36:35 +0200 (Thu, 20 Aug 2015) New Revision: 3973 Modified: pkg/Dowd/R/NormalESHotspots.R Log: Correction of example and incorrect error message changed. 
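The hotspot functions touched in r3969 through r3973 all share one pattern: the incremental risk of position j is the portfolio risk minus the risk of the portfolio with position j set to zero. Schematically, for an arbitrary risk measure rho (a sketch of the decomposition only, not package code):

# Vector of incremental risks: rho(portfolio) - rho(portfolio without j)
incremental.risk <- function(positions, rho) {
  total <- rho(positions)
  sapply(seq_along(positions), function(j) {
    reduced <- positions
    reduced[j] <- 0            # zero out position j
    total - rho(reduced)
  })
}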
Modified: pkg/Dowd/R/NormalESHotspots.R =================================================================== --- pkg/Dowd/R/NormalESHotspots.R 2015-08-19 22:35:59 UTC (rev 3972) +++ pkg/Dowd/R/NormalESHotspots.R 2015-08-19 22:36:35 UTC (rev 3973) @@ -19,15 +19,13 @@ #' # Hotspots for ES for randomly generated portfolio #' vc.matrix <- matrix(rnorm(16),4,4) #' mu <- rnorm(4,.08,.04) -#' skew <- .5 -#' kurtosis <- 1.2 #' positions <- c(5,2,6,10) #' cl <- .95 #' hp <- 280 -#' AdjustedNormalESHotspots(vc.matrix, mu, skew, kurtosis, positions, cl, hp) +#' NormalESHotspots(vc.matrix, mu, positions, cl, hp) #' #' @export -NormalESHotspots <- function(vc.matrix, mu, skew, kurtosis, positions, +NormalESHotspots <- function(vc.matrix, mu, positions, cl, hp){ # Check that positions vector read as a scalar or row vector @@ -47,7 +45,7 @@ stop("Positions vector and expected returns vector must have same size") } if (max(dim(vc.matrix)) != max(dim(positions))){ - stop("Positions vector and expected returns vector must have same size") + stop("Positions vector and variance-covariance matrix must have compatible dimensions") } # Check that inputs obey sign and value restrictions From noreply at r-forge.r-project.org Thu Aug 20 10:12:40 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 10:12:40 +0200 (CEST) Subject: [Returnanalytics-commits] r3974 - pkg/Dowd/R Message-ID: <20150820081241.113EC1842F7@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 10:12:40 +0200 (Thu, 20 Aug 2015) New Revision: 3974 Modified: pkg/Dowd/R/AdjustedVarianceCovarianceES.R Log: Minor typos corrected Modified: pkg/Dowd/R/AdjustedVarianceCovarianceES.R =================================================================== --- pkg/Dowd/R/AdjustedVarianceCovarianceES.R 2015-08-19 22:36:35 UTC (rev 3973) +++ pkg/Dowd/R/AdjustedVarianceCovarianceES.R 2015-08-20 08:12:40 UTC (rev 3974) @@ -99,7 +99,7 @@ VaR[i,j] <- - mu %*% t(positions) * hp[j] - (z[i] + adjustment[i]) * sigma * (sum(positions)^2) * sqrt(hp[j]) # VaR # ES Estimation - n <- 1000 # Number of slives into which tail is divided + n <- 1000 # Number of slices into which tail is divided cl0[i] <- cl[i] # Initial confidence level term[i, j] <- VaR[i, j] delta.cl[i] <- (1 - cl[i]) / n # Increment to confidence level as each From noreply at r-forge.r-project.org Thu Aug 20 10:13:23 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 10:13:23 +0200 (CEST) Subject: [Returnanalytics-commits] r3975 - pkg/Dowd/R Message-ID: <20150820081324.024EF1842F7@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 10:13:23 +0200 (Thu, 20 Aug 2015) New Revision: 3975 Modified: pkg/Dowd/R/BlackScholesPutESSim.R Log: Minor typos corrected Modified: pkg/Dowd/R/BlackScholesPutESSim.R =================================================================== --- pkg/Dowd/R/BlackScholesPutESSim.R 2015-08-20 08:12:40 UTC (rev 3974) +++ pkg/Dowd/R/BlackScholesPutESSim.R 2015-08-20 08:13:23 UTC (rev 3975) @@ -50,7 +50,7 @@ lnSt[i] <- lnS + rnorm(1, nudt, sigmadt) # Random stock price movement newStockPrice[i] <- exp(lnSt[i, 1]) # New stock price } - # Profit/Loss camculation + # Profit/Loss calculation profitOrLoss <- double(M) if (amountInvested > 0) { # If option position is long for (i in 1:M) { From noreply at r-forge.r-project.org Thu Aug 20 10:30:43 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 10:30:43 +0200 (CEST) Subject: 
[Returnanalytics-commits] r3976 - pkg/Dowd/R Message-ID: <20150820083043.300F4187510@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 10:30:42 +0200 (Thu, 20 Aug 2015) New Revision: 3976 Modified: pkg/Dowd/R/BlancoIhleBacktest.R Log: Example slightly changed Modified: pkg/Dowd/R/BlancoIhleBacktest.R =================================================================== --- pkg/Dowd/R/BlancoIhleBacktest.R 2015-08-20 08:13:23 UTC (rev 3975) +++ pkg/Dowd/R/BlancoIhleBacktest.R 2015-08-20 08:30:42 UTC (rev 3976) @@ -17,8 +17,8 @@ #' @author Dinesh Acharya #' @examples #' -#' # Has to be modified with appropriate data: -#' # Blanco-Ihle Backtest For Independence for given parameters +#' # Blanco-Ihle Backtest For Independence for given confidence level. +#' # The VaR and ES are randomly generated. #' a <- rnorm(1*100) #' b <- abs(rnorm(1*100))+2 #' c <- abs(rnorm(1*100))+2 From noreply at r-forge.r-project.org Thu Aug 20 10:31:15 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 10:31:15 +0200 (CEST) Subject: [Returnanalytics-commits] r3977 - pkg/Dowd/man Message-ID: <20150820083115.2F8FA187510@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 10:31:14 +0200 (Thu, 20 Aug 2015) New Revision: 3977 Modified: pkg/Dowd/man/BlancoIhleBacktest.Rd Log: Example slightly changed Modified: pkg/Dowd/man/BlancoIhleBacktest.Rd =================================================================== --- pkg/Dowd/man/BlancoIhleBacktest.Rd 2015-08-20 08:30:42 UTC (rev 3976) +++ pkg/Dowd/man/BlancoIhleBacktest.Rd 2015-08-20 08:31:14 UTC (rev 3977) @@ -23,8 +23,8 @@ risk measurement model. } \examples{ -# Has to be modified with appropriate data: - # Blanco-Ihle Backtest For Independence for given parameters +# Blanco-Ihle Backtest For Independence for given confidence level. + # The VaR and ES are randomly generated. a <- rnorm(1*100) b <- abs(rnorm(1*100))+2 c <- abs(rnorm(1*100))+2 From noreply at r-forge.r-project.org Thu Aug 20 11:29:25 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 11:29:25 +0200 (CEST) Subject: [Returnanalytics-commits] r3978 - pkg/Dowd Message-ID: <20150820092926.0D98D187AB9@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 11:29:25 +0200 (Thu, 20 Aug 2015) New Revision: 3978 Added: pkg/Dowd/TODO Log: Things to be done. Added: pkg/Dowd/TODO =================================================================== --- pkg/Dowd/TODO (rev 0) +++ pkg/Dowd/TODO 2015-08-20 09:29:25 UTC (rev 3978) @@ -0,0 +1,14 @@ +The vignette is not yet ready for the package; it will be added in the near +future. Many examples in the source files are not insightful, and they will +be revised accordingly. The dependence on forecast and a few other +packages can be avoided; removing it will be attempted in the future. + +Dowd's original package contains some errors, and this package has inherited +most of them, so not all functions work perfectly. Identifying and fixing +them is another direction that can be explored. + +There are also a few functions that do not give the same results as Dowd's +MATLAB toolbox. There are two possible explanations: either the parameters +passed to the algorithms (such as the density function in the kernel density +approach, to give one example) were not suitably chosen, or there actually are errors in the code. +An attempt will be made in the future to remove such inconsistencies.
\ No newline at end of file From noreply at r-forge.r-project.org Thu Aug 20 11:33:48 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 11:33:48 +0200 (CEST) Subject: [Returnanalytics-commits] r3979 - pkg/Dowd Message-ID: <20150820093348.C366D187AAF@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 11:33:48 +0200 (Thu, 20 Aug 2015) New Revision: 3979 Added: pkg/Dowd/THANKS Log: Acknowledgements Added: pkg/Dowd/THANKS =================================================================== --- pkg/Dowd/THANKS (rev 0) +++ pkg/Dowd/THANKS 2015-08-20 09:33:48 UTC (rev 3979) @@ -0,0 +1,8 @@ +As the package "Dowd" is ported from Kevin Dowd's MATLAB Toolbox MMRII, he deserves +special acknowledgement. Without his work, this project would not have been possible. + +The project started as a Google Summer of Code Project for the year 2015. So, GSoC also +deserves acknowledgement. + +Peter Carl and Brian G. Peterson who came up with this idea and mentored this project +also deserve special acknowledgement for coming up with the project idea and mentorship. \ No newline at end of file From noreply at r-forge.r-project.org Thu Aug 20 11:35:02 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 11:35:02 +0200 (CEST) Subject: [Returnanalytics-commits] r3980 - pkg/Dowd Message-ID: <20150820093502.CB4E6187AB9@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 11:35:02 +0200 (Thu, 20 Aug 2015) New Revision: 3980 Modified: pkg/Dowd/DESCRIPTION Log: Details added Modified: pkg/Dowd/DESCRIPTION =================================================================== --- pkg/Dowd/DESCRIPTION 2015-08-20 09:33:48 UTC (rev 3979) +++ pkg/Dowd/DESCRIPTION 2015-08-20 09:35:02 UTC (rev 3980) @@ -1,16 +1,30 @@ Package: Dowd Type: Package -Title: R-Version of MMR II Toolbox Offered in Kevin Dowd's Book Measuring Market Risk +Title: Functions Ported From MMR2 Toolbox Offered in Kevin Dowd's Book + Measuring Market Risk Version: 0.1 -Date: 2015-05-24 +Date: 2015-08-20 Author: Dinesh Acharya Maintainer: Dinesh Acharya -Description: R-version of MMR2 Toolbox that supplements - Kevin Dowd's book Measuring Market Risk. +Description: Kevin Dowd's book Measuring Market Risk is a widely read book + in the area of risk measurement and is widely used by students and + practitioners alike. As he claims, MATLAB indeed have been most + suitable language when Dowd originally wrote the functions, however, + with growing popularity of R and a large user base, this project + should make Dowd's code accessible to more readers. As Dowd's code + was not intended to be error free and were more for reference, some + functions in this package have inherited those errors. An attempt + will be made in future to identify and correct them. Dowd's original + code can be downloaded from . It should be noted that Dowd offers both + MMR2 and MMR1 toolboxes. Only MMR2 was ported to R. MMR2 is more + recent version of MMR1 toolbox and they both have mostly similar + function. The toolbox mainly contains different parametric and non + parametric methods for measurement of market risk as well as + backtesting risk measurement methods. 
Depends: R (>= 3.0.0), bootstrap, MASS, forecast Suggests: PerformanceAnalytics, testthat -License: GPL +License: GPL \ No newline at end of file From noreply at r-forge.r-project.org Thu Aug 20 11:37:12 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 11:37:12 +0200 (CEST) Subject: [Returnanalytics-commits] r3981 - pkg/Dowd Message-ID: <20150820093712.975DC187AAF@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 11:37:12 +0200 (Thu, 20 Aug 2015) New Revision: 3981 Removed: pkg/Dowd/README Log: File deleted Deleted: pkg/Dowd/README =================================================================== --- pkg/Dowd/README 2015-08-20 09:35:02 UTC (rev 3980) +++ pkg/Dowd/README 2015-08-20 09:37:12 UTC (rev 3981) @@ -1,28 +0,0 @@ -# -# General Notes for Modification: -#*************************************************************** -# FrechetVaR does not use hp and the remark about return value when it is vector is vaccuous. -#*************************************************************** -# In Normal/t QQ Plots, dowd code does not work for matrices but the code contains parts that -# work for matrices. some vectors like pvec are not defined anywhere in his code. -#*************************************************************** -# Some error is present in GumbelCopulaVaR and needs correction -#*************************************************************** -# Bootstrap is functional (but HSVaR still does not accept matrix P/L -# and only still accepts vectors, its needs to be modified) -#*************************************************************** -# Jarque-Bera Test: -# It has to be checked Probability of null (H0) or (H1). -#*************************************************************** -# Christofferson Backtest for Independence: -# VaR(excess_loss<=0)=[]; Does not make sense. It is still to be checked if it is as intended. -# if(excess.loss[i-1]<=0) if condition incomplete statement. -#*************************************************************** -# Tests/Examples for profit.loss distribution and corresponding VaR and ETL -# still needs to be completed. Around 4 in Backtest do not have examples. -# It still has to be completed. -#*************************************************************** -# Lopez Backtest: -# In Christofferson , excess.loss is defined as -profit.loss-VaR -# But in Lopez Backtest, profit.loss-VaR is used. It has to be checked. 
-#*************************************************************** \ No newline at end of file From noreply at r-forge.r-project.org Thu Aug 20 11:41:00 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 11:41:00 +0200 (CEST) Subject: [Returnanalytics-commits] r3982 - pkg/Dowd/vignettes Message-ID: <20150820094100.55437181103@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 11:40:59 +0200 (Thu, 20 Aug 2015) New Revision: 3982 Removed: pkg/Dowd/vignettes/Dowd.Rnw Log: Deleted as it was incomplete and not ready to submission to CRAN Deleted: pkg/Dowd/vignettes/Dowd.Rnw =================================================================== --- pkg/Dowd/vignettes/Dowd.Rnw 2015-08-20 09:37:12 UTC (rev 3981) +++ pkg/Dowd/vignettes/Dowd.Rnw 2015-08-20 09:40:59 UTC (rev 3982) @@ -1,39 +0,0 @@ -\documentclass{article} -\usepackage{amsmath, amsthm} -\usepackage{hyperref} -\usepackage{Rd} -\usepackage{Sweave} -%\VignetteDepends{Dowd, MASS, bootstrap} -%\VignetteIndexEntry{Dowd} -%\VignetteKeywords{risk measurement, parametric methods, non-parametric methods backtest} -%\VignettePackage{Dowd} -\title{Usage of \pkg{Dowd} Package} -\author{Dinesh Acharya} -\begin{document} -\maketitle -\begin{abstract} -In this vignette, use of package \pkg{Dowd} for various parametric and non-parametric methods to measure market risk is demonstrated. Additionally, methods for backtesting risk measures are also discussed. -\end{abstract} -\tableofcontents -\section{Introduction} -Market Risks are those risks that are associated with fluctuations in market prices or rates. For example, risk associated with fluctuation in price of a particular stock or a certain commodity is a market risk where as risk associated with default of a loan or financial system collapse is not market risk.\\ -\\ -Since the early works of Harry Markowitz, and particularly in the last two decades, there has been significant development in the area of risk measurement. Value-at-Risk (VaR) has become widely used measure of risk. VaR at $\alpha$ confidence level is defined as the negative of $\alpha-$th quantile of the profit/loss distribution, i.e. -\[VaR_{\alpha}(F) = -inf\{x\in R:F(x) \ge \alpha\}\] -where $F$ is the distribution function associated with random variable .\\ -\\ -VaR has its own weaknesses. Consequently, ES has been put championed by some as a better alternative to VaR. At $\alpha-$ confidence level, it is defined as: -\[ES_{\alpha}(F)=\frac{1}{\alpha}\int_0^{\alpha}VaR_u(F)d(u)\] -ES too has its own weaknesses and few other alternative riskmeasures have also been proposed. - -\section{Parametric Methods} -Parametric methods are based on certain assumption on the profit/loss distribution. Based on those assumptions, the parameters of the theoretical distribution are approximated with the data. Given a theoretical distribution, the definition of VaR or ES given above usually reduces to a definite form, and can be approximated using estimates of parameters. 
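For concreteness, under the normality assumption the VaR and ES definitions quoted above reduce to closed forms. A minimal R sketch with illustrative parameter values (mu, sigma and cl here are assumptions for the example, not values taken from the package):

    mu <- 0.05    # mean of the P/L distribution
    sigma <- 1.5  # standard deviation of the P/L distribution
    cl <- 0.95    # confidence level
    # VaR is minus the (1 - cl) quantile of the normal P/L distribution
    VaR <- -qnorm(1 - cl, mean = mu, sd = sigma)
    # Normal ES in closed form: the average of the VaRs beyond cl
    ES <- -mu + sigma * dnorm(qnorm(1 - cl)) / (1 - cl)

With these values VaR is about 2.42 and ES about 3.04, ES sitting above VaR as it must.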
-<>= -library(Dowd) -library(MASS) -library(bootstrap) -library(PerformanceAnalytics) -@ - - -\end{document} \ No newline at end of file From noreply at r-forge.r-project.org Thu Aug 20 11:49:54 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 11:49:54 +0200 (CEST) Subject: [Returnanalytics-commits] r3983 - pkg/Dowd Message-ID: <20150820094954.A81B8186726@r-forge.r-project.org> Author: dacharya Date: 2015-08-20 11:49:54 +0200 (Thu, 20 Aug 2015) New Revision: 3983 Removed: pkg/Dowd/vignettes/ Log: delete vignettes From noreply at r-forge.r-project.org Thu Aug 20 17:52:02 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 20 Aug 2015 17:52:02 +0200 (CEST) Subject: [Returnanalytics-commits] r3984 - in pkg/Meucci: R demo man Message-ID: <20150820155202.F123C185AB1@r-forge.r-project.org> Author: xavierv Date: 2015-08-20 17:52:02 +0200 (Thu, 20 Aug 2015) New Revision: 3984 Modified: pkg/Meucci/R/InvariantProjection.R pkg/Meucci/R/LognormalCopulaPdf.R pkg/Meucci/R/NormalCopulaPdf.R pkg/Meucci/R/PerformIidAnalysis.R pkg/Meucci/R/StudentTCopulaPdf.R pkg/Meucci/R/TwoDimEllipsoid.R pkg/Meucci/demo/AnalyticalvsNumerical.R pkg/Meucci/demo/ButterflyTrading.R pkg/Meucci/demo/DetectOutliersviaMVE.R pkg/Meucci/demo/FullFlexProbs.R pkg/Meucci/demo/FullyFlexibleBayesNets.R pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R pkg/Meucci/demo/HermiteGrid_CVaR_Recursion.R pkg/Meucci/demo/HermiteGrid_CaseStudy.R pkg/Meucci/demo/HermiteGrid_demo.R pkg/Meucci/demo/InvariantProjection.R pkg/Meucci/demo/MeanDiversificationFrontier.R pkg/Meucci/demo/Prior2Posterior.R pkg/Meucci/demo/RobustBayesianAllocation.R pkg/Meucci/demo/RobustBayesianCaseStudy.R pkg/Meucci/demo/S_AutocorrelatedProcess.R pkg/Meucci/demo/S_BondProjectionPricingNormal.R pkg/Meucci/demo/S_BondProjectionPricingStudentT.R pkg/Meucci/demo/S_BuyNHold.R pkg/Meucci/demo/S_CallsProjectionPricing.R pkg/Meucci/demo/S_CheckDiagonalization.R pkg/Meucci/demo/S_CornishFisher.R pkg/Meucci/demo/S_CorrelationPriorUniform.R pkg/Meucci/demo/S_CovarianceEvolution.R pkg/Meucci/demo/S_DerivativesInvariants.R pkg/Meucci/demo/S_DeterministicEvolution.R pkg/Meucci/demo/S_DisplayLognormalCopulaPdf.R pkg/Meucci/demo/S_DisplayNormalCopulaCdf.R pkg/Meucci/demo/S_DisplayNormalCopulaPdf.R pkg/Meucci/demo/S_DisplayStudentTCopulaPdf.R pkg/Meucci/demo/S_DynamicManagementCase1.R pkg/Meucci/demo/S_DynamicManagementCase2.R pkg/Meucci/demo/S_EigenvalueDispersion.R pkg/Meucci/demo/logToArithmeticCovariance.R pkg/Meucci/man/Central2Raw.Rd pkg/Meucci/man/Cumul2Raw.Rd pkg/Meucci/man/GenerateLogNormalDistribution.Rd pkg/Meucci/man/LognormalCopulaPdf.Rd pkg/Meucci/man/NormalCopulaPdf.Rd pkg/Meucci/man/PerformIidAnalysis.Rd pkg/Meucci/man/Raw2Central.Rd pkg/Meucci/man/Raw2Cumul.Rd pkg/Meucci/man/StudentTCopulaPdf.Rd pkg/Meucci/man/SummStats.Rd pkg/Meucci/man/TwoDimEllipsoid.Rd pkg/Meucci/man/std.Rd Log: Fixed documentation and reformated demo scripts and its relating functions up to S_EigenvalueDispersion included Modified: pkg/Meucci/R/InvariantProjection.R =================================================================== --- pkg/Meucci/R/InvariantProjection.R 2015-08-20 09:49:54 UTC (rev 3983) +++ pkg/Meucci/R/InvariantProjection.R 2015-08-20 15:52:02 UTC (rev 3984) @@ -7,168 +7,176 @@ #' Note the first central moment defined as expectation. 
#' #' \deqn{\tilde{ \mu } ^ {\big(n\big)} _{X} \equiv E \big\{ X^{n} \big\}, -#' \\ \mu ^{ \big(n\big) }_{X} \equiv \sum_0^{n-1} \big(-1\big)^{n-k} \mu ^{n-k}_{X} \tilde{ \mu }^{k}_{X} + \tilde{ \mu }_{X}^{n} } +#' \\ \mu ^{ \big(n\big) }_{X} \equiv \sum_0^{n-1} \big(-1\big)^{n-k} +#' \mu ^{n-k}_{X} \tilde{ \mu }^{k}_{X} + \tilde{ \mu }_{X}^{n} } #' -#' @param mu_ : [vector] (length N corresponding to order N) corresponding raw moments +#' @param mu_ : [vector] (length N corresponding to order N) corresponding +#' raw moments #' #' @return mu : [vector] (length N corresponding to order N) central moments #' #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 16- Raw Moments to central moments". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 16- Raw Moments to central moments". #' #' See Meucci's script for "Raw2Central.m" #' @export -Raw2Central = function( mu_ ) -{ - N = length( mu_ ); - mu = mu_; - for( n in 1 : N ) - { - mu[ n ] = ( (-1) ^ n ) * ( mu_[ 1 ] )^( n ); - - for( k in 1 : (n-1) ) - { - if( n != 1 ){ mu[ n ] = mu[ n ] + choose( n, k ) * ((-1)^(n-k)) * mu_[ k ] * (mu_[ 1 ])^(n-k) } ; - } +Raw2Central <- function(mu_) { + N <- length(mu_) + mu <- mu_ - mu[ n ] = mu[ n ] + mu_[ n ]; + for (n in 1:N) { + mu[n] <- ((-1) ^ n) * (mu_[1]) ^ (n) + + for(k in 1:(n - 1)) { + if (n != 1) { + mu[n] <- mu[n] + choose(n, k) * ((-1) ^ (n - k)) * mu_[k] * + (mu_[1]) ^ (n - k) + } + } + mu[n] <- mu[n] + mu_[n] } - - return( mu = mu ) + return(mu = mu) } -#' Map cumulative moments into raw moments. +#' @title Map cumulative moments into raw moments. #' -#' Step 5 of the projection process: -#' -#' From the cumulants of Y we compute the raw non-central moments of Y -#' -#' We do so recursively by the identity in formula (24) which follows from applying (21) and re-arranging terms +#' @description Step 5 of the projection process:\\ +#' From the cumulants of Y we compute the raw non-central moments of Y, and We +#' do so recursively by the identity in formula (24) which follows from applying +#' (21) and re-arranging terms #' -#' \deqn{ \tilde{ \mu } ^{ \big(n\big) }_{Y} +#' @details \deqn{ \tilde{ \mu } ^{ \big(n\big) }_{Y} #' \equiv \kappa^{ \big(n\big) }_{Y} + \sum_{k=1}^{n-1} (n-1)C_{k-1} #' \kappa_{Y}^{ \big(k\big) } \tilde{ \mu } ^{n-k}_{Y} } #' -#' @param ka : [vector] (length N corresponding to order N) cumulative moments +#' @param ka : [vector] (length N corresponding to order N) cumulative +#' moments #' -#' @return mu_ : [vector] (length N corresponding to order N) corresponding raw moments +#' @return mu_ : [vector] (length N corresponding to order N) corresponding +#' raw moments #' #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "Cumul2Raw.m". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}. See Meucci's script for "Cumul2Raw.m". +#' A. Meucci - "Annualization and General Projection of Skewness, Kurtosis and +#' All Summary Statistics" - formula (24) \url{http://www.symmys.com/node/136} #' -#' A. 
Meucci - "Annualization and General Projection of Skewness, Kurtosis and All Summary Statistics" - formula (24) \url{http://www.symmys.com/node/136} #' @export -Cumul2Raw = function( ka ) -{ - N = length( ka ); - mu_ = ka; +Cumul2Raw <- function(ka) { + N <- length(ka) + mu_ <- ka - for( n in 1 : N ) - { - ka[ n ] = mu_[ n ]; + for (n in 1 : N) { + ka[n] <- mu_[n] - for( k in 1 : (n-1) ) - { - if( n != 1 ){ mu_[ n ] = mu_[ n ] + choose( n-1, k-1 ) * ka[ k ] * mu_[ n-k ] }; - } + for (k in 1:(n - 1)) { + if (n != 1) { + mu_[n] <- mu_[n] + choose(n - 1, k - 1) * ka[k] * mu_[n-k] + } + } } - - return( mu_ ); + return(mu_) } -#' Transforms raw moments into cumulants +#' @title Transforms raw moments into cumulants #' -#' Step 3 of the projection process: From the non-central moments of X-t, we compute the cumulants. -#' -#' -#' This process follows from the Taylor approximations for any small z and ln(1+x)~x for any small x, -#' and from the definition of the first cumulant in (17). The we apply recursively the identity -#' in formula (21). See Kendall and Stuart (1969) +#' @description Step 3 of the projection process: From the non-central moments +#' of X-t, we compute the cumulants. +#' This process follows from the Taylor approximations for any small z and +#' ln(1+x)~x for any small x, and from the definition of the first cumulant in +#' (17). The we apply recursively the identity in formula (21). See Kendall and +#' Stuart (1969) #' -#' \deqn{ \kappa^{ \big(n\big) }_{X} \equiv \tilde{ \mu } ^{ \big(n\big) }_{X} - \sum_{k=1}^{n-1} (n-1)C_{k-1} \kappa_{X}^{ \big(k\big) } \tilde{ \mu } ^{n-k}_{X} } +#' @details \deqn{ \kappa^{ \big(n\big) }_{X} \equiv \tilde{ \mu } ^ +#' { \big(n\big) }_{X} - \sum_{k=1}^{n-1} (n-1)C_{k-1} \kappa_{X} ^ +#' { \big(k\big) } \tilde{ \mu } ^{n-k}_{X} } #' -#' @param mu_ : [vector] (length N corresponding to order N) corresponding raw moments +#' @param mu_ : [vector] (length N corresponding to order N) corresponding +#' raw moments #' -#' @return ka : [vector] (length N corresponding to order N) cumulative moments +#' @return ka : [vector] (length N corresponding to order N) cumulative +#' moments #' #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} +#' #' @references -#' A. Meucci - "Annualization and General Projection of Skewness, Kurtosis and All Summary Statistics" - formula (21) -#' Symmys site containing original MATLAB source code \url{http://www.symmys.com/node/136} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}. See Meucci's script for "Cumul2Raw.m". +#' A. Meucci - "Annualization and General Projection of Skewness, Kurtosis and +#' All Summary Statistics" - formula (24) \url{http://www.symmys.com/node/136} #' -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
-#' See Meucci's script for "Raw2Cumul.m" #' @export -Raw2Cumul = function( mu_ ) -{ - N = length( mu_ ) - ka = mu_ - - for ( i in 1:N ) - { - ka[i] = mu_[i]; +Raw2Cumul <- function(mu_) { + N <- length(mu_) + ka <- mu_ - for ( k in 1:(i-1) ) - { - if ( i != 1 ) { ka[i] = ka[i] - choose(i-1,k-1) * ka[k] * mu_[i-k] } + for (i in 1:N) { + ka[i] <- mu_[i] + + for (k in 1:(i - 1)) { + if (i != 1) { + ka[i] <- ka[i] - choose(i - 1,k - 1) * ka[k] * mu_[i - k] + } } } - - return( ka = ka ) + return(ka = ka) } -#' Transforms first n central moments into first n raw moments (first central moment defined as expectation) +#' @title Transforms first n central moments into first n raw moments (first +#' central moment defined as expectation) #' -#' Step 2 of projection process: From the central moments of step 1, we compute the non-central moments. To do so we start -#' with the first non-central moment and apply recursively an identity (formula 20) +#' @description Step 2 of projection process: From the central moments of +#' step 1, we compute the non-central moments. To do so we start with the first +#' non-central moment and apply recursively an identity (formula 20) #' -#' \deqn{ \tilde{ \mu }^{ \big(1\big) }_{X} \equiv \mu ^{\big(1\big)}_{X} -#' \\ \tilde{ \mu }^{ \big(n\big) }_{X} \equiv \mu ^{n}_{X} \sum_{k=0}^{n-1} \big(-1\big)^{n-k+1} \mu ^{n-k}_{X} \tilde{ \mu }^{\big(k\big)}_{X} } +#' @details \deqn{ \tilde{ \mu }^{ \big(1\big) }_{X} \equiv +#' \mu^{\big(1\big)}_{X} \\ \tilde{ \mu }^{ \big(n\big) }_{X} \equiv +#' \mu^{n}_{X} \sum_{k=0}^{n-1} \big(-1\big)^{n-k+1} \mu^{n-k}_{X} +#' \tilde{ \mu }^{\big(k\big)}_{X} } #' #' @param mu : [vector] (length N corresponding to order N) central moments #' -#' @return mu_ : [vector] (length N corresponding to order N) corresponding raw moments +#' @return mu_ : [vector] (length N corresponding to order N) corresponding +#' raw moments #' #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 16- Raw moments to central moments". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 16- Raw moments to central moments". #' #' See Meucci's script for "Central2Raw.m" #' @export -Central2Raw = function( mu ) -{ - N = length( mu ) - mu_ = mu - - for ( i in 2:N ) # we use the notation 'i' instead of 'n' as in Meucci's code so we can browser - { - mu_[i] = ( ( -1 ) ^ ( i + 1 ) ) * ( mu[1] ) ^ (i) - - for ( k in 1:(i-1) ) - { - mu_[i] = mu_[i] + choose(i,k) * ((-1)^(i-k+1)) * mu_[k] * (mu_[1])^(i-k) - } - mu_[i] = mu_[i] + mu[i] + +Central2Raw <- function(mu) { + N <- length(mu) + mu_ <- mu + + for (i in 2:N) { + mu_[i] <- ((-1) ^ (i + 1)) * (mu[1]) ^ (i) + + for (k in 1:(i - 1)) { + mu_[i] <- mu_[i] + choose(i,k) * ((-1) ^ (i - k + 1)) * mu_[k] * + (mu_[1]) ^ (i - k) + } + mu_[i] <- mu_[i] + mu[i] } - - return ( mu_ = mu_ ) + return (mu_ = mu_) } -#' Compute summary stats +#' @title Compute summary stats #' -#' Step 0 in projection process: Compute summary stats (mean, skew, kurtosis, etc.) of the invariant X-t -#' step 1 in the project process We collect the first 'n' central moments of the invariant X-t. +#' @description Step 0 in projection process: Compute summary stats (mean, skew, +#' kurtosis, etc.) of the invariant X-t step 1 in the project process We collect +#' the first 'n' central moments of the invariant X-t. 
#' #' @param X an invariant #' @param N the number of order statistics to collect @@ -178,55 +186,71 @@ #' @export #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' @export -SummStats = function( X , N ) -{ - suppressWarnings( library( matlab ) ) # otherwise, stops with "Error: (converted from warning) package 'matlab' was built under R version 2.13.2" - library( moments ) - + +SummStats <- function(X, N) { + # otherwise, stops with "Error: (converted from warning) package 'matlab' was + # built under R version 2.13.2" + suppressWarnings(library(matlab)) + library(moments) + # step 1: compute central moments based on formula 15 - mu = zeros( 1 , N ) - mu[ 1 ] = mean( X ) - for ( n in 2:N ) { mu[n] = moment( X , central = TRUE , order = n ) } # mu(2:n) contains the central moments. mu(1) is the mean - - # step 0: compute standardized statistics - ga = mu - ga[ 2 ] = sqrt( mu[2] ) - for ( n in 3:N ) # we focus on case n >= 3 because from the definition of central moments (15) and from (3) that i) the first central moment is the mean of the invariant X-t, and ii) the second central moment is standard deviaiton of the of the invariant X-t - { - ga[n] = mu[n] / ( ga[2]^n ) # based on formula 19. ga[1] = mean of invariant X-t, ga[2] = sd, ga[3] = skew, ga[4] = kurtosis... - } - - return( list( ga = ga , mu = mu ) ) + mu <- zeros(1, N) + mu[1] <- mean(X) + # mu(2:n) contains the central moments. mu(1) is the mean + for (n in 2:N) { + mu[n] <- moment(X, central = TRUE, order = n) + } + + # step 0: compute standardized statistics + ga <- mu + ga[2] <- sqrt(mu[2]) + # we focus on case n >= 3 because from the definition of central moments (15) + # and from (3) that i) the first central moment is the mean of the invariant + # X-t, and ii) the second central moment is the standard deviation of the + # invariant X-t + for (n in 3:N) { + # based on formula 19. ga[1] = mean of invariant X-t, ga[2] = sd, + # ga[3] = skew, ga[4] = kurtosis... + ga[n] <- mu[n] / (ga[2]^n) + } + return(list(ga = ga, mu = mu)) } -#' Calculates the population standard deviation +#' @title Calculates the population standard deviation #' -#' Calculates the population standard deviation dividing by 'n' instead of 'n-1' equivalent to Matlab +#' @description Calculates the population standard deviation dividing by 'n' +#' instead of 'n-1', equivalent to Matlab #' #' @param x a generic numeric vector -#' @return std a numeric with the population standard deviaiton of the generic numeric +#' @return std a numeric with the population standard deviation of the generic +#' numeric +#' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} +#' #' @export -#' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} -std = function( x ) { ( sum( ( x - mean( x ) ) ^ 2 ) / length( x ) ) ^.5 } +std <- function(x) { + return((sum((x - mean(x)) ^ 2) / length(x)) ^ .5) +} + #' Generate arbitrary distribution of a shifted-lognormal invariant #' -#' \deqn{X = a + e^{ m + sZ }} (formula 14) +#' @details \deqn{X = a + e^{ m + sZ }} (formula 14) #' #' @param J a numeric with the number of scenarios #' @param a a numeric with the location shift parameter. Mean of +#' distribution will be exp(a) #' @param m log of the mean of the distribution #' @param s log of the standard deviation of the distribution #' -#' @return X a numeric vector with i.i.d. 
lognormal samples based on parameters J, a, m, and s where X = a + exp( m + s * Z ) +#' @return X a numeric vector with i.i.d. lognormal samples based on +#' parameters J, a, m, and s where X = a + exp(m + s * Z) #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' @export -GenerateLogNormalDistribution = function( J, a, m, s ) -{ - Z = rnorm( J / 2 , 0 , 1 ) # create J/2 draws from the standard normal - Z = c( Z , -Z) / std( Z ) # a Jx1 numeric vector - X = a + exp( m + s * Z ) # a Jx1 numeric vector - - return( X = X ) + +GenerateLogNormalDistribution <- function(J, a, m, s) { + Z <- rnorm(J / 2, 0, 1) # create J/2 draws from the standard normal + Z <- c(Z, -Z) / std(Z) # a Jx1 numeric vector + X <- a + exp(m + s * Z) # a Jx1 numeric vector + + return(X = X) } - Modified: pkg/Meucci/R/LognormalCopulaPdf.R =================================================================== --- pkg/Meucci/R/LognormalCopulaPdf.R 2015-08-20 09:49:54 UTC (rev 3983) +++ pkg/Meucci/R/LognormalCopulaPdf.R 2015-08-20 15:52:02 UTC (rev 3984) @@ -1,7 +1,9 @@ -#' @title Computes the pdf of the copula of the lognormal distribution at the generic point u in the unit hypercube. +#' @title Computes the pdf of the copula of the lognormal distribution at the +#' generic point u in the unit hypercube. #' -#' @description Computes the pdf of the copula of the lognormal distribution at the generic point u in the unit hypercube, -#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. +#' @description Computes the pdf of the copula of the lognormal distribution at +#' the generic point u in the unit hypercube, as described in A. Meucci, +#' "Risk and Asset Allocation", Springer, 2005. #' #' @param u [vector] (J x 1) grades #' @param Mu [vector] (N x 1) location parameter @@ -10,29 +12,29 @@ #' @return F_U [vector] (J x 1) PDF values #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 36 - Pdf of the lognormal copula". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 36 - Pdf of the lognormal copula". 
#' #' See Meucci's script for "LognormalCopulaPdf.m" #' #' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -LognormalCopulaPdf = function( u, Mu, Sigma ) -{ - N = length( u ); - s = sqrt( diag( Sigma )); +LognormalCopulaPdf <- function(u, Mu, Sigma) { + N <- length(u) + s <- sqrt(diag(Sigma)) - x = qlnorm( u, Mu, s ); + x <- qlnorm(u, Mu, s) - Numerator = ( 2 * pi ) ^ ( -N / 2 ) * ( (det ( Sigma ) ) ^ ( -0.5 ) ) / - prod(x) * exp( -0.5 * t(log(x) - Mu) %*% mldivide( Sigma , ( log( x ) - Mu ), pinv=FALSE ) ); + Numerator <- (2 * pi) ^ (-N / 2) * ((det (Sigma)) ^ (-0.5)) / + prod(x) * exp(-0.5 * t(log(x) - Mu) %*% + mldivide(Sigma, (log(x) - Mu), pinv=FALSE)) - fs = dlnorm( x, Mu, s); + fs <- dlnorm(x, Mu, s) - Denominator = prod(fs); + Denominator <- prod(fs) - F_U = Numerator / Denominator; + F_U <- Numerator / Denominator - return ( F_U ); -} \ No newline at end of file + return (F_U) +} Modified: pkg/Meucci/R/NormalCopulaPdf.R =================================================================== --- pkg/Meucci/R/NormalCopulaPdf.R 2015-08-20 09:49:54 UTC (rev 3983) +++ pkg/Meucci/R/NormalCopulaPdf.R 2015-08-20 15:52:02 UTC (rev 3984) @@ -1,7 +1,9 @@ -#' @title Computes the pdf of the copula of the normal distribution at the generic point u in the unit hypercube +#' @title Computes the pdf of the copula of the normal distribution at the +#' generic point u in the unit hypercube #' -#' @description Computes the pdf of the copula of the normal distribution at the generic point u in the unit -#' hypercube, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. +#' @description Computes the pdf of the copula of the normal distribution at the +#' generic point u in the unit hypercube, as described in A. Meucci, "Risk and +#' Asset Allocation", Springer, 2005. #' #' @param u [vector] (J x 1) grade #' @param Mu [vector] (N x 1) mean @@ -10,28 +12,28 @@ #' @return F_U [vector] (J x 1) PDF values #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 33 - Pdf of the normal copula". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 33 - Pdf of the normal copula". #' #' See Meucci's script for "NormalCopulaPdf.m" #' #' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -NormalCopulaPdf = function( u, Mu, Sigma ) -{ - N = length( u ); - s = sqrt( diag( Sigma )); +NormalCopulaPdf <- function(u, Mu, Sigma) { + N <- length(u) + s <- sqrt(diag(Sigma)) - x = qnorm( u, Mu, s ); + x <- qnorm(u, Mu, s) - Numerator = ( 2 * pi ) ^ ( -N / 2 ) * ( (det ( Sigma ) ) ^ ( -0.5 ) ) * exp( -0.5 * t(x - Mu) %*% mldivide( Sigma , ( x - Mu ))); + Numerator <- (2 * pi) ^ (-N / 2) * ((det (Sigma)) ^ (-0.5)) * + exp(-0.5 * t(x - Mu) %*% mldivide(Sigma, (x - Mu))) - fs = dnorm( x, Mu, s); + fs <- dnorm(x, Mu, s) - Denominator = prod(fs); + Denominator <- prod(fs) - F_U = Numerator / Denominator; + F_U <- Numerator / Denominator - return ( F_U ); -} \ No newline at end of file + return (F_U) +} Modified: pkg/Meucci/R/PerformIidAnalysis.R =================================================================== --- pkg/Meucci/R/PerformIidAnalysis.R 2015-08-20 09:49:54 UTC (rev 3983) +++ pkg/Meucci/R/PerformIidAnalysis.R 2015-08-20 15:52:02 UTC (rev 3984) @@ -1,7 +1,8 @@ #' @title Performs simple invariance (i.i.d.) tests on a time series. #' -#' @description This function performs simple invariance (i.i.d.) tests on a time series, as described in -#' A. 
Meucci "Risk and Asset Allocation", Springer, 2005 +#' @description This function performs simple invariance (i.i.d.) tests on a +#' time series, as described in A. Meucci "Risk and Asset Allocation", Springer, +#' 2005 #' #' @param Dates : [vector] (T x 1) dates #' @param Data : [matrix] (T x N) data @@ -9,54 +10,55 @@ #' #' @note it checks the evolution over time #' -#' it checks that the variables are identically distributed by looking at the histogram of two subsamples +#' it checks that the variables are identically distributed by looking at the +#' histogram of two subsamples #' -#' it checks that the variables are independent by looking at the 1-lag scatter plot +#' it checks that the variables are independent by looking at the 1-lag +#' scatter plot #' #' under i.i.d. the location-dispersion ellipsoid should be a circle #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}. See Meucci's script for +#' "PerformIidAnalysis.m" #' -#' See Meucci's script for "PerformIidAnalysis.m" -#' #' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -PerformIidAnalysis = function( Dates = dim( Data)[1], Data, Str = "") -{ +PerformIidAnalysis <- function(Dates = dim(Data)[1], Data, Str = "") { - ########################################################################################################## + ############################################################################ ### Time series over time - dev.new(); - plot( Dates, Data, main = Str ); - #datetick( 'x', 'mmmyy', 'keeplimits', 'keepticks' ); - + dev.new() + plot(Dates, Data, main = Str) + #datetick('x', 'mmmyy', 'keeplimits', 'keepticks') - ########################################################################################################## - ### Test "identically distributed hypothesis": split observations into two sub-samples and plot histogram - Sample_1 = Data[ 1:round(length(Data) / 2) ]; - Sample_2 = Data[(round(length(Data)/2) + 1) : length(Data) ]; - num_bins_1 = round(5 * log(length(Sample_1))); - num_bins_2 = round(5 * log(length(Sample_2))); - X_lim = c( min(Data) - .1 * (max(Data) - min(Data)), max(Data) + .1 * (max(Data) - min(Data))); + ############################################################################ + ### Test "identically distributed hypothesis": split observations into two + ### sub-samples and plot histogram + Sample_1 <- Data[1:round(length(Data) / 2)] + Sample_2 <- Data[(round(length(Data)/2) + 1) : length(Data)] + num_bins_1 <- round(5 * log(length(Sample_1))) + num_bins_2 <- round(5 * log(length(Sample_2))) + X_lim <- c(min(Data) - .1 * (max(Data) - min(Data)), max(Data) + .1 * + (max(Data) - min(Data))) - dev.new(); + dev.new() - layout( matrix(c(1,1,2,2,0,3,3,0), 2, 4, byrow = TRUE), heights=c(1,1,1)); - hist(Sample_1, num_bins_1, xlab = "", ylab = "", main = "first half" ); - hist(Sample_2, num_bins_2, xlab = "", ylab = "", main = "second half" ); - - ########################################################################################################## - ### Test "independently distributed hypothesis": scatter plot of observations at lagged times - + layout(matrix(c(1,1,2,2,0,3,3,0), 2, 4, byrow = TRUE), heights=c(1,1,1)) + hist(Sample_1, num_bins_1, xlab = "", ylab = "", main = "first half") + hist(Sample_2, num_bins_2, xlab = "", ylab = "", main = "second half") - X = Data[ 1 : length(Data)-1 ]; - Y = 
Data[ 2 : length(Data) ]; - plot(X, Y, main="changes in implied vol"); + ############################################################################ + ### Test "independently distributed hypothesis": scatter plot of + ### observations at lagged times - m = cbind( apply( cbind( X, Y ), 2, mean )); - S = cov( cbind( X, Y )); - TwoDimEllipsoid( m, S, 2, FALSE, FALSE); + X <- Data[1 : length(Data)-1] + Y <- Data[2 : length(Data)] + plot(X, Y, main = "changes in implied vol") -} \ No newline at end of file + m <- cbind(apply(cbind(X, Y), 2, mean)) + S <- cov(cbind(X, Y)) + TwoDimEllipsoid(m, S, 2, FALSE, FALSE) +} Modified: pkg/Meucci/R/StudentTCopulaPdf.R =================================================================== --- pkg/Meucci/R/StudentTCopulaPdf.R 2015-08-20 09:49:54 UTC (rev 3983) +++ pkg/Meucci/R/StudentTCopulaPdf.R 2015-08-20 15:52:02 UTC (rev 3984) @@ -1,7 +1,9 @@ -#' @title Pdf of the copula of the Student t distribution at the generic point u in the unit hypercube +#' @title Pdf of the copula of the Student t distribution at the generic point u +#' in the unit hypercube #' -#' @description Pdf of the copula of the Student t distribution at the generic point u in the unit hypercube, -#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. +#' @description Pdf of the copula of the Student t distribution at the generic +#' point u in the unit hypercube, as described in A. Meucci, "Risk and Asset +#' Allocation", Springer, 2005. #' #' @param u : [vector] (J x 1) grade #' @param nu : [numerical] degrees of freedom @@ -12,31 +14,31 @@ #' @return F_U : [vector] (J x 1) PDF values #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 88 - Copula vs. Correlation". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}, "E 88 - Copula vs. Correlation". 
#' #' See Meucci's script for "StudentTCopulaPdf.m" #' #' @author Xavier Valls \email{xaviervallspla@@gmail.com} #' @export -StudentTCopulaPdf = function( u, nu, Mu, Sigma ) -{ - N = length( u ); - s = sqrt( diag( Sigma )); +StudentTCopulaPdf <- function(u, nu, Mu, Sigma) { + N <- length(u) + s <- sqrt(diag(Sigma)) - x = Mu + s * qt( u, nu); + x <- Mu + s * qt(u, nu) + #z2 <- t(x - Mu) %*% inv(Sigma) * (x-Mu) + z2 <- t(x - Mu) %*% mldivide(Sigma, (x - Mu)) + K <- (nu * pi) ^ (-N / 2) * gamma((nu + N) / 2) / gamma(nu / 2) * + ((det(Sigma)) ^ (-0.5)) + Numerator <- K * (1 + z2 / nu) ^ (-(nu + N) / 2) - z2 = t(x - Mu) %*% mldivide( Sigma, (x - Mu)); #z2 = t(x - Mu) %*% inv(Sigma) * (x-Mu); - K = ( nu * pi )^( -N / 2 ) * gamma( ( nu + N ) / 2 ) / gamma( nu / 2 ) * ( ( det( Sigma ) )^( -0.5 )); - Numerator = K * (1 + z2 / nu)^(-(nu + N) / 2); - - fs = dt((x - Mu) / s , nu); + fs <- dt((x - Mu) / s, nu) - Denominator = prod(fs); + Denominator <- prod(fs) - F_U = Numerator / Denominator; + F_U <- Numerator / Denominator - return ( F_U ); + return (F_U) } Modified: pkg/Meucci/R/TwoDimEllipsoid.R =================================================================== --- pkg/Meucci/R/TwoDimEllipsoid.R 2015-08-20 09:49:54 UTC (rev 3983) +++ pkg/Meucci/R/TwoDimEllipsoid.R 2015-08-20 15:52:02 UTC (rev 3984) @@ -1,20 +1,29 @@ -#'@title Computes the location-dispersion ellipsoid of the normalized first diagonal and off-diagonal elements -#' of a 2x2 Wishart distribution as a function of the inputs +#' @title Computes the location-dispersion ellipsoid of the normalized first +#' diagonal and off-diagonal elements of a 2x2 Wishart distribution as a +#' function of the inputs #' -#' @description This function computes the location-dispersion ellipsoid of the normalized (unit variance, -#' zero expectation)first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function -#' of the inputs, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. +#' @description This function computes the location-dispersion ellipsoid of the +#' normalized (unit variance, zero expectation)first diagonal and off-diagonal +#' elements of a 2x2 Wishart distribution as a function of the inputs, as +#' described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Ch 2. #' -#' @param Location : [vector] (2 x 1) location vector (typically the expected value -#' @param Square_Dispersion : [matrix] (2 x 2) scatter matrix Square_Dispersion (typically the covariance matrix) -#' @param Scale : [scalar] a scalar Scale, that specifies the scale (radius) of the ellipsoid -#' @param PlotEigVectors : [boolean] true then the eigenvectors (=principal axes) are plotted -#' @param PlotSquare : [boolean] true then the enshrouding box is plotted. If Square_Dispersion is the covariance +#' @param Location : [vector] (2x1) location vector (typically the +#' expected value +#' @param Square_Dispersion : [matrix] (2x2) scatter matrix Square_Dispersion +#' (typically the covariance matrix) +#' @param Scale : [scalar] a scalar Scale, that specifies the +#' scale (radius) of the ellipsoid +#' @param PlotEigVectors : [boolean] true then the eigenvectors (=principal +#' axes) are plotted +#' @param PlotSquare : [boolean] true then the enshrouding box is +#' plotted. If Square_Dispersion is the +#' covariance #' #' @return E : [figure handle] #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" +#' \url{http://symmys.com/node/170}. #' #' See Meucci's script for "TwoDimEllipsoid.m" #' @@ -22,81 +31,81 @@ #' @export -TwoDimEllipsoid = function( Location, Square_Dispersion, Scale = 1, PlotEigVectors = FALSE, PlotSquare = FALSE ) -{ +TwoDimEllipsoid <- function(Location, Square_Dispersion, Scale = 1, + PlotEigVectors = FALSE, PlotSquare = FALSE) { - ########################################################################################################## - ### compute the ellipsoid in the r plane, solution to ((R-Location)' * Dispersion^-1 * (R-Location) ) = Scale^2 - - Eigen = eigen(Square_Dispersion); - Centered_Ellipse = c(); - Angle = seq( 0, 2*pi, pi/500 ); - NumSteps = length(Angle); - - for( i in 1 : NumSteps ) - { - # normalized variables (parametric representation of the ellipsoid) - y = rbind( cos( Angle[ i ] ), sin( Angle[ i ] ) ); - Centered_Ellipse = c( Centered_Ellipse, Eigen$vectors %*% diag(sqrt(Eigen$values), length(Eigen$values)) %*% y ); ##ok + ############################################################################ + ### compute the ellipsoid in the r plane, solution to ((R-Location)' * + ### Dispersion^-1 * (R-Location)) = Scale^2 + + Eigen <- eigen(Square_Dispersion) + Centered_Ellipse <- c() + Angle <- seq(0, 2 * pi, pi / 500) + NumSteps <- length(Angle) + + for (i in 1 : NumSteps) { + # normalized variables (parametric representation of the ellipsoid) + y <- rbind(cos(Angle[i]), sin(Angle[i])) + Centered_Ellipse <- c(Centered_Ellipse, Eigen$vectors %*% + diag(sqrt(Eigen$values), length(Eigen$values)) %*% y) } - R = Location %*% array( 1, NumSteps ) + Scale * Centered_Ellipse; + R <- Location %*% array(1, NumSteps) + Scale * Centered_Ellipse - ########################################################################################################## - ### Plot the ellipsoid - - E = lines( R[1, ], R[2, ], col = "red", lwd = 2 ); + ############################################################################ + ### Plot the ellipsoid - ########################################################################################################## - ### Plot a rectangle centered in Location with semisides of lengths Dispersion[ 1] and Dispersion[ 2 ], respectively - - if( PlotSquare ) - { - Dispersion = sqrt( diag( Square_Dispersion ) ); - Vertex_LowRight_A = Location[ 1 ] + Scale * Dispersion[ 1 ]; - Vertex_LowRight_B = Location[ 2 ] - Scale * Dispersion[ 2 ]; - Vertex_LowLeft_A = Location[ 1 ] - Scale * Dispersion[ 1 ]; - Vertex_LowLeft_B = Location[ 2 ] - Scale * Dispersion[ 2 ]; - Vertex_UpRight_A = Location[ 1 ] + Scale * Dispersion[ 1 ]; - Vertex_UpRight_B = Location[ 2 ] + Scale * Dispersion[ 2 ]; - Vertex_UpLeft_A = Location[ 1 ] - Scale * Dispersion[ 1 ]; - Vertex_UpLeft_B = Location[ 2 ] + Scale * Dispersion[ 2 ]; - - Square = rbind( c( Vertex_LowRight_A, Vertex_LowRight_B ), - c( Vertex_LowLeft_A, Vertex_LowLeft_B ), - c( Vertex_UpLeft_A, Vertex_UpLeft_B ), - c( Vertex_UpRight_A, Vertex_UpRight_B ), - c( Vertex_LowRight_A, Vertex_LowRight_B ) ); + E <- lines(R[1, ], R[2, ], col = "red", lwd = 2) - h = lines(Square[ , 1 ], Square[ , 2 ], col = "red", lwd = 2 ); - - } + ############################################################################ + ### Plot a rectangle centered in Location with semisides of lengths + ### Dispersion[1] and Dispersion[2], respectively [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3984 From noreply at r-forge.r-project.org Fri Aug 
21 01:26:59 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 21 Aug 2015 01:26:59 +0200 (CEST) Subject: [Returnanalytics-commits] r3985 - pkg/Dowd Message-ID: <20150820232659.EBF4418657B@r-forge.r-project.org> Author: dacharya Date: 2015-08-21 01:26:59 +0200 (Fri, 21 Aug 2015) New Revision: 3985 Modified: pkg/Dowd/DESCRIPTION Log: Minor changes Modified: pkg/Dowd/DESCRIPTION =================================================================== --- pkg/Dowd/DESCRIPTION 2015-08-20 15:52:02 UTC (rev 3984) +++ pkg/Dowd/DESCRIPTION 2015-08-20 23:26:59 UTC (rev 3985) @@ -8,7 +8,7 @@ Maintainer: Dinesh Acharya Description: Kevin Dowd's book Measuring Market Risk is a widely read book in the area of risk measurement and is widely used by students and - practitioners alike. As he claims, MATLAB indeed have been most + practitioners alike. As he claims, MATLAB indeed might have been most suitable language when Dowd originally wrote the functions, however, with growing popularity of R and a large user base, this project should make Dowd's code accessible to more readers. As Dowd's code From noreply at r-forge.r-project.org Fri Aug 21 01:27:36 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 21 Aug 2015 01:27:36 +0200 (CEST) Subject: [Returnanalytics-commits] r3986 - pkg/Dowd Message-ID: <20150820232736.5965E18657B@r-forge.r-project.org> Author: dacharya Date: 2015-08-21 01:27:36 +0200 (Fri, 21 Aug 2015) New Revision: 3986 Modified: pkg/Dowd/TODO Log: Minor changes Modified: pkg/Dowd/TODO =================================================================== --- pkg/Dowd/TODO 2015-08-20 23:26:59 UTC (rev 3985) +++ pkg/Dowd/TODO 2015-08-20 23:27:36 UTC (rev 3986) @@ -4,11 +4,11 @@ packages can be avoided and will be attempted in future. The original Dowd's package contains some errors and this package has inherited -most of them. Not all functions work perfectly. It is also a possibility that can -be explored. +most of them. Not all functions work perfectly. Correction of such errors is a +possibility that can be explored, too. -There are also few functions that did not give same result as Dowd's MATLAB toolbox. +There are also few functions that did not give same result as Dowd's MATLAB toolbox. This is possibly because of two different scenarios, either because the parameters -parameters to algorithms such as density function in Kernel Density Approach (just -for sake of example) were not suitably chosen or there actually are errors in the code. -An attempt will be made in future to remove such inconsistencies. \ No newline at end of file +to algorithms such as density function in Kernel Density Approach (just +for sake of example) were not suitably chosen or there actually are some errors in the +code. An attempt will be made in future to remove such inconsistencies. 
\ No newline at end of file From noreply at r-forge.r-project.org Sun Aug 30 21:06:47 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 30 Aug 2015 21:06:47 +0200 (CEST) Subject: [Returnanalytics-commits] r3987 - pkg/Dowd Message-ID: <20150830190647.20456180FD8@r-forge.r-project.org> Author: dacharya Date: 2015-08-30 21:06:46 +0200 (Sun, 30 Aug 2015) New Revision: 3987 Modified: pkg/Dowd/DESCRIPTION Log: Small errors corrected Modified: pkg/Dowd/DESCRIPTION =================================================================== --- pkg/Dowd/DESCRIPTION 2015-08-20 23:27:36 UTC (rev 3986) +++ pkg/Dowd/DESCRIPTION 2015-08-30 19:06:46 UTC (rev 3987) @@ -7,24 +7,24 @@ Author: Dinesh Acharya Maintainer: Dinesh Acharya Description: Kevin Dowd's book Measuring Market Risk is a widely read book - in the area of risk measurement and is widely used by students and + in the area of risk measurement by students and practitioners alike. As he claims, MATLAB indeed might have been most - suitable language when Dowd originally wrote the functions, however, - with growing popularity of R and a large user base, this project - should make Dowd's code accessible to more readers. As Dowd's code - was not intended to be error free and were more for reference, some - functions in this package have inherited those errors. An attempt - will be made in future to identify and correct them. Dowd's original - code can be downloaded from . It should be noted that Dowd offers both + suitable language when he originally wrote the functions, but, + with growing popularity of R and a large user base it is not entirely + valid. As Dowd's code was not intended to be error free and were more + for reference, some functions in this package have inherited those + errors. An attempt will be made in future releases to identify and correct + them. Dowd's original code can be downloaded from www.kevindowd.org/measuring-market-risk/. + It should be noted that Dowd offers both MMR2 and MMR1 toolboxes. Only MMR2 was ported to R. MMR2 is more recent version of MMR1 toolbox and they both have mostly similar function. The toolbox mainly contains different parametric and non parametric methods for measurement of market risk as well as backtesting risk measurement methods. 
-Depends: R (>= 3.0.0), +Depends: R (>= 3.0.0), bootstrap, MASS, forecast -Suggests: PerformanceAnalytics, +Suggests: PerformanceAnalytics, testthat License: GPL \ No newline at end of file From noreply at r-forge.r-project.org Mon Aug 31 23:46:12 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 31 Aug 2015 23:46:12 +0200 (CEST) Subject: [Returnanalytics-commits] r3988 - pkg/Dowd Message-ID: <20150831214612.A027B18702A@r-forge.r-project.org> Author: dacharya Date: 2015-08-31 23:46:12 +0200 (Mon, 31 Aug 2015) New Revision: 3988 Modified: pkg/Dowd/DESCRIPTION Log: Description according to CRAN policies Modified: pkg/Dowd/DESCRIPTION =================================================================== --- pkg/Dowd/DESCRIPTION 2015-08-30 19:06:46 UTC (rev 3987) +++ pkg/Dowd/DESCRIPTION 2015-08-31 21:46:12 UTC (rev 3988) @@ -1,6 +1,6 @@ Package: Dowd Type: Package -Title: Functions Ported From MMR2 Toolbox Offered in Kevin Dowd's Book +Title: Functions Ported from 'MMR2' Toolbox Offered in Kevin Dowd's Book Measuring Market Risk Version: 0.1 Date: 2015-08-20 @@ -8,16 +8,16 @@ Maintainer: Dinesh Acharya Description: Kevin Dowd's book Measuring Market Risk is a widely read book in the area of risk measurement by students and - practitioners alike. As he claims, MATLAB indeed might have been most + practitioners alike. As he claims, 'MATLAB' indeed might have been the most suitable language when he originally wrote the functions, but, - with growing popularity of R and a large user base it is not entirely - valid. As Dowd's code was not intended to be error free and were more + with growing popularity of R it is not entirely + valid. As Dowd's code was not intended to be error free and were mainly for reference, some functions in this package have inherited those errors. An attempt will be made in future releases to identify and correct them. Dowd's original code can be downloaded from www.kevindowd.org/measuring-market-risk/. It should be noted that Dowd offers both - MMR2 and MMR1 toolboxes. Only MMR2 was ported to R. MMR2 is more - recent version of MMR1 toolbox and they both have mostly similar + 'MMR2' and 'MMR1' toolboxes. Only 'MMR2' was ported to R. 'MMR2' is more + recent version of 'MMR1' toolbox and they both have mostly similar function. The toolbox mainly contains different parametric and non parametric methods for measurement of market risk as well as backtesting risk measurement methods. From noreply at r-forge.r-project.org Mon Aug 31 23:46:47 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 31 Aug 2015 23:46:47 +0200 (CEST) Subject: [Returnanalytics-commits] r3989 - pkg/Dowd Message-ID: <20150831214647.8077A187A33@r-forge.r-project.org> Author: dacharya Date: 2015-08-31 23:46:47 +0200 (Mon, 31 Aug 2015) New Revision: 3989 Modified: pkg/Dowd/NAMESPACE Log: Import from added. 
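The NAMESPACE diff that follows adds importFrom() directives for base graphics and stats functions, which is what recent R CMD check runs expect for packages that call them. Since this NAMESPACE is generated by roxygen2, the usual source of such entries is a block of tags in one of the package's R files; a sketch of that pattern under that assumption (the function lists are illustrative, not the actual Dowd source):

    #' @importFrom graphics hist lines plot title
    #' @importFrom stats qnorm quantile rnorm sd
    NULL

roxygen2 picks up namespace tags attached to a NULL block like this and emits the corresponding importFrom() lines on the next document() run.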
Modified: pkg/Dowd/NAMESPACE =================================================================== --- pkg/Dowd/NAMESPACE 2015-08-31 21:46:12 UTC (rev 3988) +++ pkg/Dowd/NAMESPACE 2015-08-31 21:46:47 UTC (rev 3989) @@ -150,3 +150,8 @@ import(MASS) import(bootstrap) import(forecast) +importFrom("graphics", "barplot", "hist", "legend", "lines", "par", + "persp", "plot", "text", "title") +importFrom("stats", "cov", "density", "dlnorm", "dnorm", "dt", + "pbinom", "pchisq", "pnorm", "pt", "qnorm", "qt", + "quantile", "rbinom", "rlnorm", "rnorm", "sd") \ No newline at end of file From noreply at r-forge.r-project.org Mon Aug 31 23:50:25 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 31 Aug 2015 23:50:25 +0200 (CEST) Subject: [Returnanalytics-commits] r3990 - pkg/Dowd/R Message-ID: <20150831215025.25685187A33@r-forge.r-project.org> Author: dacharya Date: 2015-08-31 23:50:24 +0200 (Mon, 31 Aug 2015) New Revision: 3990 Modified: pkg/Dowd/R/CdfOfSumUsingGaussianCopula.R pkg/Dowd/R/CdfOfSumUsingGumbelCopula.R pkg/Dowd/R/GaussianCopulaVaR.R pkg/Dowd/R/GumbelCopulaVaR.R Log: Functions modified to reduce run time. Modified: pkg/Dowd/R/CdfOfSumUsingGaussianCopula.R =================================================================== --- pkg/Dowd/R/CdfOfSumUsingGaussianCopula.R 2015-08-31 21:46:47 UTC (rev 3989) +++ pkg/Dowd/R/CdfOfSumUsingGaussianCopula.R 2015-08-31 21:50:24 UTC (rev 3990) @@ -1,4 +1,4 @@ -#' Derives prob ( X + Y < quantile) using Gumbel copula +#' Derives prob ( X + Y < quantile) using Gaussian copula #' #' If X and Y are position P/Ls, then the VaR is equal to minus quantile. In #' such cases, we insert the negative of the VaR as the quantile, and the @@ -30,9 +30,9 @@ CdfOfSumUsingGaussianCopula <- function(quantile, mu1, mu2, sigma1, sigma2, rho, number.steps.in.copula){ # Define w variable - w.min <- 0.001 - w.max <- 0.999 # Specify min and max of w - dw <- 0.001 # Specify incremental change in w + w.min <- 0.01 + w.max <- 0.99 # Specify min and max of w + dw <- 0.01 # Specify incremental change in w w <- seq(w.min, w.max, dw) # Define w-variable going from min to max in units of size dw # Obtain 'first' and 'second' sets of copula values corresponding to Modified: pkg/Dowd/R/CdfOfSumUsingGumbelCopula.R =================================================================== --- pkg/Dowd/R/CdfOfSumUsingGumbelCopula.R 2015-08-31 21:46:47 UTC (rev 3989) +++ pkg/Dowd/R/CdfOfSumUsingGumbelCopula.R 2015-08-31 21:50:24 UTC (rev 3990) @@ -35,7 +35,8 @@ # Define w variable w.min <- 0.001 w.max <- 0.999 # Specify min and max of w - dw <- 0.001 # Specify incremental change in w + dw <- 0.001 # Specify incremental change in w. Note that this is different than in Dowd's + # original code (dw=0.001). It was necessary to reduce run time for examples. 
w <- seq(w.min, w.max, dw) # Define w-variable going from min to max in units of size dw # Obtain 'first' and 'second' sets of copula values corresponding to Modified: pkg/Dowd/R/GaussianCopulaVaR.R =================================================================== --- pkg/Dowd/R/GaussianCopulaVaR.R 2015-08-31 21:46:47 UTC (rev 3989) +++ pkg/Dowd/R/GaussianCopulaVaR.R 2015-08-31 21:50:24 UTC (rev 3990) @@ -20,7 +20,7 @@ #' @author Dinesh Acharya #' @examples #' -#' # VaR using bivariate Gumbel for X and Y with given parameters: +#' # VaR using bivariate Gaussian for X and Y with given parameters: #' GaussianCopulaVaR(2.3, 4.1, 1.2, 1.5, .6, 10, .95) #' #' @export @@ -50,7 +50,7 @@ } # Bisection Algorithm - tol <- 0.0001 # Tolerance level (NM: change manually if desired) + tol <- 0.001 # Tolerance level (NM: change manually if desired) while (U - L > tol){ x <- (L + U) / 2 # Bisection carried out in terms of P/L quantiles or minus VaR cum.prob <- CdfOfSumUsingGaussianCopula(x, mu1, mu2, sigma1, sigma2, rho, Modified: pkg/Dowd/R/GumbelCopulaVaR.R =================================================================== --- pkg/Dowd/R/GumbelCopulaVaR.R 2015-08-31 21:46:47 UTC (rev 3989) +++ pkg/Dowd/R/GumbelCopulaVaR.R 2015-08-31 21:50:24 UTC (rev 3990) @@ -20,7 +20,7 @@ #' @examples #' #' # VaR using bivariate Gumbel for X and Y with given parameters: -#' GumbelCopulaVaR(2.3, 4.1, 1.2, 1.5, 1.2, .95) +#' GumbelCopulaVaR(1.1, 3.1, 1.2, 1.5, 1.1, .95) #' #' @export GumbelCopulaVaR <- function(mu1, mu2, sigma1, sigma2, beta, cl){ @@ -50,7 +50,9 @@ } # Bisection Algorithm - tol <- 0.0001 # Tolerance level (NM: change manually if desired) + # The tolerance level in Dowd's original code was 0.0001. It was changed for test case + # to take less time. + tol <- 0.001 # Tolerance level (NM: change manually if desired) while (U - L > tol){ x <- (L + U) / 2 # Bisection carried out in terms of P/L quantiles or minus VaR cum.prob <- CdfOfSumUsingGumbelCopula(x, mu1, mu2, sigma1, sigma2, beta) @@ -64,5 +66,5 @@ } } y <- -x # VaR is negative of terminal x-value or P/L quantile - + return(y) } \ No newline at end of file From noreply at r-forge.r-project.org Mon Aug 31 23:51:15 2015 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 31 Aug 2015 23:51:15 +0200 (CEST) Subject: [Returnanalytics-commits] r3991 - pkg/Dowd/man Message-ID: <20150831215115.87ACE187A33@r-forge.r-project.org> Author: dacharya Date: 2015-08-31 23:51:15 +0200 (Mon, 31 Aug 2015) New Revision: 3991 Modified: pkg/Dowd/man/CdfOfSumUsingGaussianCopula.Rd pkg/Dowd/man/GaussianCopulaVaR.Rd pkg/Dowd/man/GumbelCopulaVaR.Rd Log: Functions modified to reduce run time of example. 
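The GaussianCopulaVaR and GumbelCopulaVaR changes above both center on the same numerical core: a bisection search for the P/L quantile x at which the copula-based cdf of the sum crosses 1 - cl, with the VaR returned as -x. A stripped-down sketch of that loop (cdf.fn is a stand-in for the package's CdfOfSum* routines; the bracketing direction assumes cdf.fn is increasing in x and that the root is bracketed on entry):

    bisect.copula.var <- function(cdf.fn, cl, L, U, tol = 0.001) {
      # assumes cdf.fn(L) < 1 - cl < cdf.fn(U) on entry
      x <- (L + U) / 2
      while (U - L > tol) {
        x <- (L + U) / 2
        if (cdf.fn(x) > 1 - cl) {
          U <- x  # quantile lies in the lower half of the bracket
        } else {
          L <- x  # quantile lies in the upper half of the bracket
        }
      }
      return(-x)  # VaR is minus the terminal P/L quantile
    }

Because bisection halves the bracket each pass, loosening tol from 0.0001 to 0.001 saves only three to four iterations per call, but each iteration involves a full grid evaluation of the cdf, which is where the example run time goes.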
Modified: pkg/Dowd/man/CdfOfSumUsingGaussianCopula.Rd
===================================================================
--- pkg/Dowd/man/CdfOfSumUsingGaussianCopula.Rd	2015-08-31 21:50:24 UTC (rev 3990)
+++ pkg/Dowd/man/CdfOfSumUsingGaussianCopula.Rd	2015-08-31 21:51:15 UTC (rev 3991)
@@ -2,7 +2,7 @@
 % Please edit documentation in R/CdfOfSumUsingGaussianCopula.R
 \name{CdfOfSumUsingGaussianCopula}
 \alias{CdfOfSumUsingGaussianCopula}
-\title{Derives prob ( X + Y < quantile) using Gumbel copula}
+\title{Derives prob ( X + Y < quantile) using Gaussian copula}
 \usage{
 CdfOfSumUsingGaussianCopula(quantile, mu1, mu2, sigma1, sigma2, rho,
   number.steps.in.copula)

Modified: pkg/Dowd/man/GaussianCopulaVaR.Rd
===================================================================
--- pkg/Dowd/man/GaussianCopulaVaR.Rd	2015-08-31 21:50:24 UTC (rev 3990)
+++ pkg/Dowd/man/GaussianCopulaVaR.Rd	2015-08-31 21:51:15 UTC (rev 3991)
@@ -30,7 +30,7 @@
 for normal marginals.
 }
 \examples{
-# VaR using bivariate Gumbel for X and Y with given parameters:
+# VaR using bivariate Gaussian for X and Y with given parameters:
    GaussianCopulaVaR(2.3, 4.1, 1.2, 1.5, .6, 10, .95)
 }
 \author{

Modified: pkg/Dowd/man/GumbelCopulaVaR.Rd
===================================================================
--- pkg/Dowd/man/GumbelCopulaVaR.Rd	2015-08-31 21:50:24 UTC (rev 3990)
+++ pkg/Dowd/man/GumbelCopulaVaR.Rd	2015-08-31 21:51:15 UTC (rev 3991)
@@ -28,7 +28,7 @@
 }
 \examples{
 # VaR using bivariate Gumbel for X and Y with given parameters:
-   GumbelCopulaVaR(2.3, 4.1, 1.2, 1.5, 1.2, .95)
+   GumbelCopulaVaR(1.1, 3.1, 1.2, 1.5, 1.1, .95)
 }
 \author{
 Dinesh Acharya

From noreply at r-forge.r-project.org  Mon Aug 31 23:52:27 2015
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org)
Date: Mon, 31 Aug 2015 23:52:27 +0200 (CEST)
Subject: [Returnanalytics-commits] r3992 - pkg/Dowd/tests/testthat
Message-ID: <20150831215227.7151B183F35@r-forge.r-project.org>

Author: dacharya
Date: 2015-08-31 23:52:27 +0200 (Mon, 31 Aug 2015)
New Revision: 3992

Modified:
   pkg/Dowd/tests/testthat/testCdfOfSumUsingGaussianCopula.R
   pkg/Dowd/tests/testthat/testCdfOfSumUsingGumbelCopula.R
Log:
Tolerance level reduced for tests.
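[The r3992 diffs below relax the tolerance argument of expect_equal from 0.001 to 0.1. In testthat that argument is forwarded to an all.equal-style comparison, i.e. a mean relative difference threshold, so tolerance=0.1 accepts results within roughly 10% of the expected value; coarse, but wide enough that the faster dw and tol settings from r3990 still pass. A quick base-R illustration of the semantics, with arbitrary numbers:

all.equal(0.3446, 0.36, tolerance = 0.1)  # TRUE: mean relative difference ~0.045
all.equal(0.3446, 0.40, tolerance = 0.1)  # "Mean relative difference: 0.1608": outside the band
]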
Modified: pkg/Dowd/tests/testthat/testCdfOfSumUsingGaussianCopula.R
===================================================================
--- pkg/Dowd/tests/testthat/testCdfOfSumUsingGaussianCopula.R	2015-08-31 21:51:15 UTC (rev 3991)
+++ pkg/Dowd/tests/testthat/testCdfOfSumUsingGaussianCopula.R	2015-08-31 21:52:27 UTC (rev 3992)
@@ -1,8 +1,8 @@
 test_that("Cdf Of Sum Using Gaussian Copula.",{
   # Success - 1
-  expect_equal(0.3446, CdfOfSumUsingGaussianCopula(0.9, 0.3, 1.5, 1.2, 1.5, 0.6, 25), tolerance=0.001)
+  expect_equal(0.3446, CdfOfSumUsingGaussianCopula(0.9, 0.3, 1.5, 1.2, 1.5, 0.6, 25), tolerance=0.1)
 
   # Success - 2
-  expect_equal(0.9939, CdfOfSumUsingGaussianCopula(0.1, -0.3, -3, .2, 1.5, 0.6, 10), tolerance=0.001)
+  expect_equal(0.9939, CdfOfSumUsingGaussianCopula(0.1, -0.3, -3, .2, 1.5, 0.6, 10), tolerance=0.1)
 
 })

Modified: pkg/Dowd/tests/testthat/testCdfOfSumUsingGumbelCopula.R
===================================================================
--- pkg/Dowd/tests/testthat/testCdfOfSumUsingGumbelCopula.R	2015-08-31 21:51:15 UTC (rev 3991)
+++ pkg/Dowd/tests/testthat/testCdfOfSumUsingGumbelCopula.R	2015-08-31 21:52:27 UTC (rev 3992)
@@ -1,9 +1,9 @@
 test_that("Cdf Of Sum Using Gumbel Copula.",{
   # Success - 1
-  expect_equal(0.0259, CdfOfSumUsingGumbelCopula(0.1, 1.3, 2.5, 1.2, 1.5, 1.2), tolerance=0.001)
+  expect_equal(0.0259, CdfOfSumUsingGumbelCopula(0.1, 1.3, 2.5, 1.2, 1.5, 1.2), tolerance=0.1)
 
   # Success - 2
-  expect_equal(0.8896, CdfOfSumUsingGumbelCopula(0.1, 1.5, -2.5, 0.4, 5.1, 2.1), tolerance=0.001)
+  expect_equal(0.8896, CdfOfSumUsingGumbelCopula(0.1, 1.5, -2.5, 0.4, 5.1, 2.1), tolerance=0.1)
 
   # Error - 1
   expect_error(val <- CdfOfSumUsingGumbelCopula(0.1, 1.5, -2.5, 0.4, 5.1, .9))
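[Taken together, r3990 through r3992 trade numerical resolution for speed: the copula CDF functions integrate over a grid in w with step dw, so the work per call grows like 1 / dw, and coarsening dw from 0.001 to 0.01 cuts it roughly tenfold. A toy Riemann-sum illustration of that cost/accuracy trade, assuming only a smooth integrand on (0, 1); f here is arbitrary, not the package's copula integrand:

f <- function(w) dnorm(qnorm(w))  # smooth toy integrand on (0, 1)
riemann <- function(dw) {
  w <- seq(dw, 1 - dw, dw)  # interior grid, mirroring w.min/w.max above
  sum(f(w)) * dw            # one evaluation per grid point: cost ~ 1 / dw
}
riemann(0.001)  # fine grid:   999 evaluations, ~0.28209 (exact: 1 / (2 * sqrt(pi)))
riemann(0.01)   # coarse grid:  99 evaluations, nearly the same value, 10x cheaper
]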