[Returnanalytics-commits] r3673 - in pkg/Meucci: . R demo man
noreply at r-forge.r-project.org
noreply at r-forge.r-project.org
Fri Jun 12 18:43:18 CEST 2015
Author: xavierv
Date: 2015-06-12 18:43:18 +0200 (Fri, 12 Jun 2015)
New Revision: 3673
Modified:
pkg/Meucci/NAMESPACE
pkg/Meucci/R/CmaCopula.R
pkg/Meucci/R/EntropyProg.R
pkg/Meucci/R/PlotDistributions.R
pkg/Meucci/R/Prior2Posterior.R
pkg/Meucci/R/data.R
pkg/Meucci/R/pHistPriorPosterior.R
pkg/Meucci/demo/00Index
pkg/Meucci/demo/AnalyticalvsNumerical.R
pkg/Meucci/demo/HermiteGrid_demo.R
pkg/Meucci/man/BlackLittermanFormula.Rd
pkg/Meucci/man/BlackScholesCallPrice.Rd
pkg/Meucci/man/CMAcombination.Rd
pkg/Meucci/man/CMAseparation.Rd
pkg/Meucci/man/Central2Raw.Rd
pkg/Meucci/man/CentralAndStandardizedStatistics.Rd
pkg/Meucci/man/ComputeCVaR.Rd
pkg/Meucci/man/ComputeMVE.Rd
pkg/Meucci/man/ComputeMoments.Rd
pkg/Meucci/man/CondProbViews.Rd
pkg/Meucci/man/ConvertChangeInYield2Price.Rd
pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd
pkg/Meucci/man/Cumul2Raw.Rd
pkg/Meucci/man/DetectOutliersViaMVE.Rd
pkg/Meucci/man/DoubleDecay.Rd
pkg/Meucci/man/EfficientFrontierPrices.Rd
pkg/Meucci/man/EfficientFrontierReturns.Rd
pkg/Meucci/man/EfficientFrontierReturnsBenchmark.Rd
pkg/Meucci/man/EntropyProg.Rd
pkg/Meucci/man/Equities.Rd
pkg/Meucci/man/Fit2Moms.Rd
pkg/Meucci/man/FitExpectationMaximization.Rd
pkg/Meucci/man/FitMultivariateGarch.Rd
pkg/Meucci/man/FitOU.Rd
pkg/Meucci/man/FitOrnsteinUhlenbeck.Rd
pkg/Meucci/man/GenFirstEigVect.Rd
pkg/Meucci/man/GenPCBasis.Rd
pkg/Meucci/man/GenerateLogNormalDistribution.Rd
pkg/Meucci/man/GenerateUniformDrawsOnUnitSphere.Rd
pkg/Meucci/man/HorizonPricing.Rd
pkg/Meucci/man/InterExtrapolate.Rd
pkg/Meucci/man/LeastInfoKernel.Rd
pkg/Meucci/man/Log2Lin.Rd
pkg/Meucci/man/LognormalCopulaPdf.Rd
pkg/Meucci/man/LognormalMoments2Parameters.Rd
pkg/Meucci/man/LognormalParam2Statistics.Rd
pkg/Meucci/man/LongShortMeanCVaRFrontier.Rd
pkg/Meucci/man/MaxEntropy.Rd
pkg/Meucci/man/MaxRsqCS.Rd
pkg/Meucci/man/MaxRsqTS.Rd
pkg/Meucci/man/MeanTCEntropyFrontier.Rd
pkg/Meucci/man/MleRecursionForStudentT.Rd
pkg/Meucci/man/MvnRnd.Rd
pkg/Meucci/man/NoisyObservations.Rd
pkg/Meucci/man/NormalCopulaPdf.Rd
pkg/Meucci/man/OUstep.Rd
pkg/Meucci/man/OUstepEuler.Rd
pkg/Meucci/man/PanicCopula.Rd
pkg/Meucci/man/PartialConfidencePosterior.Rd
pkg/Meucci/man/PerformIidAnalysis.Rd
pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd
pkg/Meucci/man/PlotDistributions.Rd
pkg/Meucci/man/PlotFrontier.Rd
pkg/Meucci/man/PlotMarginalsNormalInverseWishart.Rd
pkg/Meucci/man/PlotResults.Rd
pkg/Meucci/man/PlotVolVsCompositionEfficientFrontier.Rd
pkg/Meucci/man/Prior2Posterior.Rd
pkg/Meucci/man/ProjectionStudentT.Rd
pkg/Meucci/man/QuantileMixture.Rd
pkg/Meucci/man/RIEfficientFrontier.Rd
pkg/Meucci/man/RandNormalInverseWishart.Rd
pkg/Meucci/man/Raw2Central.Rd
pkg/Meucci/man/Raw2Cumul.Rd
pkg/Meucci/man/RejectOutlier.Rd
pkg/Meucci/man/SimulateJumpDiffusionMerton.Rd
pkg/Meucci/man/StockSeries.Rd
pkg/Meucci/man/StudentTCopulaPdf.Rd
pkg/Meucci/man/SummStats.Rd
pkg/Meucci/man/TimeSeries.Rd
pkg/Meucci/man/Tweak.Rd
pkg/Meucci/man/TwoDimEllipsoid.Rd
pkg/Meucci/man/UsSwapRates.Rd
pkg/Meucci/man/ViewCurveSlope.Rd
pkg/Meucci/man/ViewImpliedVol.Rd
pkg/Meucci/man/ViewRanking.Rd
pkg/Meucci/man/ViewRealizedVol.Rd
pkg/Meucci/man/bondAttribution.Rd
pkg/Meucci/man/butterfliesAnalytics.Rd
pkg/Meucci/man/covNRets.Rd
pkg/Meucci/man/db.Rd
pkg/Meucci/man/dbFFP.Rd
pkg/Meucci/man/db_FX.Rd
pkg/Meucci/man/derivatives.Rd
pkg/Meucci/man/efficientFrontier.Rd
pkg/Meucci/man/fILMR.Rd
pkg/Meucci/man/factorsDistribution.Rd
pkg/Meucci/man/fixedIncome.Rd
pkg/Meucci/man/freaqEst.Rd
pkg/Meucci/man/garch1f4.Rd
pkg/Meucci/man/garch2f8.Rd
pkg/Meucci/man/gaussHermiteMesh.Rd
pkg/Meucci/man/hermitePolynomial.Rd
pkg/Meucci/man/highYieldIndices.Rd
pkg/Meucci/man/implVol.Rd
pkg/Meucci/man/integrateSubIntervals.Rd
pkg/Meucci/man/kernelbw.Rd
pkg/Meucci/man/kernelcdf.Rd
pkg/Meucci/man/kernelinv.Rd
pkg/Meucci/man/kernelpdf.Rd
pkg/Meucci/man/linearModel.Rd
pkg/Meucci/man/linreturn.Rd
pkg/Meucci/man/normalizeProb.Rd
pkg/Meucci/man/pHist.Rd
pkg/Meucci/man/pHistPriorPosterior.Rd
pkg/Meucci/man/private_fun.Rd
pkg/Meucci/man/returnsDistribution.Rd
pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd
pkg/Meucci/man/sectorsSnP500.Rd
pkg/Meucci/man/sectorsTS.Rd
pkg/Meucci/man/securitiesIndustryClassification.Rd
pkg/Meucci/man/securitiesTS.Rd
pkg/Meucci/man/std.Rd
pkg/Meucci/man/subIntervals.Rd
pkg/Meucci/man/swap2y4y.Rd
pkg/Meucci/man/swapParRates.Rd
pkg/Meucci/man/swaps.Rd
Log:
Ported Torsion and Inverse Call papers, formatted AnalyticalvsNumerical and its dependencies
Modified: pkg/Meucci/NAMESPACE
===================================================================
--- pkg/Meucci/NAMESPACE 2015-06-11 21:45:59 UTC (rev 3672)
+++ pkg/Meucci/NAMESPACE 2015-06-12 16:43:18 UTC (rev 3673)
@@ -1,3 +1,5 @@
+# Generated by roxygen2 (4.1.1): do not edit by hand
+
export(BlackLittermanFormula)
export(BlackScholesCallPrice)
export(BlackScholesCallPutPrice)
@@ -2,9 +4,9 @@
export(BlackScholesPutPrice)
+export(CMAcombination)
+export(CMAseparation)
export(Central2Raw)
export(CentralAndStandardizedStatistics)
-export(CMAcombination)
-export(CMAseparation)
export(ComputeCVaR)
+export(ComputeMVE)
export(ComputeMoments)
-export(ComputeMVE)
export(CondProbViews)
@@ -15,6 +17,7 @@
export(Cumul2Raw)
export(DetectOutliersViaMVE)
export(DoubleDecay)
+export(EffectiveBets)
export(EfficientFrontierPrices)
export(EfficientFrontierReturns)
export(EfficientFrontierReturnsBenchmark)
@@ -23,16 +26,12 @@
export(FitExpectationMaximization)
export(FitMultivariateGarch)
export(FitOrnsteinUhlenbeck)
-export(garch1f4)
-export(garch2f8)
export(GenerateLogNormalDistribution)
export(GenerateUniformDrawsOnUnitSphere)
-export(hermitePolynomial)
export(HorizonPricing)
-export(integrateSubIntervals)
export(InterExtrapolate)
+export(InverseCallTransformation)
export(LeastInfoKernel)
-export(linreturn)
export(Log2Lin)
export(LognormalCopulaPdf)
export(LognormalMoments2Parameters)
@@ -44,11 +43,9 @@
export(MvnRnd)
export(NoisyObservations)
export(NormalCopulaPdf)
-export(normalizeProb)
export(PanicCopula)
export(PartialConfidencePosterior)
export(PerformIidAnalysis)
-export(pHistPriorPosterior)
export(PlotCompositionEfficientFrontier)
export(PlotDistributions)
export(PlotFrontier)
@@ -57,19 +54,27 @@
export(Prior2Posterior)
export(ProjectionStudentT)
export(QuantileMixture)
+export(RIEfficientFrontier)
export(RandNormalInverseWishart)
export(Raw2Central)
export(Raw2Cumul)
export(RejectOutlier)
-export(RIEfficientFrontier)
-export(robustBayesianPortfolioOptimization)
export(SimulateJumpDiffusionMerton)
-export(std)
export(StudentTCopulaPdf)
-export(subIntervals)
export(SummStats)
+export(Torsion)
export(Tweak)
export(TwoDimEllipsoid)
export(ViewCurveSlope)
export(ViewImpliedVol)
export(ViewRealizedVol)
+export(garch1f4)
+export(garch2f8)
+export(hermitePolynomial)
+export(integrateSubIntervals)
+export(linreturn)
+export(normalizeProb)
+export(pHistPriorPosterior)
+export(robustBayesianPortfolioOptimization)
+export(std)
+export(subIntervals)
Modified: pkg/Meucci/R/CmaCopula.R
===================================================================
--- pkg/Meucci/R/CmaCopula.R 2015-06-11 21:45:59 UTC (rev 3672)
+++ pkg/Meucci/R/CmaCopula.R 2015-06-12 16:43:18 UTC (rev 3673)
@@ -212,7 +212,7 @@
# Aeq %*% p_ # equals the expectations in beq
# meanPriorReturnScenarios = apply( Aeq[ -1 , ] , 2 , mean ) # mean return for j'th prior scenario (distribution remains unaltered)
# meanPriorReturnScenarios %*% p_ # expected portfolio return with new probabilities at each scenario
- # breaks = pHist( meanPriorReturnScenarios , p_ , round( 10 * log(J)) , freq = FALSE )$Breaks
+ # breaks = PHist( meanPriorReturnScenarios , p_ , round( 10 * log(J)) , freq = FALSE )$Breaks
# weighted.hist( meanPriorReturnScenarios, p_ , breaks , freq = FALSE ) # create weighted histogram. TODO: FIX. Cannot abline the expected portfolio return
# bin <- cut( meanPriorReturnScenarios , breaks , include.lowest = TRUE)
@@ -270,8 +270,8 @@
R_w = Y %*% w # Range is from -.18 to .18
meanReturn = mean( R_w )
- histOld = pHist( R_w , p , round( 10 * log(J)) , freq = FALSE ) # barplot with old probabilities
- histNew = pHist( R_w , p_ , round( 10 * log(J)) , freq = FALSE ) # barplot with new probabilities
+ histOld = PHist( R_w , p , round( 10 * log(J)) , freq = FALSE ) # barplot with old probabilities
+ histNew = PHist( R_w , p_ , round( 10 * log(J)) , freq = FALSE ) # barplot with new probabilities
print( histOld$Plot )
print( histNew$Plot )
# abline( v = mean( R_w ) , col = "blue" )
Modified: pkg/Meucci/R/EntropyProg.R
===================================================================
--- pkg/Meucci/R/EntropyProg.R 2015-06-11 21:45:59 UTC (rev 3672)
+++ pkg/Meucci/R/EntropyProg.R 2015-06-12 16:43:18 UTC (rev 3673)
@@ -1,191 +1,306 @@
-#' Entropy pooling program for blending views on scenarios with a prior scenario-probability distribution
+#' @title Compute posterior (=change of measure) with Entropy Pooling.
#'
-#' Entropy program will change the initial predictive distribution 'p' to a new set 'p_' that satisfies
-#' specified moment conditions but changes other propoerties of the new distribution the least by
-#' minimizing the relative entropy between the two distributions. Theoretical note: Relative Entropy (Kullback-Leibler information criterion KLIC) is an
-#' asymmetric measure.
+#' @description
+#' Entropy Pooling program for blending views on scenarios with a
+#' prior scenario-probability distribution.
#'
-#' We retrieve a new set of probabilities for the joint-scenarios using the Entropy pooling method
-#' Of the many choices of 'p' that satisfy the views, we choose 'p' that minimize the entropy or distance of the new probability
-#' distribution to the prior joint-scenario probabilities
-#' We use Kullback-Leibler divergence or relative entropy dist(p,q): Sum across all scenarios [ p-t * ln( p-t / q-t ) ]
-#' Therefore we define solution as p* = argmin (choice of p ) [ sum across all scenarios: p-t * ln( p-t / q-t) ],
-#' such that 'p' satisfies views. The views modify the prior in a cohrent manner (minimizing distortion)
-#' We forumulate the stress tests of the baseline scenarios as linear constraints on yet-to-be defined probabilities
-#' Note that the numerical optimization acts on a very limited number of variables equal
-#' to the number of views. It does not act directly on the very large number of variables
-#' of interest, namely the probabilities of the Monte Carlo scenarios. This feature guarantees
-#' the numerical feasability of entropy optimization
-#' Note that new probabilities are generated in much the same way that the state-price density modifies
-#' objective probabilities of pay-offs to risk-neutral probabilities in contingent-claims asset pricing
+#' @details
+#' The entropy program will change the initial predictive distribution 'p' to a
+#' new set 'p_' that satisfies specified moment conditions but changes other
+#' properties of the new distribution the least, by minimizing the relative
+#' entropy between the two distributions. Theoretical note: Relative Entropy
+#' (Kullback-Leibler information criterion KLIC) is an asymmetric measure.
#'
-#' Compute posterior (=change of measure) with Entropy Pooling, as described in
+#' We retrieve a new set of probabilities for the joint-scenarios using the
+#' Entropy pooling method of the many choices of 'p' that satisfy the views,
+#' we choose a 'p' that minimize the entropy or distance of the new probability
+#' distribution to the prior joint-scenario probabilities, we use the
+#' Kullback-Leibler divergence or the relative entropy dist(p,q): Sum across all
+#' scenarios [p-t * ln( p-t / q-t)]. Therefore we define solution as
+#' p* = argmin (choice of p) [sum across all scenarios: p-t * ln( p-t / q-t)],
+#' such that 'p' satisfies views. The views modify the prior in a coherent
+#' manner (minimizing distortion). We formulate the stress tests of the baseline
+#' scenarios as linear constraints on yet-to-be defined probabilities.
#'
-#' @param p a vector of initial probabilities based on prior (reference model, empirical distribution, etc.). Sum of 'p' must be 1
-#' @param Aeq matrix consisting of equality constraints (paired with argument 'beq'). Denoted as 'H' in the Meucci paper. (denoted as 'H' in the "Meucci - Flexible Views Theory & Practice" paper formlua 86 on page 22)
-#' @param beq vector corresponding to the matrix of equality constraints (paired with argument 'Aeq'). Denoted as 'h' in the Meucci paper
-#' @param A matrix consisting of inequality constraints (paired with argument 'b'). Denoted as 'F' in the Meucci paper
-#' @param b vector consisting of inequality constraints (paired with matrix A). Denoted as 'f' in the Meucci paper
+#' Note that the numerical optimization acts on a very limited number of
+#' variables equal to the number of views. It does not act directly on the very
+#' large number of variables of interest, namely the probabilities of the Monte
+#' Carlo scenarios. This feature guarantees the numerical feasibility of entropy
+#' optimization.
#'
-#' ' \deqn{ \tilde{p} \equiv argmin_{Fx \leq f, Hx \equiv h} \big\{ \sum_1^J x_{j} \big(ln \big( x_{j} \big) - ln \big( p_{j} \big) \big) \big\}
-#' \\ \ell \big(x, \lambda, \nu \big) \equiv x' \big(ln \big(x\big) - ln \big(p\big) \big) + \lambda' \big(Fx - f\big) + \nu' \big(Hx - h\big)}
-#' @return a list with
-#' p_ revised probabilities based on entropy pooling
-#' optimizationPerformance a list with status of optimization, value, number of iterations and sum of probabilities.
+#' Note that new probabilities are generated in much the same way that the
+#' state-price density modifies objective probabilities of pay-offs to
+#' risk-neutral probabilities in contingent-claims asset pricing.
+#'
+#'
+#' @param p a vector of initial probabilities based on prior (reference
+#' model, empirical distribution, etc.). Sum of 'p' must be 1
+#' @param Aeq matrix consisting of equality constraints (paired with
+#' argument 'beq'). Denoted as 'H' in the Meucci paper.
+#' (denoted as 'H' in the "Meucci - Flexible Views Theory &
+#' Practice" paper formula 86 on page 22)
+#'
+#' @param beq vector corresponding to the matrix of equality constraints
+#' (paired with argument 'Aeq'). Denoted as 'h' in the paper
+#' @param A matrix consisting of inequality constraints (paired with
+#' argument 'b'). Denoted as 'F' in the Meucci paper
+#' @param b vector consisting of inequality constraints (paired with
+#' matrix A). Denoted as 'f' in the Meucci paper
+#'
+#' \deqn{ \tilde{p} \equiv argmin_{Fx \leq f, Hx \equiv h} \big\{ \sum_1^J
+#' x_{j} \big(ln \big( x_{j} \big) - ln \big( p_{j} \big) \big) \big\} \\
+#' \ell \big(x, \lambda, \nu \big) \equiv x' \big(ln \big(x\big) - ln
+#' \big(p\big) \big) + \lambda' \big(Fx - f\big) + \nu' \big(Hx - h\big)}
+#'
+#' @return p_ revised probabilities based on entropy
+#' pooling
+#' @return optimizationPerformance a list with status of optimization,
+#' value, number of iterations and sum of
+#' probabilities.
+#'
#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}
+#'
#' @references
-#' A. Meucci - "Fully Flexible Views: Theory and Practice". See page 22 for illustration of numerical implementation
-#' Symmys site containing original MATLAB source code \url{http://www.symmys.com}
-#' NLOPT open-source optimization site containing background on algorithms \url{http://ab-initio.mit.edu/wiki/index.php/NLopt}
+#' A. Meucci - "Fully Flexible Views: Theory and Practice". See page 22 for
+#' illustration of numerical implementation
+#'
+#' Symmys site containing original MATLAB code \url{http://www.symmys.com}
+#'
+#' NLOPT open-source optimization site containing background on algorithms
+#' \url{http://ab-initio.mit.edu/wiki/index.php/NLopt}
+#'
+#' We use the information-theoretic estimator of Kitamura and Stutzer (1997).
-#' Reversing 'p' and 'p_' leads to the empirical likelihood" estimator of Qin and Lawless (1994).
-#' See Robertson et al, "Forecasting Using Relative Entropy" (2002) for more theory
-#' @export
-EntropyProg = function( p , A = NULL , b = NULL , Aeq , beq )
-{
- library( nloptr )
+#'
+#' Reversing 'p' and 'p_' leads to the "empirical likelihood" estimator of Qin
+#' and Lawless (1994).
+#'
+#' See Robertson et al, "Forecasting Using Relative Entropy" (2002) for theory
+#'
+#' @export
- if( !length(b) ) A = matrix( ,nrow = 0, ncol = 0)
- if( !length(b) ) b = matrix( ,nrow = 0, ncol = 0)
- # count the number of constraints
- K_ = nrow( A ) # K_ is the number of inequality constraints in the matrix-vector pair A-b
- K = nrow( Aeq ) # K is the number of equality views in the matrix-vector pair Aeq-beq
-
- # parameter checks
- if ( K_ + K == 0 ) { stop( "at least one equality or inequality constraint must be specified")}
- if ( ( ( .999999 < sum(p)) & (sum(p) < 1.00001) ) == FALSE ) { stop( "sum of probabilities from prior distribution must equal 1")}
- if ( nrow(Aeq)!=nrow(beq) ) { stop( "number of inequality constraints in matrix Aeq must match number of elements in vector beq") }
- if ( nrow(A)!=nrow(b) ) { stop( "number of equality constraints in matrix A must match number of elements in vector b") }
-
- # calculate derivatives of constraint matrices
- A_ = t( A )
- b_ = t( b )
- Aeq_ = t( Aeq )
- beq_ = t( beq )
-
- # starting guess for optimization search with length = to number of views
- x0 = matrix( 0 , nrow = K_ + K , ncol = 1 )
-
- if ( !K_ ) # equality constraints only
- {
- # define maximum likelihood, gradient, and hessian functions for unconstrained and constrained optimization
- eval_f_list = function( v ) # cost function for unconstrained optimization (no inequality constraints)
- {
- x = exp( log(p) - 1 - Aeq_ %*% v )
- x = apply( cbind( x , 10^-32 ) , 1 , max ) # robustification
- # L is the Lagrangian dual function (without inequality constraints). See formula 88 on p. 22 in "Meucci - Fully Flexible Views - Theory and Practice (2010)"
- # t(x) is the derivative x'
- # Aeq_ is the derivative of the Aeq matrix of equality constraints (denoted as 'H in the paper)
- # beq_ is the transpose of the vector associated with Aeq equality constraints
- # L= x' * ( log(x) - log(p) + Aeq_ * v ) - beq_ * v
- # 1xJ * ( Jx1 - Jx1 + JxN+1 * N+1x1 ) - 1xN+1 * N+1x1
- L = t(x) %*% ( log(x) - log(p) + Aeq_ %*% v ) - beq_ %*% v
- mL = -L # take negative values since we want to maximize
-
- # evaluate gradient
- gradient = beq - Aeq %*% x
-
- # evaluate Hessian
- # We comment this out for now -- to be used when we find an optimizer that can utilize the Hessian in addition to the gradient
- # H = ( Aeq %*% (( x %*% ones(1,K) ) * Aeq_) ) # Hessian computed by Chen Qing, Lin Daimin, Meng Yanyan, Wang Weijun
-
- return( list( objective = mL , gradient = gradient ) )
- }
-
- # setup unconstrained optimization
- start = Sys.time()
- opts = list( algorithm = "NLOPT_LD_LBFGS" , xtol_rel = 1.0e-6 ,
- check_derivatives = TRUE , check_derivatives_print = "all" , print_level = 2 , maxeval = 1000 )
- optimResult = nloptr(x0 = x0, eval_f = eval_f_list , opts = opts )
- end = Sys.time()
- print( c("Optimization completed in " , end - start )) ; rm( start ) ; rm( end )
-
- if ( optimResult$status < 0 ) { print( c("Exit code " , optimResult$status ) ) ; stop( "Error: The optimizer did not converge" ) }
-
- # return results of optimization
- v = optimResult$solution
- p_ = exp( log(p) - 1 - Aeq_ %*% v )
- optimizationPerformance = list( converged = (optimResult$status > 0) , ml = optimResult$objective , iterations = optimResult$iterations , sumOfProbabilities = sum( p_ ) )
- }else # case inequality constraints are specified
- {
- # setup variables for constrained optimization
- InqMat = -diag( 1 , K_ + K ) # -1 * Identity Matrix with dimension equal to number of constraints
- InqMat = InqMat[ -c( K_+1:nrow( InqMat ) ) , ] # drop rows corresponding to equality constraints
- InqVec = matrix( 0 , K_ , 1 )
-
- # define maximum likelihood, gradient, and hessian functions for constrained optimization
- InqConstraint = function( x ) { return( InqMat %*% x ) } # function used to evalute A %*% x <= 0 or A %*% x <= c(0,0) if there is more than one inequality constraint
-
- jacobian_constraint = function( x ) { return( InqMat ) }
- # Jacobian of the constraints matrix. One row per constraint, one column per control parameter (x1,x2)
- # Turns out the Jacobian of the constraints matrix is always equal to InqMat
-
- nestedfunC = function( lv )
- {
- lv = as.matrix( lv )
- l = lv[ 1:K_ , , drop = FALSE ] # inequality Lagrange multiplier
- v = lv[ (K_+1):length(lv) , , drop = FALSE ] # equality lagrange multiplier
- x = exp( log(p) - 1 - A_ %*% l - Aeq_ %*% v )
- x = apply( cbind( x , 10^-32 ) , 1 , max )
-
- # L is the cost function used for constrained optimization
- # L is the Lagrangian dual function with inequality constraints and equality constraints
- L = t(x) %*% ( log(x) - log(p) ) + t(l) %*% (A %*% x-b) + t(v) %*% (Aeq %*% x-beq)
- objective = -L # take negative values since we want to maximize
-
- # calculate the gradient
- gradient = rbind( b - A%*%x , beq - Aeq %*% x )
-
- # compute the Hessian (commented out since no R optimizer supports use of Hessian)
- # Hessian computed by Chen Qing, Lin Daimin, Meng Yanyan, Wang Weijun
- #onesToK_ = array( rep( 1 , K_ ) ) ;onesToK = array( rep( 1 , K ) )
- #x = as.matrix( x )
- #H11 = A %*% ((x %*% onesToK_) * A_) ; H12 = A %*% ((x %*% onesToK) * Aeq_)
- #H21 = Aeq %*% ((x %*% onesToK_) * A_) ; H22 = Aeq %*% ((x %*% onesToK) * Aeq_)
- #H1 = cbind( H11 , H12 ) ; H2 = cbind( H21 , H22 ) ; H = rbind( H1 , H2 ) # Hessian for constrained optimization
- return( list( objective = objective , gradient = gradient ) )
- }
-
- # find minimum of constrained multivariate function
- start = Sys.time()
- # Note: other candidates for constrained optimization in library nloptr: NLOPT_LD_SLSQP, NLOPT_LD_MMA, NLOPT_LN_AUGLAG, NLOPT_LD_AUGLAG_EQ
- # See NLOPT open-source site for more details: http://ab-initio.mit.edu/wiki/index.php/NLopt
- local_opts <- list( algorithm = "NLOPT_LD_SLSQP", xtol_rel = 1.0e-6 ,
- check_derivatives = TRUE , check_derivatives_print = "all" ,
- eval_f = nestedfunC , eval_g_ineq = InqConstraint , eval_jac_g_ineq = jacobian_constraint )
- optimResult = nloptr( x0 = x0 , eval_f = nestedfunC , eval_g_ineq = InqConstraint , eval_jac_g_ineq = jacobian_constraint ,
- opts = list( algorithm = "NLOPT_LD_AUGLAG" , local_opts = local_opts ,
- print_level = 2 , maxeval = 1000 ,
- check_derivatives = TRUE , check_derivatives_print = "all" , xtol_rel = 1.0e-6 ) )
- end = Sys.time()
- print( c("Optimization completed in " , end - start )) ; rm( start ) ; rm( end )
-
- if ( optimResult$status < 0 ) { print( c("Exit code " , optimResult$status ) ) ; stop( "Error: The optimizer did not converge" ) }
-
- # return results of optimization
- lv = matrix( optimResult$solution , ncol = 1 )
- l = lv[ 1:K_ , , drop = FALSE ] # inequality Lagrange multipliers
- v = lv[ (K_+1):nrow( lv ) , , drop = FALSE ] # equality Lagrange multipliers
- p_ = exp( log(p) -1 - A_ %*% l - Aeq_ %*% v )
- optimizationPerformance = list( converged = (optimResult$status > 0) , ml = optimResult$objective , iterations = optimResult$iterations , sumOfProbabilities = sum( p_ ) )
+EntropyProg <- function(p, A = NULL, b = NULL, Aeq, beq) {
+
+ if(!length(A))
+ A <- matrix(,nrow = 0, ncol = 0)
+ if(!length(b))
+ b <- matrix(,nrow = 0, ncol = 0)
+
+ # count the number of constraints
+ # K_ is the number of inequality constraints in the matrix-vector pair A-b
+ K_ <- nrow(A)
+ # K is the number of equality views in the matrix-vector pair Aeq-beq
+ K <- nrow(Aeq)
+
+ # parameter checks
+ if (K_ + K == 0)
+ stop("at least one equality or inequality constraint must be specified")
+
+ if (((.999999 < sum(p)) & (sum(p) < 1.00001)) == FALSE)
+ stop("sum of probabilities from prior distribution must equal 1")
+
+ if (nrow(Aeq) != nrow(beq)){
+ stop("number of inequality constraints in matrix Aeq must match
+ number of elements in vector beq")
+ }
+ if (nrow(A) != nrow(b)) {
+ stop("number of equality constraints in matrix A must match number of
+ elements in vector b")
+ }
+
+ # calculate derivatives of constraint matrices
+ A_ <- t(A)
+ Aeq_ <- t(Aeq)
+ beq_ <- t(beq)
+
+ # starting guess for optimization search with length = to number of views
+ x0 <- matrix(0, nrow = K_ + K, ncol = 1)
+
+ # equality constraints only
+ if (!K_) {
+ # define maximum likelihood, gradient, and hessian functions for
+ # unconstrained and constrained optimization
+ eval_f_list <- function(v) {
+ # cost function for unconstrained optimization
+ # (no inequality constraints)
+ # L: Lagrangian dual function (without inequality constraints)
+ # See formula 88 on p. 22 in "Meucci - Fully Flexible Views -
+ # Theory and Practice (2010)".
+ # t(x): the derivative x'
+ # Aeq_: the derivative of the Aeq matrix of equality constraints
+ # (denoted as 'H in the paper)
+ # beq_: transpose of the vector associated with Aeq equality
+ # constraints
+ # L= x' * (log(x) - log(p) + Aeq_ * v) - beq_ * v
+ # 1xJ * (Jx1 - Jx1 + JxN+1 * N+1x1) - 1xN+1 * N+1x1
+
+ x <- exp(log(p) - 1 - Aeq_ %*% v)
+ x <- apply(cbind(x, 10 ^ -32), 1, max) # robustification
+ L <- t(x) %*% (log(x) - log(p) + Aeq_ %*% v) - beq_ %*% v
+ mL <- -L # take negative values since we want to maximize
+ # evaluate gradient
+ gradient <- beq - Aeq %*% x
+
+ # evaluate Hessian
+ # We comment this out for now -- to be used when we find an
+ # optimizer that can utilize the Hessian in addition to the gradient
+ # H = (Aeq %*% ((x %*% ones(1,K)) * Aeq_))
+ # Hessian computed by Chen Qing, Lin Daimin, Meng Yanyan,Wang Weijun
+
+ return(list(objective = mL, gradient = gradient))
}
-
- print( optimizationPerformance )
-
- if ( sum( p_ ) < .999 ) { stop( "Sum or revised probabilities is less than 1!" ) }
- if ( sum( p_ ) > 1.001 ) { stop( "Sum or revised probabilities is greater than 1!" ) }
-
- return ( list ( p_ = p_ , optimizationPerformance = optimizationPerformance ) )
+
+ # setup unconstrained optimization
+ start <- Sys.time()
+ opts <- list(algorithm = "NLOPT_LD_LBFGS", xtol_rel = 1.0e-6,
+ check_derivatives = TRUE, check_derivatives_print = "all",
+ print_level = 2, maxeval = 1000)
+ optimResult <- nloptr(x0 = x0, eval_f = eval_f_list, opts = opts)
+ end <- Sys.time()
+ print(c("Optimization completed in ", end - start))
+ rm(start)
+ rm(end)
+
+ if (optimResult$status < 0) {
+ print(c("Exit code ", optimResult$status))
+ stop("Error: The optimizer did not converge")
+ }
+
+ # return results of optimization
+ v <- optimResult$solution
+ p_ <- exp(log(p) - 1 - Aeq_ %*% v)
+ optimizationPerformance <- list(converged = (optimResult$status > 0),
+ ml = optimResult$objective,
+ iterations = optimResult$iterations,
+ sumOfProbabilities = sum(p_))
+ } else {
+ #--- case inequality constraints are specified
+
+ #-- setup variables for constrained optimization
+
+ # -1 * Identity Matrix with dimension equal to number of constraints
+ InqMat <- -diag(1, K_ + K)
+ # drop rows corresponding to equality constraints
+ InqMat <- InqMat[-c(K_ + 1:nrow(InqMat)),]
+ InqVec <- matrix(0, K_, 1)
+
+ #-- define maximum likelihood, gradient, and hessian functions for
+ # constrained optimization.
+
+ # function used to evalute A %*% x <= 0 or A %*% x <= c(0,0) if there is
+ # more than one inequality constraint
+ InqConstraint <- function(x) {
+ return(InqMat %*% x)
+ }
+
+ jacobian_constraint <- function(x) {
+ return(InqMat)
+ }
+ # Jacobian of the constraints matrix. One row per constraint, one column
+ # per control parameter (x1,x2)
+ # Turns out the Jacobian of the constraints matrix is always equal to
+ # InqMat
+
+ nestedfunC <- function(lv) {
+ lv <- as.matrix(lv)
+ # inequality Lagrange multiplier
+ l <- lv[1:K_, , drop = FALSE]
+ # equality lagrange multiplier
+ v <- lv[(K_ + 1):length(lv), , drop = FALSE]
+ x <- exp(log(p) - 1 - A_ %*% l - Aeq_ %*% v)
+ x <- apply(cbind(x, 10 ^ -32), 1, max)
+
+ # L is the cost function used for constrained optimization.
+ # L is the Lagrangian dual function with inequality constraints and
+ # equality constraints
+ L <- t(x) %*% (log(x) - log(p)) + t(l) %*% (A %*% x - b) + t(v) %*%
+ (Aeq %*% x - beq)
+ objective <- -L # take negative values since we want to maximize
+
+ # calculate the gradient
+ gradient <- rbind(b - A %*% x, beq - Aeq %*% x)
+
+ ##-- compute the Hessian (commented out since no R optimizer
+ ## supports use of Hessian)
+ ## Hessian by Chen Qing, Lin Daimin, Meng Yanyan, Wang Weijun
+ # onesToK_ = array(rep(1, K_))
+ # onesToK = array(rep(1, K))
+ # x = as.matrix(x)
+ # H11 = A %*% ((x %*% onesToK_) * A_)
+ # H12 = A %*% ((x %*% onesToK) * Aeq_)
+ # H21 = Aeq %*% ((x %*% onesToK_) * A_)
+ # H22 = Aeq %*% ((x %*% onesToK) * Aeq_)
+ # H1 = cbind(H11, H12)
+ # H2 = cbind(H21, H22)
+ ## H: Hessian for constrained optimization
+ # H = rbind(H1, H2)
+
+ return(list(objective = objective, gradient = gradient))
+ }
+
+ # find minimum of constrained multivariate function
+ start <- Sys.time()
+ # Note: other candidates for constrained optimization in library nloptr:
+ # NLOPT_LD_SLSQP, NLOPT_LD_MMA, NLOPT_LN_AUGLAG, NLOPT_LD_AUGLAG_EQ
+ # See NLOPT open-source site for more details:
+ # http://ab-initio.mit.edu/wiki/index.php/NLopt
+ local_opts <- list(algorithm = "NLOPT_LD_SLSQP", xtol_rel = 1.0e-6,
+ check_derivatives = TRUE,
+ check_derivatives_print = "all", eval_f = nestedfunC,
+ eval_g_ineq = InqConstraint,
+ eval_jac_g_ineq = jacobian_constraint)
+ optimResult <- nloptr(x0 = x0, eval_f = nestedfunC,
+ eval_g_ineq = InqConstraint,
+ eval_jac_g_ineq = jacobian_constraint,
+ opts = list(algorithm = "NLOPT_LD_AUGLAG",
+ local_opts = local_opts,
+ print_level = 2, maxeval = 1000,
+ check_derivatives = TRUE,
+ check_derivatives_print = "all",
+ xtol_rel = 1.0e-6))
+ end <- Sys.time()
+ print(c("Optimization completed in ", end - start))
+ rm(start)
+ rm(end)
+
+ if (optimResult$status < 0) {
+ print(c("Exit code ", optimResult$status))
+ stop("Error: The optimizer did not converge")
+ }
+
+ # return results of optimization
+
+ lv <- matrix(optimResult$solution, ncol = 1)
+ # inequality Lagrange multipliers
+ l <- lv[1:K_, , drop = FALSE]
+ # equality Lagrange multipliers
+ v <- lv[(K_ + 1):nrow(lv), , drop = FALSE]
+ p_ <- exp(log(p) - 1 - A_ %*% l - Aeq_ %*% v)
+ optimizationPerformance <- list(converged = (optimResult$status > 0),
+ ml = optimResult$objective,
+ iterations = optimResult$iterations,
+ sumOfProbabilities = sum(p_))
+ }
+
+ print(optimizationPerformance)
+
+ if (sum(p_) < .999)
+ stop("Sum or revised probabilities is less than 1!")
+ if (sum(p_) > 1.001)
+ stop("Sum or revised probabilities is greater than 1!")
+
+ return (list (p_ = p_, optimizationPerformance = optimizationPerformance))
}
#' Generates histogram
#'
-#' @param X a vector containing the data points
-#' @param p a vector containing the probabilities for each of the data points in X
+#' @param X vector containing the data points
+#' @param p vector containing the probabilities for each of the data
+#' points in X
#' @param nBins expected number of Bins the data set is to be broken down into
-#' @param freq a boolean variable to indicate whether the graphic is a representation of frequencies
+#' @param freq boolean variable to indicate whether the graphic is a
+#' representation of frequencies
#'
#' @return a list with
#' f the frequency for each midpoint
@@ -194,34 +309,32 @@
#' @references
#' \url{http://www.symmys.com}
#' See Meucci script pHist.m used for plotting
-#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com}
+#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls
+#' \email{xaviervallspla@@gmail.com}
-pHist = function( X , p , nBins, freq = FALSE )
-{
- if ( length( match.call() ) < 3 )
- {
- J = dim( X )[ 1 ]
- nBins = round( 10 * log(J) )
+PHist <- function(X, p, nBins, freq = FALSE) {
+
+ if (length(match.call()) < 3) {
+ J <- dim(X)[1]
+ nBins <- round(10 * log(J))
}
-
- dist = hist( x = X , breaks = nBins , plot = FALSE );
- n = dist$counts
- x = dist$breaks
- D = x[2] - x[1]
-
- N = length(x)
- np = zeros(N , 1)
-
- for (s in 1:N)
- {
- # The boolean Index is true is X is within the interval centered at x(s) and within a half-break distance
- Index = ( X >= x[s] - D/2 ) & ( X <= x[s] + D/2 )
+
+ x <- hist(x = X, breaks = nBins, plot = FALSE)$breaks
+ D <- x[2] - x[1]
+
+ N <- length(x)
+ np <- zeros(N, 1)
+
+ for (s in 1:N) {
+ # The boolean Index is true if X is within the interval centered at x(s) and
+ # within a half-break distance
+ Index <- (X >= x[s] - D / 2) & (X <= x[s] + D / 2)
# np = new probabilities?
- np[ s ] = sum( p[ Index ] )
- f = np/D
+ np[s] <- sum(p[Index])
+ f <- np / D
}
-
- plot( x , f , type = "h", main = "Portfolio return distribution")
-
- return( list( f = f , x = x ) )
+
+ plot(x, f, type = "h", main = "Portfolio return distribution")
+
+ return(list(f = f, x = x))
}
Modified: pkg/Meucci/R/PlotDistributions.R
===================================================================
--- pkg/Meucci/R/PlotDistributions.R 2015-06-11 21:45:59 UTC (rev 3672)
+++ pkg/Meucci/R/PlotDistributions.R 2015-06-12 16:43:18 UTC (rev 3673)
@@ -9,39 +9,36 @@
#' @param Sigma_ a vector containing the posterior standard deviations
#'
#' @references
-#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158}
+#' A. Meucci, "Fully Flexible Views: Theory and Practice"
+#' \url{http://www.symmys.com/node/158}
#' See Meucci script for "PlotDistributions.m"
#'
#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}
#' @export
-PlotDistributions = function( X , p , Mu , Sigma , p_ , Mu_ , Sigma_ )
-{
- J = nrow( X )
- N = ncol( X )
-
- NBins = round( 10*log( J ) )
-
- for ( n in 1:N )
- {
+PlotDistributions <- function(X , p , Mu , Sigma , p_ , Mu_ , Sigma_) {
+ J <- nrow(X)
+ N <- ncol(X)
+
+ NBins <- round(10 * log(J))
+
+ for (n in 1:N) {
# set ranges
- xl = min( X[ , n ] )
- xh = max( X[ , n ] )
- x = as.matrix( seq( from=xl, to=xh, by=(xh-xl) / 100 ) )
-
+ xl <- min(X[, n])
+ xh <- max(X[, n])
+ x <- as.matrix(seq(from = xl, to = xh, by = (xh - xl) / 100))
+
# posterior numerical
- # h3 = pHist(X[ ,n] , p_ , NBins )
-
- # posterior analytical
- y1 = dnorm( x , Mu_[n] , sqrt( Sigma_[n,n] ) )
- h4 = plot( x , y1, type='l', col='red', xlab='', ylab='' )
-
+ PHist(X[,n] , p_ , NBins)
+
+ # posterior analytical
+ y1 <- dnorm(x , Mu_[n] , sqrt(Sigma_[n,n]))
+ lines(x , y1, type = "l", col = "red", xlab = "", ylab = "")
[TRUNCATED]
To get the complete diff run:
svnlook diff /svnroot/returnanalytics -r 3673
More information about the Returnanalytics-commits
mailing list