[Returnanalytics-commits] r3453 - in pkg/PortfolioAnalytics/sandbox: . FFEV

noreply at r-forge.r-project.org noreply at r-forge.r-project.org
Mon Jun 30 23:40:38 CEST 2014


Author: rossbennett34
Date: 2014-06-30 23:40:38 +0200 (Mon, 30 Jun 2014)
New Revision: 3453

Added:
   pkg/PortfolioAnalytics/sandbox/FFEV/
   pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_CVaR_Recursion.R
   pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_CaseStudy.R
   pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_demo.R
Log:
Copying Fully Flexible Extreme Views scripts from Meucci package to a sandbox folder

Added: pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_CVaR_Recursion.R
===================================================================
--- pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_CVaR_Recursion.R	                        (rev 0)
+++ pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_CVaR_Recursion.R	2014-06-30 21:40:38 UTC (rev 3453)
@@ -0,0 +1,134 @@
+# This script illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling
+# This script complements the article
+#	"Fully Flexible Extreme Views"
+#	by A. Meucci, D. Ardia, S. Keel
+#	available at www.ssrn.com
+# The most recent version of this code is available at
+# MATLAB Central - File Exchange
+
+# Prior market model: standard normal, discretized below on a Gauss-Hermite grid
+emptyMatrix = matrix( nrow=0 , ncol=0 )
+market.mu   = 0.0
+market.sig2 = 1.0
+market.pdf = function(x) dnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+market.cdf = function(x) pnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+market.rnd = function(x) rnorm( x , mean = market.mu , sd = sqrt(market.sig2) ) 
+market.inv = function(x) qnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+market.VaR95 = market.inv(0.05) # analytical 95% VaR: 5th percentile of the prior
+market.CVaR95 = integrate( function( x ) ( x * market.pdf( x ) ), -100, market.VaR95)$val / 0.05 # analytical 95% CVaR; lower limit -100 proxies -Inf
+
+tmp = ( ghqx - min( ghqx ) )/( max( ghqx ) - min( ghqx ) ) # rescale Gauss-Hermite zeros (ghqx, assumed pre-loaded -- see HermiteGrid_demo.R) so they belong to [0,1]
+epsilon = 1e-10
+Lower = market.inv( epsilon )
+Upper = market.inv( 1 - epsilon )
+X = Lower + tmp * ( Upper - Lower ) # rescale mesh to span essentially all of the prior's support
+
+p = integrateSubIntervals( X, market.cdf ) # prior probabilities on the grid (helper from the Meucci code, not defined here)
+p = normalizeProb( p )
+J = nrow( X )
+
+# Entropy posterior from extreme view on CVaR: brute-force approach
+  
+# view of the analyst: posterior 95% CVaR pinned at -3.0
+view.CVaR95 = -3.0
+
+# Iterate over candidate VaR95 levels between the view and the prior VaR95
+nVaR95 = 100
+VaR95  = seq(view.CVaR95, market.VaR95, len=nVaR95)
+p_     = matrix(NaN, nrow = J, ncol = nVaR95 ) # posterior probabilities, one column per candidate
+s_     = matrix(NaN, nrow = nVaR95, ncol = 1 ) # tail size (number of grid points) per candidate
+KLdiv  = matrix(NaN, nrow = nVaR95, ncol = 1) # relative entropy of each posterior vs the prior
+
+for ( i in 1:nVaR95 ) {
+  idx = as.matrix( X <= VaR95[i] ) # indicator of grid points in the candidate tail
+  s_[i] = sum(idx)
+  posteriorEntropy = EntropyProg(p, t( idx ),  as.matrix( 0.05 ), rbind( rep(1, J), t( idx * X ) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  p_[,i] = posteriorEntropy$p_
+  KLdiv[i] = posteriorEntropy$optimizationPerformance$ml # minimized relative entropy for this candidate
+}
+                                 
+# Display results: pick the tail size with minimum relative entropy
+plot( s_, KLdiv )
+dummy = min( KLdiv )
+idxMin = which.min( KLdiv ) 
+plot( s_[idxMin], KLdiv[idxMin] )
+
+tmp = p_[, idxMin] # minimum-entropy posterior, renormalized
+tmp = tmp / sum( tmp )
+plot( X, tmp )
+x = seq(min(X), max(X), len = J);
+tmp = market.pdf(x) # prior density on an even mesh, for comparison
+tmp = tmp / sum(tmp)
+plot(x, tmp)
+plot(market.CVaR95, 0)
+plot(view.CVaR95, 0)
+
+# Entropy posterior from extreme view on CVaR: Newton Raphson approach
+
+s = emptyMatrix # sequence of tail sizes visited by the recursion
+
+# initial value: number of grid points in the prior 5% tail
+idx = as.matrix( cumsum(p) <= 0.05 )
+s[1] = sum(idx) # NOTE(review): subassignment into a 0x0 matrix errors in R ("subscript out of bounds") -- likely a MATLAB translation artifact; verify
+posteriorEntropy = EntropyProg(p, t( idx ), as.matrix( 0.05 ), rbind( rep(1, J), t( idx * X ) ), rbind( 1, 0.05 * view.CVaR95) )                                 
+KLdiv = as.matrix( posteriorEntropy$optimizationPerformance$ml )
+p_ = posteriorEntropy$p_
+                                                                               
+# iterate the discrete Newton step on the tail size s
+doStop = 0
+i = 1
+while ( !doStop ) {
+  i = i + 1
+  
+  idx = cbind( matrix(1, 1, s[i - 1] ), matrix(0, 1, J - s[i-1] ) ) # indicator of the first s[i-1] grid points
+  posteriorEntropy1 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( t(idx) * X) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s]  = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  
+  idx = cbind( matrix(1, 1, s[i - 1] + 1 ), matrix(0, 1, J - s[i - 1] - 1) )
+  posteriorEntropy2 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( t(idx) * X) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s1] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  
+  idx = cbind( matrix(1, 1, s[i - 1] + 2 ), matrix(0, 1, J - s[i - 1] - 2) )
+  posteriorEntropy3 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( t(idx) * X) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s2] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  
+  # first difference of entropy with respect to the tail size
+  DE  = posteriorEntropy2$optimizationPerformance$ml - posteriorEntropy1$optimizationPerformance$ml
+  
+  # second difference
+  D2E = posteriorEntropy3$optimizationPerformance$ml - 2 * posteriorEntropy2$optimizationPerformance$ml + posteriorEntropy1$optimizationPerformance$ml
+  
+  # optimal s: discrete Newton step, rounded to the nearest integer tail size
+  s = rbind( s, round( s[i - 1] - (DE / D2E) ) ) 
+  
+  tmp = emptyMatrix
+  idx  = cbind( matrix( 1, 1, s[i] ), matrix( 0, 1, J - s[i] ) )
+  tempEntropy = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( t(idx) * X) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  # [tmp.p_, tmp.KLdiv] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  p_ = cbind( p_, tempEntropy$p_ )
+  KLdiv = rbind( KLdiv, tempEntropy$optimizationPerformance$ml ) #ok<*AGROW>
+  
+  # if change in KLdiv less than one percent, stop
+  if( abs( ( KLdiv[i] - KLdiv[i - 1] ) / KLdiv[i - 1] )  < 0.01 ) { doStop = 1 }
+}
+
+# Display results
+
+N = length(s)
+
+plot(1:N, KLdiv) # entropy at each Newton iteration
+x = seq(min(X), max(X), len = J)
+tmp = market.pdf(x) # prior density, renormalized for comparison with posteriors
+tmp = tmp / sum(tmp)
+plot( t( X ), tmp ) # prior
+plot( t( X ), p_[, ncol(p_)] ) # final posterior
+plot( market.CVaR95, 0.0 )
+plot( view.CVaR95, 0.0 )
+
+# zoom here: prior vs successive posteriors from the recursion
+plot( t( X ), tmp )
+plot( t( X ), p_[, 1] )
+plot( t( X ), p_[, 2] )
+plot( t( X ), p_[, ncol(p_)] )
+plot( market.CVaR95, 0 )
+plot( view.CVaR95, 0 )
\ No newline at end of file

Added: pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_CaseStudy.R
===================================================================
--- pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_CaseStudy.R	                        (rev 0)
+++ pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_CaseStudy.R	2014-06-30 21:40:38 UTC (rev 3453)
@@ -0,0 +1,95 @@
+# This script estimates the prior of a hedge fund return and processes extreme views on CVaR 
+# according to Entropy Pooling
+# This script complements the article
+#  "Fully Flexible Extreme Views"
+#	by A. Meucci, D. Ardia, S. Keel
+#	available at www.ssrn.com
+# The most recent version of this code is available at
+# MATLAB Central - File Exchange
+
+# IMPORTANT - This script is about the methodology, not the input data, which has been modified
+
+xi = as.matrix( 100 * data[, 2] ) # returns in percent; assumes 'data' is already loaded -- TODO confirm source
+n = nrow(xi)
+
+# bandwidth for the kernel density estimate (kernelbw: helper from the Meucci code)
+bw = kernelbw(xi)
+
+# exponential-decay weights with a half-life of n/2 observations, normalized to sum to 1
+lambda = log(2) / (n / 2)
+wi = as.matrix( exp( -lambda * ( n - seq( from=n, to=1 ) ) ) )
+wi = as.matrix( apply( wi, 2, rev ) / sum( wi ) )
+                
+# Prior market model
+# weighted kernel density (kernelpdf/kernelcdf/kernelinv: helpers from the Meucci code)
+
+market.mu  = mean(xi)
+market.pdf = function ( x ) kernelpdf( x, xi, bw, wi )
+market.cdf = function ( x ) kernelcdf( x, xi, bw, wi )
+market.inv = function ( x ) kernelinv( x, xi, bw, wi )
+market.VaR95 = market.inv( c(0.05) )
+market.CVaR95 = integrate( function( x ) x * market.pdf( x ), -100, market.VaR95 )$val / 0.05 # lower limit -100 proxies -Inf
+
+# numerical (Gauss-Hermite grid) prior 
+tmp = ( ghqx - min( ghqx ) )/( max( ghqx ) - min( ghqx ) ) # rescale GH zeros (ghqx, assumed pre-loaded) so they belong to [0,1]
+epsilon = 1e-10
+Lower = market.inv( epsilon )
+Upper = market.inv( 1 - epsilon )
+X  = Lower + tmp * ( Upper - Lower ) # rescale mesh
+                
+p = integrateSubIntervals( X , market.cdf ) # prior probabilities on the grid
+p = normalizeProb( p )
+J = nrow( X )
+                
+# Entropy posterior from extreme view on mean and CVaR
+view.mu   = mean( xi ) - 1.0
+view.CVaR95 = market.CVaR95 - 1.0
+                
+# Newton Raphson recursion on the tail size s
+emptyMatrix = matrix( ,nrow = 0, ncol = 0)
+s = emptyMatrix
+idx = as.matrix( cumsum(p) <= 0.05 ) # initial tail: grid points in the prior 5% tail
+s[1] = sum(idx) # NOTE(review): subassignment into a 0x0 matrix errors in R ("subscript out of bounds") -- likely a MATLAB translation artifact; verify
+posteriorEntropy = EntropyProg(p, t( idx ), as.matrix( 0.05 ), rbind( rep(1, J), t( X ), t( idx * X ) ), rbind( 1, view.mu, 0.05 * view.CVaR95) )
+KLdiv = as.matrix( posteriorEntropy$optimizationPerformance$ml )
+p_ = posteriorEntropy$p_
+
+doStop = 0
+i = 1
+while ( !doStop ) {
+  i = i + 1
+
+  idx = cbind( matrix(1, 1, s[i - 1] ), matrix(0, 1, J - s[i-1] ) ) # indicator of the first s[i-1] grid points
+  posteriorEntropy1 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( X ), t( t(idx) * X) ), rbind( 1, view.mu, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s]  = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+   
+  idx = cbind( matrix(1, 1, s[i - 1] + 1 ), matrix(0, 1, J - s[i - 1] - 1) )
+  posteriorEntropy2 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( X ), t( t(idx) * X) ), rbind( 1, view.mu, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s1] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+   
+  idx = cbind( matrix(1, 1, s[i - 1] + 2 ), matrix(0, 1, J - s[i - 1] - 2) )
+  posteriorEntropy3 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( X ), t( t(idx) * X) ), rbind( 1, view.mu, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s2] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+
+  # first difference of entropy with respect to the tail size
+  DE  = posteriorEntropy2$optimizationPerformance$ml - posteriorEntropy1$optimizationPerformance$ml
+  
+  # second difference
+  D2E = posteriorEntropy3$optimizationPerformance$ml - 2 * posteriorEntropy2$optimizationPerformance$ml + posteriorEntropy1$optimizationPerformance$ml
+  
+  # optimal s: discrete Newton step, rounded to the nearest integer tail size
+  s = rbind( s, round( s[i - 1] - (DE / D2E) ) ) 
+                                                                                             
+  tmp = emptyMatrix
+  idx  = cbind( matrix( 1, 1, s[i] ), matrix( 0, 1, J - s[i] ) )
+  tempEntropy = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( X ), t( t(idx) * X) ), rbind( 1, view.mu, 0.05 * view.CVaR95 ) )
+  # [tmp.p_, tmp.KLdiv] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  p_ = cbind( p_, tempEntropy$p_ )
+  KLdiv = rbind( KLdiv, tempEntropy$optimizationPerformance$ml ) #ok<*AGROW>
+
+  # if change in KLdiv less than one percent, stop
+  if( abs( ( KLdiv[i] - KLdiv[i - 1] ) / KLdiv[i - 1] )  < 0.01 ) { doStop = 1 }
+}                
+
+plot( t(X), p ) # prior
+plot( t(X), p_[,ncol(p_)]) # final posterior
\ No newline at end of file

Added: pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_demo.R
===================================================================
--- pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_demo.R	                        (rev 0)
+++ pkg/PortfolioAnalytics/sandbox/FFEV/HermiteGrid_demo.R	2014-06-30 21:40:38 UTC (rev 3453)
@@ -0,0 +1,83 @@
+# This script compares the performance of plain Monte Carlo 
+# versus grid in applying Entropy Pooling to process extreme views
+# This script complements the article
+#    "Fully Flexible Extreme Views" A. Meucci, D. Ardia, S. Keel available at www.ssrn.com
+# The most recent version of this code is available at MATLAB Central - File Exchange
+library(matlab)
+
+####################################################################################
+# Prior market model
+####################################################################################
+# analytical (normal) prior: standard normal
+emptyMatrix = matrix( nrow=0 , ncol=0 )
+market.mu   = 0.0
+market.sig2 = 1.0
+market.pdf = function(x) dnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+market.cdf = function(x) pnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+market.rnd = function(x) rnorm( x , mean = market.mu , sd = sqrt(market.sig2) ) 
+market.inv = function(x) qnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+
+# numerical (Monte Carlo) prior: J equally weighted random draws 
+monteCarlo = emptyMatrix
+monteCarlo.J = 100000
+monteCarlo.X = market.rnd( monteCarlo.J )
+monteCarlo.p = normalizeProb( 1/monteCarlo.J * ones( monteCarlo.J , 1 ) )
+
+# numerical (Gauss-Hermite grid) prior 
+ghqMesh = emptyMatrix
+load( "ghq1000" ) # presumably loads ghqx, the Gauss-Hermite quadrature zeros used below -- verify file contents
+
+tmp = ( ghqx - min( ghqx ) ) / ( max( ghqx ) - min( ghqx ) ) # rescale GH zeros so they belong to [0,1]
+epsilon = 1e-10
+Lower = market.inv( epsilon )
+Upper = market.inv( 1-epsilon )
+ghqMesh.X  = Lower + tmp*(Upper-Lower) # rescale mesh
+
+p = integrateSubIntervals(ghqMesh.X , market.cdf) # prior probabilities on the grid
+ghqMesh.p = normalizeProb(p)
+ghqMesh.J = nrow(ghqMesh.X) 
+
+####################################################################################
+# Entropy posterior from extreme view on expectation
+####################################################################################
+# view of the analyst: posterior mean pinned at -3.0
+view = emptyMatrix
+view.mu = -3.0 
+
+# analytical (known since normal model has analytical solution)
+truePosterior = emptyMatrix 
+truePosterior = Prior2Posterior( market.mu, 1, view.mu, market.sig2, 0 )
+truePosterior$pdf = function(x) dnorm( x, truePosterior.mu , sqrt(truePosterior.sig2) ) # NOTE(review): 'truePosterior.mu' is a distinct symbol from 'truePosterior$mu' in R -- likely a MATLAB translation bug; verify
+
+# numerical (Monte Carlo): equality constraints = probabilities sum to 1, posterior mean = view.mu
+Aeq = rbind( ones( 1 , monteCarlo.J ) , t(monteCarlo.X) )
+beq = rbind( 1 , view.mu )
+monteCarloOptimResult = EntropyProg( monteCarlo.p , emptyMatrix , emptyMatrix , Aeq , beq )
+
+monteCarlo.p_ = monteCarloOptimResult$p_
+monteCarlo.KLdiv = monteCarloOptimResult$optimizationPerformance$ml
+
+# numerical (Gaussian-Hermite grid): same constraints on the grid prior
+Aeq = rbind( ones( 1 , ghqMesh.J ) , t( ghqMesh.X ) )
+beq = rbind( 1 , view.mu )
+ghqMeshOptimResult = EntropyProg( ghqMesh.p , emptyMatrix , emptyMatrix , Aeq , beq )
+
+ghqMesh.p_ = ghqMeshOptimResult$p_
+ghqMesh.KLdiv = ghqMeshOptimResult$optimizationPerformance$ml
+
+####################################################################################
+# Plots
+####################################################################################
+xmin = min(ghqMesh.X)
+xmax = max(ghqMesh.X)
+ymax = 1.0
+xmesh = t(linspace(xmin, xmax, ghqMesh.J))
+
+# Monte Carlo posterior, as a probability-weighted histogram (pHist: helper from the Meucci code)
+plotDataMC = pHist( monteCarlo.X , monteCarlo.p_ , 50 )
+plot( plotDataMC$x , plotDataMC$f , type = "l" )
+
+# Gauss Hermite Grid posterior
+plotDataGHQ = pHist(data.matrix(ghqMesh.X), ghqMesh.p_ , 50 )
+plot( plotDataGHQ$x , plotDataGHQ$f , type = "l" )
+



More information about the Returnanalytics-commits mailing list