[Returnanalytics-commits] r2998 - in pkg/Meucci: . R data demo man

noreply at r-forge.r-project.org
Thu Sep 5 13:25:45 CEST 2013


Author: xavierv
Date: 2013-09-05 13:25:44 +0200 (Thu, 05 Sep 2013)
New Revision: 2998

Added:
   pkg/Meucci/R/DoubleDecay.R
   pkg/Meucci/R/Fit2Moms.R
   pkg/Meucci/R/LeastInfoKernel.R
   pkg/Meucci/R/PlotDistributions.R
   pkg/Meucci/data/dbFFP.Rda
   pkg/Meucci/data/fILMR.Rda
   pkg/Meucci/demo/FullFlexProbs.R
   pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R
   pkg/Meucci/man/DoubleDecay.Rd
   pkg/Meucci/man/Fit2Moms.Rd
   pkg/Meucci/man/LeastInfoKernel.Rd
Modified:
   pkg/Meucci/DESCRIPTION
   pkg/Meucci/NAMESPACE
   pkg/Meucci/R/EntropyProg.R
   pkg/Meucci/R/Prior2Posterior.R
   pkg/Meucci/man/PlotDistributions.Rd
Log:
- added the code for the scripts Historical Scenarios with Fully Flexible Probabilities and Fully Integrated Liquidity and Market Risk

Modified: pkg/Meucci/DESCRIPTION
===================================================================
--- pkg/Meucci/DESCRIPTION	2013-09-05 09:21:51 UTC (rev 2997)
+++ pkg/Meucci/DESCRIPTION	2013-09-05 11:25:44 UTC (rev 2998)
@@ -99,3 +99,7 @@
     'Log2Lin.R'
     'PlotCompositionEfficientFrontier.R'
     'MaxRsqTS.R'
+    'PlotDistributions.R'
+    'DoubleDecay.R'
+    'Fit2Moms.R'
+    'LeastInfoKernel.R'

Modified: pkg/Meucci/NAMESPACE
===================================================================
--- pkg/Meucci/NAMESPACE	2013-09-05 09:21:51 UTC (rev 2997)
+++ pkg/Meucci/NAMESPACE	2013-09-05 11:25:44 UTC (rev 2998)
@@ -11,10 +11,12 @@
 export(ConvertCompoundedReturns2Price)
 export(Cumul2Raw)
 export(DetectOutliersViaMVE)
+export(DoubleDecay)
 export(EfficientFrontierPrices)
 export(EfficientFrontierReturns)
 export(EfficientFrontierReturnsBenchmark)
 export(EntropyProg)
+export(Fit2Moms)
 export(FitExpectationMaximization)
 export(FitMultivariateGarch)
 export(FitOrnsteinUhlenbeck)
@@ -25,6 +27,7 @@
 export(hermitePolynomial)
 export(integrateSubIntervals)
 export(InterExtrapolate)
+export(LeastInfoKernel)
 export(linreturn)
 export(Log2Lin)
 export(LognormalCopulaPdf)
@@ -44,6 +47,7 @@
 export(PlotDistributions)
 export(PlotMarginalsNormalInverseWishart)
 export(PlotVolVsCompositionEfficientFrontier)
+export(Prior2Posterior)
 export(ProjectionStudentT)
 export(QuantileMixture)
 export(RandNormalInverseWishart)

Added: pkg/Meucci/R/DoubleDecay.R
===================================================================
--- pkg/Meucci/R/DoubleDecay.R	                        (rev 0)
+++ pkg/Meucci/R/DoubleDecay.R	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,46 @@
+#' Computes a double-decay covariance matrix.
+#'
+#' This function computes a double-decay covariance matrix for the risk drivers provided, as described in  
+#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
+#' GARP Risk Professional, Dec 2010, p 47-51
+#' 
+#' @param   X       matrix representing the risk drivers.
+#' @param   lmd_c   numeric representing the low decay (long half-life) for the correlations.
+#' @param   lmd_s   numeric representing the high decay (short half-life) for the volatilities.
+#' @return  m       matrix of zeros, representing the expectation of the risk drivers.
+#' @return  S       matrix representing the double-decay estimation of the covariance matrix of the risk drivers.
+#' 
+#' @references 
+#' \url{http://www.symmys.com/node/150}
+#' See Meucci script for "DoubleDecay.m"
+#' 
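+#' @examples
+#' # Illustrative sketch only (not part of the original MATLAB port): simulated
+#' # risk drivers with the decay parameters used in the FullFlexProbs demo.
+#' \dontrun{
+#' X  = matrix( rnorm( 500 * 3 ), 500, 3 )   # hypothetical T x N risk-driver scenarios
+#' dd = DoubleDecay( X, lmd_c = 0.0055, lmd_s = 0.0166 )
+#' dd$S                                      # double-decay covariance estimate
+#' }
+#'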
+#' @author Xavier Valls \email{flamejat@@gmail.com}
+#' @export
+
+DoubleDecay = function( X, lmd_c, lmd_s)
+{
+
+	N = dim( X )
+	m = matrix( 0, N[2], 1);
+
+	p_c = exp( -lmd_c * ( N[1] - t(rbind( 1:N[1] ) ) ) );
+
+	p_c = kronecker( matrix( 1, 1, N[2] ), p_c / sum( p_c ) ); 	#  workaround on p_c=repmat( p_c/sum(p_c),1,N);
+
+	S_1 = t( p_c * X ) %*% X;
+
+	C = cov2cor( S_1 );
+
+	p_s = exp( -lmd_s * ( N[1] - t(rbind( 1:N[1] ) ) ) );
+
+	p_s = kronecker(matrix(1,1,N[2]),p_s/sum(p_s));
+	S_2 = t( p_s*X ) %*% X;
+
+	R = cov2cor(S_2) ;  
+
+	s = sqrt(diag(S_2));
+	S = diag(s) %*% C %*% diag(s);
+
+	return( list( m = m , S = S ) )
+}
\ No newline at end of file

Modified: pkg/Meucci/R/EntropyProg.R
===================================================================
--- pkg/Meucci/R/EntropyProg.R	2013-09-05 09:21:51 UTC (rev 2997)
+++ pkg/Meucci/R/EntropyProg.R	2013-09-05 11:25:44 UTC (rev 2998)
@@ -178,40 +178,8 @@
     return ( list ( p_ = p_ , optimizationPerformance = optimizationPerformance ) )
 }
 
-#' Calculate the full-confidence posterior distributions of Mu and Sigma
-#'
-#' \deqn{ \tilde{ \mu }  \equiv \mu +  \Sigma  Q'    {\big(Q \Sigma  Q' \big)}^{-1}   \big( \tilde{\mu}_{Q} - Q \mu \big),
-#' \\ \tilde{ \Sigma } \equiv \Sigma + \Sigma G' \big({\big(G \Sigma  G' \big)}^{-1} \tilde{ \Sigma }_G {\big(G \Sigma  G' \big)}^{-1} - {\big(G \Sigma  G' \big)}^{-1} \big) G \Sigma }
-#' @param M     a numeric vector with the Mu of the normal reference model
-#' @param Q     a numeric vector used to construct a view on expectation of the linear combination QX
-#' @param M_Q   a numeric vector with the view of the expectations of QX
-#' @param S     a covariance matrix for the normal reference model
-#' @param G     a numeric vector used to construct a view on covariance of the linear combination GX
-#' @param S_G   a numeric with the expectation associated with the covariance of the linear combination GX
-#'
-#' @return a list with 
-#' @return M_   a numeric vector with the full-confidence posterior distribution of Mu
-#' @return S_   a covariance matrix with the full-confidence posterior distribution of Sigma
-#'
-#' @references 
-#' \url{http://www.symmys.com}
-#' \url{http://ssrn.com/abstract=1213325}
-#' A. Meucci - "Fully Flexible Views: Theory and Practice". See formula (21) and (22) on page 7
-#' See Meucci script Prior2Posterior.m attached to Entropy Pooling Paper
-#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}
-Prior2Posterior = function( M , Q , M_Q , S , G , S_G )
-{
-  # Compute posterior moments
-  
-  if ( Q != 0 ) { M_ = M + S %*% t(Q) %*% solve( Q %*% S %*% t(Q) ) %*% ( M_Q - Q %*% M) }
-  else { M_ = M }
-  
-  if ( G != 0 ) { S_ = S + (S %*% t(G)) %*% ( solve(G %*% S %*% t(G)) %*% S_G %*% solve(G %*% S %*% t(G)) - solve( G %*% S %*% t(G)) ) %*% (G %*% S) }
-  else { S_ = S }
-  
-  return( list( M_ = M_ , S_ = S_ ) )
-}
 
+
 #' Generates histogram
 #'
 #' @param X       a vector containing the data points

Added: pkg/Meucci/R/Fit2Moms.R
===================================================================
--- pkg/Meucci/R/Fit2Moms.R	                        (rev 0)
+++ pkg/Meucci/R/Fit2Moms.R	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,44 @@
+#' Uses Entropy Pooling to compute Fully Flexible Probabilities that match a double-decay covariance matrix.
+#'
+#' This function uses Entropy Pooling to compute the posterior probabilities whose first and second moments match a double-decay covariance matrix, as described in
+#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
+#' GARP Risk Professional, Dec 2010, p 47-51
+#' 
+#' @param   X   matrix representing the risk drivers.
+#' @param   m   matrix of zeros, representing the expectation of the risk drivers.
+#' @param   S   matrix representing the double-decay estimation of the covariance matrix of the risk drivers.
+#' @return  p   vector of posterior probabilities.
+#' 
+#' @references 
+#' \url{http://www.symmys.com/node/150}
+#' See Meucci script for "S_MainFullFlexProbs.m"
+#' 
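+#' @examples
+#' # Illustrative sketch only: combine DoubleDecay and Fit2Moms on simulated
+#' # risk drivers, as in the FullFlexProbs demo (decay values taken from that demo).
+#' \dontrun{
+#' X  = matrix( rnorm( 500 * 3 ), 500, 3 )
+#' dd = DoubleDecay( X, 0.0055, 0.0166 )
+#' p  = Fit2Moms( X, dd$m, dd$S )   # posterior probabilities matching both moments
+#' }
+#'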
+#' @author Xavier Valls \email{flamejat@@gmail.com}
+#' @export
+
+Fit2Moms = function( X, m, S)
+{
+	N = dim(X);
+
+	Aeq = matrix( 1, 1, N[1] );  # constrain probabilities to sum to one...
+	beq = 1;
+
+	Aeq = rbind( Aeq , t(X) ); # ...constrain the first moments...
+	beq = rbind( beq, m );
+
+	SecMom = S + m %*% t(m);  #...constrain the second moments...
+
+	for ( k in  1:N[2] )
+	{
+		for ( l in k:N[2] )
+		{
+			Aeq = rbind( Aeq , t(X[ ,k] * X[ ,l] ) );
+			beq = rbind( beq, SecMom[k,l] );
+		}
+	}
+
+	p_0 = matrix( 1, N[1], 1) / N[1];
+
+	return( EntropyProg( p_0, matrix( , 0, 0 ), matrix( , 0, 0 ), Aeq, beq )$p_ ); # ...compute posterior probabilities
+
+}
\ No newline at end of file

Added: pkg/Meucci/R/LeastInfoKernel.R
===================================================================
--- pkg/Meucci/R/LeastInfoKernel.R	                        (rev 0)
+++ pkg/Meucci/R/LeastInfoKernel.R	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,46 @@
+#' Computes least information kernel smoothing
+#' 
+#' This function uses Entropy Pooling to compute least information kernel smoothing, as described in
+#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
+#' GARP Risk Professional, Dec 2010, p 47-51
+#'
+#' @param   Y       matrix representing the macroeconomic indicator
+#' @param   y       scalar representing the target to which Y is expected to be close in the Generalized Empirical Distribution
+#' @param   h2      N x N matrix used to constrain the second moments; set to NaN to skip the second-moment constraint
+#'
+#' @return  p       vector of posterior probabilities.
+#' 
+#' @references 
+#' \url{http://www.symmys.com/node/150}
+#' See Meucci script for "LeastInfoKernel.m"
+#' 
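+#' @examples
+#' # Illustrative sketch only, mirroring the partial-information kernel-damping
+#' # step of the FullFlexProbs demo with a simulated indicator (y = 3 as there).
+#' \dontrun{
+#' Y = matrix( rnorm( 500, mean = 3 ) )           # hypothetical T x 1 indicator
+#' p = LeastInfoKernel( Y, y = 3, h2 = cov( matrix( diff( Y ) ) ) )
+#' }
+#'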
+#' @author Xavier Valls \email{flamejat@@gmail.com}
+#' @export
+
+LeastInfoKernel = function( Y, y, h2 )
+{
+    T   = dim(Y)[1];
+    N   = dim(Y)[2];
+    Aeq = matrix( 1, 1, T );  # constrain probabilities to sum to one...
+    beq = 1;
+    # ...constrain the first moments...
+    Aeq = rbind( Aeq, t(Y) );
+    beq = rbind( beq, y );
+    
+    if( !any( is.nan( h2 ) ) )
+    {
+        SecMom = h2 + y %*% t( y );  # ...constrain the second moments...
+        for( k in 1:N )
+        {
+            for( l in k:N )
+            {
+                Aeq = rbind( Aeq, ( Y[ , k ] * Y[ , l ] ) );
+                beq = rbind( beq, SecMom[ k, l ] );
+            }
+        }
+    }
+    p_0 = matrix( 1, T, 1 ) / T;
+    p   = EntropyProg( p_0, matrix(,0,0), matrix(,0,0), Aeq, beq ); # ...compute posterior probabilities
+    return( p$p_ );
+}
+

Added: pkg/Meucci/R/PlotDistributions.R
===================================================================
--- pkg/Meucci/R/PlotDistributions.R	                        (rev 0)
+++ pkg/Meucci/R/PlotDistributions.R	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,41 @@
+#' Plot numerical and analytical prior and posterior distributions
+#'
+#' @param X       a matrix containing the dataset, one column per variable
+#' @param p       a vector containing the prior probability values
+#' @param Mu      a vector containing the prior means
+#' @param Sigma   the prior covariance matrix
+#' @param p_      a vector containing the posterior probability values
+#' @param Mu_     a vector containing the posterior means
+#' @param Sigma_  the posterior covariance matrix
+#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}
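+#'
+#' @examples
+#' # Minimal sketch with simulated data; all inputs here are illustrative only.
+#' \dontrun{
+#' X = matrix( rnorm( 1000 ) )
+#' p = matrix( 1 / 1000, 1000, 1 )
+#' PlotDistributions( X, p, Mu = 0, Sigma = matrix( 1 ),
+#'                    p_ = p, Mu_ = 0.1, Sigma_ = matrix( 1.2 ) )
+#' }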
+#' @export
+PlotDistributions = function( X , p , Mu , Sigma , p_ , Mu_ , Sigma_ )
+{
+  J = nrow( X )
+  N = ncol( X )
+    
+  NBins = round( 10*log( J ) )
+    
+  for ( n in 1:N )
+  {        
+    # set ranges
+    xl = min( X[ , n ] )
+    xh = max( X[ , n ] )
+    x = as.matrix(seq(from=xl, to=xh, by=(xh-xl)/100))
+        
+    # posterior numerical
+    # h3 = pHist(X[ ,n] , p_ , NBins )
+        
+    # posterior analytical        
+    y1 = dnorm( x , Mu_[n] , sqrt( Sigma_[n,n] ) )
+    h4 = plot( x , y1,  type='l', col='red', xlab='', ylab='' )        
+        
+    # prior analytical
+    par(new = TRUE)
+    y2 = dnorm( x , Mu[n] ,sqrt( Sigma[n,n] ) )
+    h2 = plot( x , y2, type='l', col='blue', xlab='', ylab='' )
+        
+    # xlim( cbind( xl , xh ) )
+    legend( "topright", legend = c( "posterior", "prior" ), lwd = c( 0.2, 0.2 ), lty = c( 1, 1 ), col = c( "red", "blue" ) )
+  }
+}
\ No newline at end of file

Modified: pkg/Meucci/R/Prior2Posterior.R
===================================================================
--- pkg/Meucci/R/Prior2Posterior.R	2013-09-05 09:21:51 UTC (rev 2997)
+++ pkg/Meucci/R/Prior2Posterior.R	2013-09-05 11:25:44 UTC (rev 2998)
@@ -1,41 +1,36 @@
-#' Plot numerical and analytical prior and posterior distributions
+
+#' Calculate the full-confidence posterior distributions of Mu and Sigma
 #'
-#' @param X       a vector containing the dataset
-#' @param p       a vector cotaining the prior probability values
-#' @param Mu      a vector containing the prior means
-#' @param Sigma   a vector containing the prior standard deviations
-#' @param p_      a vector containing the posterior probability values
-#' @param Mu_     a vector containing the posterior means
-#' @param Sigma_  a vector containing the posterior standard deviations
+#' \deqn{ \tilde{ \mu }  \equiv \mu +  \Sigma  Q'    {\big(Q \Sigma  Q' \big)}^{-1}   \big( \tilde{\mu}_{Q} - Q \mu \big),
+#' \\ \tilde{ \Sigma } \equiv \Sigma + \Sigma G' \big({\big(G \Sigma  G' \big)}^{-1} \tilde{ \Sigma }_G {\big(G \Sigma  G' \big)}^{-1} - {\big(G \Sigma  G' \big)}^{-1} \big) G \Sigma }
+#' @param M     a numeric vector with the Mu of the normal reference model
+#' @param Q     a numeric vector used to construct a view on expectation of the linear combination QX
+#' @param M_Q   a numeric vector with the view of the expectations of QX
+#' @param S     a covariance matrix for the normal reference model
+#' @param G     a numeric vector used to construct a view on covariance of the linear combination GX
+#' @param S_G   a numeric with the expectation associated with the covariance of the linear combination GX
+#'
+#' @return a list with 
+#' @return M_   a numeric vector with the full-confidence posterior distribution of Mu
+#' @return S_   a covariance matrix with the full-confidence posterior distribution of Sigma
+#'
+#' @references 
+#' \url{http://www.symmys.com}
+#' \url{http://ssrn.com/abstract=1213325}
+#' A. Meucci - "Fully Flexible Views: Theory and Practice". See formula (21) and (22) on page 7
+#' See Meucci script Prior2Posterior.m attached to Entropy Pooling Paper
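+#'
+#' @examples
+#' # Minimal illustration (not from the paper): a two-asset normal reference
+#' # model with a full-confidence view that the first expectation equals 1%.
+#' \dontrun{
+#' M = matrix( c( 0, 0 ) )
+#' S = diag( c( 0.04, 0.09 ) )
+#' Q = matrix( c( 1, 0 ), 1, 2 )                            # pick out the first asset
+#' Prior2Posterior( M, Q, M_Q = 0.01, S, G = 0, S_G = 0 )   # view on Mu only
+#' }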
 #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}
 #' @export
-PlotDistributions = function( X , p , Mu , Sigma , p_ , Mu_ , Sigma_ )
+
+Prior2Posterior = function( M , Q , M_Q , S , G , S_G )
 {
-  J = nrow( X )
-  N = ncol( X )
-    
-  NBins = round( 10*log( J ) )
-    
-  for ( n in 1:N )
-  {        
-    # set ranges
-    xl = min( X[ , n ] )
-    xh = max( X[ , n ] )
-    x = as.matrix(seq(from=xl, to=xh, by=(xh-xl)/100))
-        
-    # posterior numerical
-    # h3 = pHist(X[ ,n] , p_ , NBins )
-        
-    # posterior analytical        
-    y1 = dnorm( x , Mu_[n] , sqrt( Sigma_[n,n] ) )
-    h4 = plot( x , y1,  type='l', col='red', xlab='', ylab='' )        
-        
-    # prior analytical
-    par(new = TRUE)
-    y2 = dnorm( x , Mu[n] ,sqrt( Sigma[n,n] ) )
-    h2 = plot( x , y2, type='l', col='blue', xlab='', ylab='' )
-        
-    # xlim( cbind( xl , xh ) )
-    legend(x = 1.5, y =0.4 ,legend=c("analytical","prior"), lwd=c(0.2,0.2), lty=c(1,1), col=c("red", "blue"))
-  }
+  # Compute posterior moments
+  
+  if ( any( Q != 0 ) ) { M_ = M + S %*% t(Q) %*% solve( Q %*% S %*% t(Q) ) %*% ( M_Q - Q %*% M) }
+  else { M_ = M }
+  
+  if ( any( G != 0 ) ) { S_ = S + (S %*% t(G)) %*% ( solve(G %*% S %*% t(G)) %*% S_G %*% solve(G %*% S %*% t(G)) - solve( G %*% S %*% t(G)) ) %*% (G %*% S) }
+  else { S_ = S }
+  
+  return( list( M_ = M_ , S_ = S_ ) )
 }
\ No newline at end of file

Added: pkg/Meucci/data/dbFFP.Rda
===================================================================
(Binary files differ)


Property changes on: pkg/Meucci/data/dbFFP.Rda
___________________________________________________________________
Added: svn:mime-type
   + application/octet-stream

Added: pkg/Meucci/data/fILMR.Rda
===================================================================
(Binary files differ)


Property changes on: pkg/Meucci/data/fILMR.Rda
___________________________________________________________________
Added: svn:mime-type
   + application/octet-stream

Added: pkg/Meucci/demo/FullFlexProbs.R
===================================================================
--- pkg/Meucci/demo/FullFlexProbs.R	                        (rev 0)
+++ pkg/Meucci/demo/FullFlexProbs.R	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,160 @@
+#' Computes the Call Price
+#' 
+#' Pricing function to apply to each scenario in order to generate the P&L distribution, as described in
+#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
+#' GARP Risk Professional, Dec 2010, p 47-51
+#'
+#' @param   P       underlying prices (scalar or vector of scenarios)
+#' @param   K       strike price
+#' @param   r       risk-free rate
+#' @param   t       time to expiry, in years
+#' @param   s       volatility
+#'
+#' @return  C       call option prices
+#' 
+#' @references 
+#' \url{http://www.symmys.com/node/150}
+#' See Meucci script for "CallPrice.m"
+#' 
+#' @author Xavier Valls \email{flamejat@@gmail.com}
+
+CallPrice = function( P, K, r, t, s )
+{
+    d_1 = ( log( P/K ) + ( r + s * s/2 ) * t ) / ( s * sqrt( t ) );   # Black-Scholes d1
+    d_2 = d_1 - s * sqrt( t );
+
+    C = P * pnorm( d_1 ) - K * exp( -r * t ) * pnorm( d_2 );
+
+    return( C );
+}
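+
+# Example (illustrative numbers only, not part of the original script):
+# a one-month at-the-money call priced with the function above.
+# CallPrice( P = 100, K = 100, r = 0.02, t = 21/252, s = 0.20 )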
+
+
+
+#' This script uses Entropy Pooling to compute Fully Flexible Probabilities for historical scenarios
+#' based on time periods, market conditions, constraints on moments, etc., as described in
+#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
+#' GARP Risk Professional, Dec 2010, p 47-51
+#'
+#' Most recent version of article and code available at
+#' http://www.symmys.com/node/150
+#' @references 
+#' \url{http://www.symmys.com/node/150}
+#' See Meucci script for "DoubleDecay.m"
+#' 
+#' @author Xavier Valls \email{flamejat@@gmail.com}
+
+##########################################################################
+# risk drivers scenarios
+###########################################################################
+
+load( "../data/dbFFP.Rda" )
+
+Infl = dbFFP$Data[ , length( dbFFP$Names ) ];
+Vix = dbFFP$Data[ , length( dbFFP$Names ) - 1 ];
+Crude = dbFFP$Data[ , length( dbFFP$Names )-3 ];
+Swp10 = dbFFP$Data[ , 2 ];
+SnP = dbFFP$Data[ , 4 ];
+
+X = diff( log( cbind( SnP, Vix, Swp10 ) ) );
+Y = matrix(Infl[ -nrow( dbFFP$Data ) ]);
+
+##########################################################################
+#assign probabilities to historical scenarios
+###########################################################################
+# DefineProbs = "1" : rolling window
+# DefineProbs = "2" : exponential smoothing
+# DefineProbs = "3" : market conditions
+# DefineProbs = "4" : kernel damping
+# DefineProbs = "5" : partial information prox. kernel damping
+# DefineProbs = "6" : partial information: match covariance
+
+DefineProbs = "6";
+
+T = dim(X)[1];
+p = matrix( 0, T, 1 );
+
+
+if( DefineProbs == 1 )
+{
+	# rolling window
+
+        tau = 2 * 252;
+        p[ 1:tau ] = 1;
+        p = p / sum( p );
+
+} else if( DefineProbs == 2 )
+{ 	
+	# exponential smoothing
+
+        lmd = 0.0166;
+        p   = exp( -lmd * ( T - ( 1 : T ) ) );
+        p   = p / sum( p );
+
+} else if( DefineProbs == 3 )
+{ 
+	# market conditions
+        Cond = Y >= 2.8;
+        p[ Cond ] = 1;
+        p = p / sum( p );
+
+} else if( DefineProbs == 4 )
+{ 
+	# kernel damping
+        y  = 3;
+        h2 = cov( matrix( diff( Y ) ) );
+        p  = dmvnorm( Y, y, h2 );   # dmvnorm() is provided by the mvtnorm package
+        p  = p / sum( p );
+    
+} else if( DefineProbs == 5 )
+{ 
+	# partial information prox. kernel damping
+        y  = 3;
+        h2 = NaN; # set h2=NaN for no conditioning on second moments
+        h2 = cov( 1 * diff( Y ) );
+        p  = LeastInfoKernel( Y, y, h2 );
+    
+} else if( DefineProbs == 6 )
+{
+	# partial information: match covariance
+
+		l_c = 0.0055;
+		l_s = 0.0166;
+
+		N = 20;
+		Dd = DoubleDecay( X, l_c, l_s );
+
+		p = Fit2Moms( X, Dd$m, Dd$S );
+}
+
+###########################################################################
+# P&L scenarios
+###########################################################################
+
+N = 20;
+
+# call parameters
+S_0    = SnP[ length(SnP) ];
+vol_0  = Vix[ length(Vix)];
+rf_0   = Swp10[ length(Swp10) ];
+K      = S_0 * ( seq( 0.8, 1.1, length = N) );
+Expiry = ( 2: (N+1) ) / 252;
+
+S_T   = S_0 * exp( X[ , 1 ] );
+vol_T = vol_0 * exp( X[ , 2 ] );
+rf_T  = rf_0 * exp( X[ , 3 ] );
+
+PnL = matrix( NaN, T, N );
+
+# securities scenarios
+for( n in 1:N )
+{
+    Call_1 = CallPrice( S_T, K[ n ], rf_T, Expiry[ n ] - 1 / 252, vol_T );
+    Call_0 = CallPrice( S_0, K[ n ], rf_0, Expiry[ n ], vol_0 );
+    PnL[ , n ] = Call_1 - Call_0;
+}
+
+# portfolio scenarios
+u     = -rbind( -matrix( 1, N/2, 1 ), matrix( 1, N/2, 1 ) ); # number of units (contracts/shares/etc)
+PnL_u = PnL %*% u;
+
+
+

Added: pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R
===================================================================
--- pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R	                        (rev 0)
+++ pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,140 @@
+#' This script computes the liquidity-risk and funding-risk adjusted P&L distribution, as described in
+#' A. Meucci, "A Fully Integrated Liquidity and Market Risk Model", Financial Analysts Journal, 68, 6, 35-47 (2012)
+#'
+#' @references 
+#' \url{http://www.symmys.com/node/350}
+#' See Meucci script "S_Main.m"
+#' 
+#' @author Xavier Valls \email{flamejat@@gmail.com}
+
+# INPUTS
+#####################################################################*
+# liquidation policy at horizon as fraction of investment
+Policy=-1;
+
+# collinearity of liquidity perturbations
+CollinLiq=1;
+
+# select only some stocks in the portfolio and equally allocate capital as a fraction of daily dollar volume
+Selectstock = 1:10 ;
+Capital_perDailyVolume = 0.2;
+
+
+# PREPARE DATA
+#####################################################################*
+# load  fILMR$Daily_Prices: closing prices 
+#       fILMR$Daily_Volumes_Shares: daily volumes   
+#       fILMR$Daily_Liq: Morgan Stanley liquidity index 
+load("../data/fILMR.Rda")
+
+# Prices and returns
+#Daily_Prices = Daily_Prices(:,Selectstock);
+Prices_0 = matrix(  fILMR$Daily_Prices[ nrow(fILMR$Daily_Prices), ] );
+Daily_LogRets = log( fILMR$Daily_Prices[ -1, ] / fILMR$Daily_Prices[ -nrow(fILMR$Daily_Prices), ] );  # daily log returns
+J = dim( Daily_LogRets )[1]
+N = dim( Daily_LogRets )[2]
+
+# volumes in shares
+#Daily_Volumes = Daily_Volumes_Shares[ , Selectstock ];
+Volumes_0 =  matrix( fILMR$Daily_Volumes_Shares[ nrow(fILMR$Daily_Volumes_Shares), ]);
+
+Volumes_t = matrix( apply( fILMR$Daily_Volumes_Shares[ -(1:(nrow(fILMR$Daily_Volumes_Shares)-250)), ], 2, mean ) );
+
+# liquidity index
+Daily_LiqChanges = diff(fILMR$Daily_Liq);
+Liq_0 = matrix( fILMR$Daily_Liq[ length(fILMR$Daily_Liq), ] );
+
+# normal simulations
+    X    = cbind( Daily_LogRets, Daily_LiqChanges );
+    m_X  = apply( X, 2, mean );
+    s2_X = cov( X ); #covariance
+    J    = 100000;
+    #RandStream.setGlobalStream(RandStream('mt19937ar', 'seed', 11)); 
+    X = rmvnorm( J, m_X, s2_X );   # rmvnorm() is provided by the mvtnorm package
+    Daily_LogRets    = X[ ,1:N ];
+    Daily_LiqChanges = X[ , dim(X)[2] ];
+
+# Fully Flexible Probabilities associated with each scenario
+Probs = matrix( 1, J, 1 ) / J;
+
+# stock prices at horizon 
+Prices_t = repmat( t( Prices_0) , J, 1 ) * exp(Daily_LogRets);
+
+# liquidity index at horizon
+Liq_t = Liq_0 * exp(Daily_LiqChanges);
+
+# pure market risk: P&L due to market risk
+PnL_mkt = Prices_t - repmat( t(Prices_0), J, 1 );
+
+
+# PORTFOLIO COMPUTATIONS
+######################################################################
+# portfolio and liquidation policy
+Weights = matrix( 0, N, 1);
+Weights[ Selectstock ] = 1 / length(Selectstock);
+DollarVolume_0 = t(Volumes_0) %*% Prices_0;
+Capital = (Capital_perDailyVolume %*% DollarVolume_0)[1];
+
+h = Capital * Weights / Prices_0;
+
+PnL_mkt_h = PnL_mkt %*% h;                           
+
+# LIQUIDITY ADJUSTMENT 
+######################################################################
+# liquidation policy
+Dh = Policy * h;
+
+# market impact
+b_a       = 0.01 * matrix( 1, N, 1 );                          
+Linear    =-b_a * Prices_0 * abs(Dh);
+NonLinear = -(10^5) * Prices_0 * matrix( apply( Daily_LogRets, 2, sd ) ) * ( ( abs(Dh)  / Volumes_t ) ^ 1.5);         
+m_Dh      = Linear + NonLinear;                                      
+    
+# state-dependent expected liquidity impact on all stocks
+s_g1   = 0.5 * sd( PnL_mkt_h );
+g1     = -pmin( PnL_mkt_h, -s_g1 ) / s_g1;
+m_Dh_x = repmat( g1, 1, N ) * repmat( t(m_Dh), J, 1 );    # (14)
+
+# state-dependent expected liquidity impact on portfolio
+m_Dh_h = m_Dh_x %*% matrix( 1, N, 1 );                    # (23)
+
+# state-independent uncertainty on liquidity impact on portfolio
+s_Dh    = 1.5 * m_Dh;                                                                       # 
+r2_Dh   = ( 1 - CollinLiq ) * cor( Daily_LogRets ) + CollinLiq * matrix( 1, N, N );         # 
+s2_Dh   = diag( s_Dh[,] , length(s_Dh) ) %*% r2_Dh %*% diag( s_Dh[,], length( s_Dh ) );     # 
+s2_Dh_h = t( matrix( 1, N, 1 ) ) %*% s2_Dh %*% matrix( 1, N, 1 );                           # 
+s_Dh_h  = sqrt(s2_Dh_h);    
+s_Dh_h  = pmax( s_Dh_h, 0.01 * sd( PnL_mkt_h ) );                                           # regularization
+
+# TOTAL P&L
+######################################################################
+# conditional center and scatter
+m_j = PnL_mkt_h + m_Dh_h;
+s_j = s_Dh_h[1] * matrix( 1, J, 1 );
+
+# pdf and cdf: taking and not taking into account funding cost
+nu   = 100;
+f_Pi = function(x){ t( Probs / s_j) %*% dt( (x-m_j) / s_j, nu ) };
+F_Pi = function(x){ t( Probs ) %*% pt( (x-m_j) / s_j, nu ) };
+
+NGrid = 200;
+x_= seq( min(PnL_mkt_h) - s_Dh_h, max(PnL_mkt_h) + s_Dh_h, length = NGrid );
+p_= NULL;
+f_Pi_plot = NULL;
+f_Pi_funding_plot = NULL;
+for( k in 1:NGrid )
+{
+    p_= rbind( p_, F_Pi( x_[ k ] ) );
+    f_Pi_plot = rbind( f_Pi_plot, f_Pi( x_[ k ] ) );
+}
+
+###########################################################################
+# plots
+dev.new()
+NumBins = round( 10 * log(J) );
+hist = hist(PnL_mkt_h,NumBins, plot = FALSE ); # compute bin width
+D = hist$mids[ 2 ]- hist$mids[ 1 ];
+hh = plot( hist$mids, hist$counts / ( J * D ), type = "h", xlab ="", ylab = "" ); # plot histogram
+hh1 = lines(x_,f_Pi_plot, col="red");
+
+legend( "topright", 1.9, c("pure market P&L","market + liquidity P&L"), col = c( "black", "red"), lty=1, bg = "gray90" );
\ No newline at end of file

Added: pkg/Meucci/man/DoubleDecay.Rd
===================================================================
--- pkg/Meucci/man/DoubleDecay.Rd	                        (rev 0)
+++ pkg/Meucci/man/DoubleDecay.Rd	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,37 @@
+\name{DoubleDecay}
+\alias{DoubleDecay}
+\title{Computes a double-decay covariance matrix.}
+\usage{
+  DoubleDecay(X, lmd_c, lmd_s)
+}
+\arguments{
+  \item{X}{matrix representing the risk drivers.}
+
+  \item{lmd_c}{numeric representing the low decay (long
+  half-life) for the correlations.}
+
+  \item{lmd_s}{numeric representing the high decay (short
+  half-life) for the volatilities.}
+}
+\value{
+  m matrix of zeros, representing the expectation of the
+  risk drivers.
+
+  S matrix representing the double-decay estimation of the
+  covariance matrix of the risk drivers.
+}
+\description{
+  This function computes a double-decay covariance matrix
+  for the risk drivers provided, as described in A. Meucci,
+  "Personalized Risk Management: Historical Scenarios with
+  Fully Flexible Probabilities" GARP Risk Professional, Dec
+  2010, p 47-51
+}
+\author{
+  Xavier Valls \email{flamejat at gmail.com}
+}
+\references{
+  \url{http://www.symmys.com/node/150} See Meucci script
+  for "DoubleDecay.m"
+}
+

Added: pkg/Meucci/man/Fit2Moms.Rd
===================================================================
--- pkg/Meucci/man/Fit2Moms.Rd	                        (rev 0)
+++ pkg/Meucci/man/Fit2Moms.Rd	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,34 @@
+\name{Fit2Moms}
+\alias{Fit2Moms}
+\title{Uses Entropy Pooling to compute Fully Flexible Probabilities that match a double-decay covariance matrix.}
+\usage{
+  Fit2Moms(X, m, S)
+}
+\arguments{
+  \item{X}{matrix representing the risk drivers.}
+
+  \item{m}{matrix of zeros, representing the expectation of
+  the risk drivers.}
+
+  \item{S}{matrix representing the double-decay estimation
+  of the covariance matrix of the risk drivers.}
+}
+\value{
+  p vector of posterior probabilities.
+}
+\description{
+  This function uses Entropy Pooling to compute the
+  posterior probabilities whose first and second moments
+  match a double-decay covariance matrix, as described in
+  A. Meucci, "Personalized Risk Management: Historical
+  Scenarios with Fully Flexible Probabilities" GARP Risk
+  Professional, Dec 2010, p 47-51
+}
+\author{
+  Xavier Valls \email{flamejat at gmail.com}
+}
+\references{
+  \url{http://www.symmys.com/node/150} See Meucci script
+  for "S_MainFullFlexProbs.m"
+}
+

Added: pkg/Meucci/man/LeastInfoKernel.Rd
===================================================================
--- pkg/Meucci/man/LeastInfoKernel.Rd	                        (rev 0)
+++ pkg/Meucci/man/LeastInfoKernel.Rd	2013-09-05 11:25:44 UTC (rev 2998)
@@ -0,0 +1,34 @@
+\name{LeastInfoKernel}
+\alias{LeastInfoKernel}
+\title{Computes least information kernel smoothing}
+\usage{
+  LeastInfoKernel(Y, y, h2)
+}
+\arguments{
+  \item{Y}{Matrix representing the macroeconomic indicator}
+
+  \item{y}{scalar representing the target to which Y is
+  expected to be close in the Generalized Empirical
+  Distribution}
+
+  \item{h2}{N x N matrix used to constrain the second
+  moments; set to NaN to skip the second-moment
+  constraint}
+}
+\value{
+  p vector of posterior probabilities.
+}
+\description{
+  This function uses Entropy Pooling to compute least
+  information kernel smoothing, as described in A. Meucci,
+  "Personalized Risk Management: Historical Scenarios with
+  Fully Flexible Probabilities" GARP Risk Professional, Dec
+  2010, p 47-51
+}
+\author{
+  Xavier Valls \email{flamejat at gmail.com}
+}
+\references{
+  \url{http://www.symmys.com/node/150} See Meucci script
+  for "LeastInfoKernel.m"
+}
+

Modified: pkg/Meucci/man/PlotDistributions.Rd
===================================================================
--- pkg/Meucci/man/PlotDistributions.Rd	2013-09-05 09:21:51 UTC (rev 2997)
+++ pkg/Meucci/man/PlotDistributions.Rd	2013-09-05 11:25:44 UTC (rev 2998)
@@ -1,32 +1,32 @@
-\name{PlotDistributions}
-\alias{PlotDistributions}
-\title{Plot numerical and analytical prior and posterior distributions}
-\usage{
-  PlotDistributions(X, p, Mu, Sigma, p_, Mu_, Sigma_)
-}
-\arguments{
-  \item{X}{a vector containing the dataset}
-
-  \item{p}{a vector cotaining the prior probability values}
-
-  \item{Mu}{a vector containing the prior means}
-
-  \item{Sigma}{a vector containing the prior standard
-  deviations}
-
-  \item{p_}{a vector containing the posterior probability
-  values}
-
-  \item{Mu_}{a vector containing the posterior means}
-
-  \item{Sigma_}{a vector containing the posterior standard
-  deviations}
-}
-\description{
-  Plot numerical and analytical prior and posterior
-  distributions
-}
-\author{
-  Ram Ahluwalia \email{ram at wingedfootcapital.com}
-}
-
+\name{PlotDistributions}
+\alias{PlotDistributions}
+\title{Plot numerical and analytical prior and posterior distributions}
+\usage{
+  PlotDistributions(X, p, Mu, Sigma, p_, Mu_, Sigma_)
+}
+\arguments{
+  \item{X}{a matrix containing the dataset, one column per
+  variable}
+
+  \item{p}{a vector containing the prior probability values}
+
+  \item{Mu}{a vector containing the prior means}
+
+  \item{Sigma}{the prior covariance matrix}
+
+  \item{p_}{a vector containing the posterior probability
+  values}
+
+  \item{Mu_}{a vector containing the posterior means}
+
+  \item{Sigma_}{the posterior covariance matrix}
+}
+\description{
+  Plot numerical and analytical prior and posterior
+  distributions
+}
+\author{
+  Ram Ahluwalia \email{ram at wingedfootcapital.com}
+}
+


