[Returnanalytics-commits] r2284 - in pkg/Meucci: R demo man

noreply at r-forge.r-project.org noreply at r-forge.r-project.org
Tue Sep 11 15:34:29 CEST 2012


Author: braverock
Date: 2012-09-11 15:34:29 +0200 (Tue, 11 Sep 2012)
New Revision: 2284

Added:
   pkg/Meucci/R/MeanDiversificationFrontier.R
   pkg/Meucci/R/MultivariateOUnCointegration.R
   pkg/Meucci/demo/00index
   pkg/Meucci/demo/HermiteGrid_CVaR_Recursion.R
   pkg/Meucci/demo/HermiteGrid_CaseStudy.R
   pkg/Meucci/demo/InvariantProjection.R
   pkg/Meucci/demo/S_CheckDiagonalization.R
   pkg/Meucci/demo/S_CovarianceEvolution.R
   pkg/Meucci/demo/S_DeterministicEvolution.R
   pkg/Meucci/demo/S_FitProjectRates.R
   pkg/Meucci/demo/S_ToyExample.R
   pkg/Meucci/demo/S_plotGaussHermite.R
   pkg/Meucci/man/FitOU.Rd
   pkg/Meucci/man/GenFirstEigVect.Rd
   pkg/Meucci/man/GenPCBasis.Rd
   pkg/Meucci/man/OUstep.Rd
   pkg/Meucci/man/OUstepEuler.Rd
Removed:
   pkg/Meucci/R/ButterflyTrading.R
Log:
- re-merge changes from Manan's last commit svn r2255, original svn mv was somewhat broken


Deleted: pkg/Meucci/R/ButterflyTrading.R
===================================================================
--- pkg/Meucci/R/ButterflyTrading.R	2012-09-07 18:35:53 UTC (rev 2283)
+++ pkg/Meucci/R/ButterflyTrading.R	2012-09-11 13:34:29 UTC (rev 2284)
@@ -1,289 +0,0 @@
-# TODO: plot efficient frontier in R
-
-PlotFrontier = function( e , s , w )
-{
-    # subplot(2,1,1)
-    plot( s , e )
-    # grid on
-    # set(gca,'xlim',[min(s) max(s)])
-    # 
-    # subplot(2,1,2)
-    
-    xx = nrow( w ) ; N = ncol( w )
-    Data = apply( w , 1 , cumsum ) #TODO: Check. Take cumulative sum of *rows*. Try sapply?  
-    
-    for ( n in 1:N ) 
-    {
-        x = cbind( min(s) , s , max(s) )
-        y = cbind( 0 , Data[ , N-n+1 ] , 0 )
-        # hold on
-        #h = fill( x , y , cbind( .9 , .9 , .9) - mod( n , 3 ) %*% cbind( .2 , .2 , .2) )
-    }
-    
-    #set(gca,'xlim',[min(s) max(s)],'ylim',[0 max(max(Data))])
-    #xlabel('portfolio # (risk propensity)')
-    #ylabel('portfolio composition')
-}
-
-ViewCurveSlope = function( X , p )
-{
-    # view 3 (expectations and binding constraints): slope of the yield curve will increase by 5 bp
-    
-    J = nrow( X ) ; K = ncol( X )
-    
-    # constrain probabilities to sum to one...
-    Aeq = ones( 1 , J )
-    beq = 1
-    
-    # ...constrain the expectation...
-    V = X[ , 14 ] - X[ , 13 ]
-    v = .0005
-    
-    Aeq = rbind( Aeq , t(V) )
-    
-    beq = rbind( beq , v )
-    
-    A = b = emptyMatrix  
-    
-    # ...compute posterior probabilities
-    p_ = EntropyProg( p , A , b , Aeq ,beq )$p_  
-    return( p_ )
-}
-
-ViewRealizedVol = function( X , p )
-{  
-    # view 2 (relative inequality view on the median): bullish on the realized volatility of MSFT (i.e. the absolute log-change in the underlying).
-    # This is the variable such that, if it exceeds a threshold, a long position in the butterfly turns into a profit (e.g. Rachev 2003).
-    # We issue a relative statement on the median, comparing it with the third quintile implied by the reference market model.
-    
-    library( matlab )
-    J = nrow( X ) ; K = ncol( X )
-    
-    # constrain probabilities to sum to one...
-    Aeq = ones( 1 , J )
-    beq = 1
-    
-    # ...constrain the median...  
-    V = abs( X[ , 1 ] ) # absolute value of the log of changes in MSFT close prices (definition of realized volatility)  
-    
-    V_Sort = sort( V , decreasing = FALSE ) # sorting of the abs value of log changes in prices from smallest to largest
-    I_Sort = order( V ) 
-    
-    F = cumsum( p[ I_Sort ] ) # represents the cumulative sum of probabilities from ~0 to 1
-    
-    I_Reference = max( matlab:::find( F <= 3/5 ) ) # finds the (max) index corresponding to element with value <= 3/5 along the empirical cumulative density function for the abs log-changes in price
-    V_Reference = V_Sort[ I_Reference ] # returns the corresponding abs log of change in price at the 3/5 of the cumulative density function
-    
-    I_Select = find( V <= V_Reference ) # finds all indices with value of abs log-change in price less than the reference value
-    
-    a = zeros( 1 , J )
-    a[ I_Select ] = 1 # select those cases where the abs log-change in price is less than the 3/5 of the empirical cumulative density...
-    
-    A = a
-    b = .5 # ... and assign a probability of 50% to these cases occurring. This moves the median of the distribution.
-    
-    # ...compute posterior probabilities
-    p_ = EntropyProg( p , A , b , Aeq , beq )$p_
-    
-    return( p_ )
-}
-
-ViewImpliedVol = function( X , p )
-{    
-    # View 1 (inequality view): bearish on the 2m-6m implied volatility spread for Google
-    
-    J = nrow( X ) ; K = ncol( X )
-    
-    # constrain probabilities to sum to one...
-    Aeq = ones( 1 , J )
-    beq = 1 
-    
-    # ...constrain the expectation...
-    V = X[ , 12 ] - X[ , 11 ] # GOOG_vol_182 (6m implied vol) - GOOG_vol_91 (2m implied vol)
-    m = mean( V )
-    s = std( V )
-    
-    A = t( V )
-    b = m - s
-    
-    # ...compute posterior probabilities
-    p_ = EntropyProg( p , A , b , Aeq , beq )$p_
-    
-    return( p_ )
-}
-
-ComputeCVaR = function( Units , Scenarios , Conf )
-{
-    PnL = Scenarios %*% Units
-    Sort_PnL = PnL[ order( PnL , decreasing = FALSE ) ] # DOUBLE CHECK IF I SHOULD USE ORDER INSTEAD OF SORT
-    
-    J = length( PnL )
-    Cut = round( J %*% ( 1 - Conf ) , 0 )
-    
-    CVaR = -mean( Sort_PnL[ 1:Cut ] )
-    
-    return( CVaR )
-}
-
-LongShortMeanCVaRFrontier = function( PnL , Probs , Butterflies , Options )
-{
-    library( matlab )
-    library( quadprog )
-    library( limSolve )
-    
-    # setup constraints
-    J = nrow(PnL); N = ncol(PnL)
-    P_0s = matrix(  , nrow = 1 , ncol = 0 )
-    D_s  = matrix(  , nrow = 1 , ncol = 0 )
-    emptyMatrix = matrix( nrow = 0 , ncol = 0 )
-    
-    for ( n in 1:N )
-    {
-        P_0s = cbind( P_0s , Butterflies[[n]]$P_0 ) # 1x9 matrix
-        D_s = cbind( D_s , Butterflies[[n]]$Delta ) # 1x9 matrix
-    }
-    
-    Constr = list()
-    Constr$Aeq = P_0s # linear coefficients in the constraints Aeq*X = beq (equality constraints)
-    Constr$beq = Options$Budget # the constant vector in the constraints Aeq*x = beq
-    
-    if ( Options$DeltaNeutral == TRUE ) 
-    {
-        Constr$Aeq = rbind( Constr$Aeq , D_s ) # 2x9 matrix
-        Constr$beq = rbind( Constr$beq , 0 )   # 2x1 vector
-    }
-    
-    Constr$Aleq = rbind( diag( as.vector( P_0s ) ) , -diag( as.vector( P_0s ) ) ) # linear coefficients in the constraints A*x <= b. an 18x9 matrix
-    Constr$bleq = rbind( Options$Limit * ones(N,1) , Options$Limit * ones(N,1) ) # constant vector in the constraints A*x <= b. an 18x1 matrix
-    
-    # determine expectation of minimum-variance portfolio
-    Exps = t(PnL) %*% Probs
-    Scnd_Mom = t(PnL) %*% (PnL * (Probs %*% ones(1,N) ) )
-    Scnd_Mom = ( Scnd_Mom + t(Scnd_Mom) ) / 2
-    Covs = Scnd_Mom - Exps %*% t(Exps)
-    
-    Amat = rbind( Constr$Aeq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints
-    bvec = rbind( Constr$beq , Constr$bleq ) # stack the equality constraints on top of the inequality constraints
-    
-    #if ( nrow(Covs) != length( zeros(N,1) ) ) stop("Dmat and dvec are incompatible!")
-    #if ( nrow(Covs) != nrow(Amat)) stop("Amat and dvec are incompatible!")
-    
-    MinSDev_Units = solve.QP( Dmat = Covs , dvec = -1 * zeros(N,1) , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( Constr$beq) ) # TODO: Check this
-    MinSDev_Exp = t( MinSDev_Units$solution ) %*% Exps
-    
-    # determine expectation of maximum-expectation portfolio
-    
-    MaxExp_Units = linp( E = Constr$Aeq , F = Constr$beq , G = -1*Constr$Aleq , H = -1*Constr$bleq , Cost = -Exps , ispos = FALSE )$X 
-    
-    MaxExp_Exp = t( MaxExp_Units ) %*% Exps
-    
-    # slice efficient frontier in NumPortf equally thick horizontal sections
-    Grid = t( seq( from = Options$FrontierSpan[1] , to = Options$FrontierSpan[2] , length.out = Options$NumPortf ) )
-    TargetExp = as.numeric( MinSDev_Exp ) + Grid * as.numeric( ( MaxExp_Exp - MinSDev_Exp ) )
-    
-    # compute composition, expectation, s.dev. and CVaR of the efficient frontier
-    Composition = matrix( , ncol = N , nrow = 0 )
-    Exp = matrix( , ncol = 1 , nrow = 0 )
-    SDev = matrix( , ncol = 1 , nrow = 0 )
-    CVaR = matrix( , ncol = 1 , nrow = 0 )
-    
-    for (i in 1:Options$NumPortf )
-    {
-        # determine least risky portfolio for given expectation
-        AEq = rbind( Constr$Aeq , t(Exps) )        # equality constraint: set expected return for each asset...
-        bEq = rbind( Constr$beq , TargetExp[i] )
-        
-        Amat = rbind( AEq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints
-        bvec = rbind( bEq , Constr$bleq ) # ...and target portfolio return for i'th efficient portfolio
-        
-        # Why is FirstDegree "expected returns" set to 0? 
-        # Because we capture the equality view in the equality constraints matrix
-        # In other words, we have a constraint that the Expected Returns by Asset %*% Weights = Target Return
-        Units = solve.QP( Dmat = Covs , dvec = -1*zeros(N,1) , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( bEq ) )
-        
-        # store results
-        Composition = rbind( Composition , t( Units$solution ) )
-        
-        Exp = rbind( Exp , t( Units$solution ) %*% Exps )
-        SDev = rbind( SDev , sqrt( t( Units$solution ) %*% Covs %*% Units$solution ) )
-        CVaR = rbind( CVaR , ComputeCVaR( Units$solution , PnL , Options$Quant ) )
-    }   
-    
-    colnames( Composition ) = c( "MSFT_vol_30" , "MSFT_vol_91" , "MSFT_vol_182" , 
-            "YHOO_vol_30" , "YHOO_vol_91" , "YHOO_vol_182" ,    
-            "GOOG_vol_30" , "GOOG_vol_91" , "GOOG_vol_182" )
-    
-    return( list( Exp = Exp , SDev = SDev , CVaR = CVaR , Composition = Composition ) )
-}
-
-
-MapVol = function( sig , y , K , T )
-{
-    # in real life a and b below should be calibrated to security-specific time series
-    
-    a=-.00000000001
-    b= .00000000001 
-    
-    s = sig + a/sqrt(T) * ( log(K) - log(y) ) + b/T*( log(K) - log(y) )^2
-    
-    return( s )
-}
-
-HorizonPricing = function( Butterflies , X )
-{
-    r = .04       # risk-free rate
-    tau = 1/252   # investment horizon
-    
-#  factors: 1. 'MSFT_close'   2. 'MSFT_vol_30'   3. 'MSFT_vol_91'   4. 'MSFT_vol_182'
-#  securities:                1. 'MSFT_vol_30'   2. 'MSFT_vol_91'   3. 'MSFT_vol_182'
-    
-# create a new row called DlnY and Dsig
-# create a new row called 'DlnY'. Assign the first row (vector) of X to this DlnY for the 1:3 securities
-    for ( s in 1:3 ) { Butterflies[[s]]$DlnY = X[ , 1 ] }
-    
-# assign the 2nd row of X to a new element called Dsig
-    Butterflies[[1]]$Dsig=X[ , 2 ]
-    Butterflies[[2]]$Dsig=X[ , 3 ]
-    Butterflies[[3]]$Dsig=X[ , 4 ]
-    
-#  factors:  5. 'YHOO_close'   6. 'YHOO_vol_30'   7. 'YHOO_vol_91'   8. 'YHOO_vol_182'
-#  securities:                 4. 'YHOO_vol_30'   5. 'YHOO_vol_91'   6. 'YHOO_vol_182'
-    for ( s in 4:6 ) { Butterflies[[s]]$DlnY=X[ , 5 ] }
-    
-    Butterflies[[4]]$Dsig=X[ , 6 ]
-    Butterflies[[5]]$Dsig=X[ , 7 ]
-    Butterflies[[6]]$Dsig=X[ , 8 ]
-    
-#  factors:  #  9. 'GOOG_close'  10. 'GOOG_vol_30'  11. 'GOOG_vol_91'  12. 'GOOG_vol_182'
-#  securities:                    7. 'GOOG_vol_30'   8. 'GOOG_vol_91'   9.  'GOOG_vol_182'
-    for ( s in 7:9 ) { Butterflies[[s]]$DlnY=X[ , 9 ] }
-    
-    Butterflies[[7]]$Dsig=X[ , 10 ]
-    Butterflies[[8]]$Dsig=X[ , 11 ]
-    Butterflies[[9]]$Dsig=X[ , 12 ]
-    
-    PnL = matrix( NA , nrow = nrow(X) )
-    
-    for ( s in 1:length(Butterflies) )
-    {  
-        Y = Butterflies[[s]]$Y_0 * exp(Butterflies[[s]]$DlnY)
-        ATMsig = apply( cbind( Butterflies[[s]]$sig_0 + Butterflies[[s]]$Dsig , 10^-6 ) , 1 , max )     
-        t = Butterflies[[s]]$T - tau
-        K = Butterflies[[s]]$K
-        sig = MapVol(ATMsig , Y , K , t )
-        
-        # library(RQuantLib) # this function can only operate on one option at a time, so we use fOptions    
-        # C = EuropeanOption( type = "call" , underlying = Y , strike = K , dividendYield = 0 , riskFreeRate = r , maturity = t , volatility = sig )$value
-        # P = EuropeanOption( type = "put" ,  underlying = Y , strike = K , dividendYield = 0 , riskFreeRate = r , maturity = t , volatility = sig )$value
-        
-        # use fOptions to value options
-        library( fOptions )
-        C = GBSOption( TypeFlag = "c" , S = Y , X = K , r = r , b = 0 , Time = t , sigma = sig  )
-        P = GBSOption( TypeFlag = "p" , S = Y , X = K , r = r , b = 0 , Time = t , sigma = sig  )    
-        
-        Butterflies[[s]]$P_T = C@price + P@price
-        PnL = cbind( PnL , Butterflies[[s]]$P_T )
-    }
-    PnL = PnL[ , -1 ]
-}
-
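
For reference, a minimal standalone sketch of the scenario-based CVaR calculation that ComputeCVaR above implements (not part of the committed code; illustrative only). Since J and (1 - Conf) are scalars, plain multiplication is enough, and sorting the P&L with sort() is equivalent to indexing by order(), which answers the double-check note in the deleted function:

    # sketch: sort the simulated P&L, keep the worst (1 - Conf) fraction, report minus its mean
    ComputeCVaRSketch = function( Units , Scenarios , Conf = 0.95 )
    {
        PnL      = Scenarios %*% Units             # portfolio P&L in each scenario
        Sort_PnL = sort( as.numeric( PnL ) )       # ascending, so the left tail comes first
        Cut      = round( length( PnL ) * ( 1 - Conf ) )
        -mean( Sort_PnL[ 1:Cut ] )                 # CVaR reported as a positive loss
    }
    # e.g. ComputeCVaRSketch( rep( 1 , 9 ) , matrix( rnorm( 9000 , sd = .01 ) , ncol = 9 ) )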

Copied: pkg/Meucci/R/MeanDiversificationFrontier.R (from rev 2255, pkg/PerformanceAnalytics/sandbox/Meucci/R/MeanDiversificationFrontier.R)
===================================================================
--- pkg/Meucci/R/MeanDiversificationFrontier.R	                        (rev 0)
+++ pkg/Meucci/R/MeanDiversificationFrontier.R	2012-09-11 13:34:29 UTC (rev 2284)
@@ -0,0 +1,215 @@
+#' This function generates the first eigenvector
+#' 
+#' @param S     Covariance Matrix
+#' @param A     Conditioning Matrix
+#'
+#' @return e    First eigenvector
+#'
+#' @references 
+#' A. Meucci - "Managing Diversification", Risk Magazine, June 2009
+#' \url{http://ssrn.com/abstract=1358533}
+#' @author Manan Shah \email{mkshah@@cmu.edu}
+GenFirstEigVect = function( S , A )
+{
+  N = nrow( S )
+  P = diag( N )
+
+  if ( qr( A )$rank > 0 )
+  {
+    P = diag(N) - t( A ) %*% solve( A  %*% t( A ) ) %*% A
+  }
+
+  tmp = eigen( P %*% S %*% t(P) )
+  E_ = tmp$vectors
+  L_ = tmp$values
+  
+  I = which.max(  L_ )
+  e = E_[,I]
+  
+  return( e )
+}
+
+#' This function computes the conditional principal portfolios
+#' 
+#' @param S     Covariance Matrix
+#' @param A     Conditioning Matrix
+#'
+#' @return      a list containing
+#' @return E    a matrix whose columns are the compositions of the conditional principal portfolios
+#' @return L    a vector containing the variances of the conditional principal portfolios
+#' @return G    the map from weights to the (square root of the unnormalized) conditional diversification distribution
+#'
+#' \deqn{ e_{n}  \equiv   argmax_{ e'e  \equiv 1 }  \big\{ e' \Sigma e \big\} s.t.  e' \Sigma  e_{j}  \equiv 0 }
+#' @references 
+#' A. Meucci - "Managing Diversification", Risk Magazine, June 2009 - Formula (12)
+#' \url{http://ssrn.com/abstract=1358533}
+#' @author Manan Shah \email{mkshah@@cmu.edu}
+GenPCBasis = function( S , A )
+{  
+  if ( length( A ) == 0 )
+  {
+    N = nrow( S )
+    L = rep( 0, N )
+    K = 0
+    tmp = eigen( S )
+    E_ = tmp$vectors
+    L_ = diag( tmp$values )
+    E = E_
+    for ( n in 1:N )
+    {
+      E[ , n ] = E_[ , N - n + 1 ]
+      L[ n ] = L_[ N - n + 1 , N - n + 1 ]
+    }
+  }
+  else
+  {
+    K = nrow( A )
+    N = ncol( A )
+    emptyMatrix = matrix( ,nrow = 0, ncol = 0 )
+    E = emptyMatrix
+    B = A
+    for ( n in 1:( N - K ) )
+    {
+      if ( length( E ) != 0 )
+      {
+        B = rbind( A, t( E ) %*% S )
+      }
+      e = GenFirstEigVect( S , B )
+      E = cbind( E , e )
+    }
+    
+    for ( n in ( N - K + 1 ):N )
+    {
+      B = t( E ) %*% S
+      e = GenFirstEigVect( S , B )
+      E = cbind( E , e )
+    }
+
+    # swap order
+    E = cbind( E[ , ( N - K + 1 ):N ], E[ , 1:( N - K ) ] )
+  }  
+ 
+  v = t( E ) %*% as.matrix( S ) %*% E
+  L = diag( v )
+
+  G = diag( sqrt( L ), nrow = length( L ) ) %*% solve( E )
+  G = G[ ( K + 1 ):N , ]
+  return( list( E = E, L = L, G = G ) )
+}
+
+#' This function computes the extreme frontier
+#' 
+#' @param G       the map from weights to the (square root of the unnormalized) conditional diversification distribution
+#' @param w_b     a matrix containing the benchmark weights
+#' @param w_0     a matrix containing the initial portfolio weights
+#' @param Constr  a list containing the equality and inequality constraints
+#'
+#' @return x      a numeric vector of portfolio weights attaining the maximum entropy
+#'
+#' \deqn{  N_{ent}  \equiv exp \big(-\sum_{n=k+1}^N  p_{n} ln p_{n}  \big),
+#'  w_{  \varphi }  \equiv  argmax_{w \in C,   \mu'w  \geq  \varphi  }  N_{ent}   \big(w\big) }
+#' @references 
+#' A. Meucci - "Managing Diversification", Risk Magazine, June 2009 - Formula (18,19)
+#' \url{http://ssrn.com/abstract=1358533}
+#' @author Manan Shah \email{mkshah@@cmu.edu}
+MaxEntropy = function( G , w_b , w_0 , Constr )
+{
+  library( nloptr )
+  # Nested function that computes fitness
+  nestedfun = function( x )
+  {
+    v_ = G %*% ( x - as.matrix( w_b ) )
+    p = v_ * v_
+    R_2 = pmax( 10^(-10), p / colSums( p ) )
+    Minus_Ent = t( R_2 ) %*% log( R_2 )
+    
+    # evaluate gradient
+    gradient = rbind( Constr$b - Constr$A %*% x , Constr$beq - Constr$Aeq %*% x )
+    
+    return( list( objective = Minus_Ent , gradient = gradient ) )
+  }
+  
+  local_opts <- list( algorithm = "NLOPT_LD_SLSQP", xtol_rel = 1.0e-6 , 
+                      check_derivatives = FALSE , #check_derivatives_print = "all" , 
+                      eval_f = nestedfun )
+  x = nloptr( x0 = w_0 , eval_f = nestedfun ,
+              opts = list( algorithm = "NLOPT_LD_AUGLAG" , local_opts = local_opts ,
+                print_level = 2 , maxeval = 1000 , 
+                check_derivatives = FALSE , #check_derivatives_print = "all" , 
+                           xtol_rel = 1.0e-6 ) )
+  return( x$solution )
+}
+
+#' This function computes the mean diversification efficient frontier
+#' 
+#' @param S       Covariance Matrix
+#' @param Mu      a matrix containing the expectations 
+#' @param w_b     a matrix containing the benchmark weights
+#' @param w_0     a matrix containing the initial portfolio weights
+#' @param Constr  a list containing the equality and inequality constraints
+#'
+#' @return        a list containing
+#' @return Weights
+#' @return Ne_s
+#' @return R_2_s
+#' @return m_s
+#' @return s_s
+#'
+#' @references 
+#' A. Meucci - "Managing Diversification", Risk Magazine, June 2009
+#' \url{http://ssrn.com/abstract=1358533}
+#' @author Manan Shah \email{mkshah@@cmu.edu}
+MeanTCEntropyFrontier = function( S , Mu , w_b , w_0 , Constr )
+{
+  emptyMatrix = matrix( ,nrow = 0, ncol = 0)
+  # compute conditional principal portfolios
+  GenPCBasisResult = GenPCBasis( S, emptyMatrix )
+
+  # compute frontier extrema
+  library( limSolve )
+  w_MaxExp = as.matrix( linp( E = Constr$Aeq , F = Constr$beq , G = -1*Constr$A , H = -1*Constr$b, Cost = -t( Mu ) , ispos = FALSE)$X )
+  MaxExp = t( Mu ) %*% ( w_MaxExp - as.matrix( w_b ) )
+
+  w_MaxNe = MaxEntropy( GenPCBasisResult$G , w_b , w_0 , Constr )
+  ExpMaxNe = t( Mu ) %*% ( w_MaxNe - w_b )
+
+  # slice efficient frontier in NumPortf equally thick horizontal sections
+  NumPortf = 10
+  Grid_L = .0
+  Grid_H = .9
+  Grid = c( seq( from = Grid_L, to = Grid_H, length.out = NumPortf ) )
+  TargetExp = ExpMaxNe + Grid %*% ( MaxExp - ExpMaxNe )
+
+  # compute diversification distribution
+  Weights = emptyMatrix
+  R_2_s = emptyMatrix
+  Ne_s = emptyMatrix
+  m_s = emptyMatrix
+  s_s = emptyMatrix
+
+  for ( k in 1:NumPortf )
+  {
+    ConstR = Constr
+    ConstR$Aeq = cbind( Constr$Aeq, t( Mu ) )
+    ConstR$beq = cbind( Constr$beq, TargetExp[ k ] + t( Mu ) %*% w_b )
+
+    w = MaxEntropy( GenPCBasisResult$G , w_b , w_0 , ConstR )
+
+    m = t( Mu ) %*% ( w - w_b )
+    
+    s = sqrt( t( w - w_b ) %*% S %*% ( w - w_b ) )
+    
+    v_ = GenPCBasisResult$G %*% ( w - w_b )
+    TE_contr = v_ * v_ / as.numeric( s )
+
+    R_2 = pmax( 10^(-10) , TE_contr / colSums( TE_contr ) )
+    Ne = exp( -sum( R_2 * log( R_2 ) ) )
+    
+    Weights = cbind( Weights, w )
+    m_s = cbind( m_s, m )
+    s_s = cbind( s_s, s )
+    R_2_s = cbind( R_2_s, R_2 )
+    Ne_s = cbind( Ne_s, Ne )
+  }  
+  return( list( Weights = Weights, Ne_s = Ne_s, R_2_s = R_2_s, m_s = m_s, s_s = s_s ) )
+}
\ No newline at end of file
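
As a reading aid for the hunk above: the quantity Ne computed inside MeanTCEntropyFrontier is the exponential of the entropy of the diversification distribution, i.e. the effective number of uncorrelated bets of formulas (18)-(19) in the referenced paper. A minimal sketch with purely illustrative numbers (not part of the committed code):

    # diversification distribution: share of portfolio variance explained by each
    # conditional principal portfolio (entries sum to one)
    p  = c( 0.50 , 0.30 , 0.15 , 0.05 )
    Ne = exp( -sum( p * log( p ) ) )   # effective number of bets, here roughly 3.1
    # a fully concentrated p gives Ne = 1; a uniform p gives Ne = length( p )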

Copied: pkg/Meucci/R/MultivariateOUnCointegration.R (from rev 2255, pkg/PerformanceAnalytics/sandbox/Meucci/R/MultivariateOUnCointegration.R)
===================================================================
--- pkg/Meucci/R/MultivariateOUnCointegration.R	                        (rev 0)
+++ pkg/Meucci/R/MultivariateOUnCointegration.R	2012-09-11 13:34:29 UTC (rev 2284)
@@ -0,0 +1,141 @@
+#' Generate the next element of a multivariate Ornstein-Uhlenbeck process
+#' 
+#' @param X_0   a matrix containing the starting value of each process
+#' @param t     a numeric containing the timestep   
+#' @param Mu    a vector containing the unconditional expectation of the process
+#' @param Th    a transition matrix, i.e., a fully generic square matrix that steers the deterministic portion
+#'              of the evolution of the process
+#' @param Sig   a square matrix that drives the dispersion of the process
+#'
+#' @return        a list containing
+#' @return X_t    a vector containing the value of the process after the given timestep
+#' @return Mu_t   a vector containing the conditional expectation of the process
+#' @return Sig_t  a matrix containing the covariance after the time step
+#'
+#' \deqn{ X_{t+ \tau } =  \big(I- e^{- \theta  \tau } \big)  \mu  +  e^{- \theta  \tau } X_{t} +   \epsilon _{t, \tau } }
+#' @references 
+#' A. Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate Ornstein-Uhlenbeck" - Formula (2)
+#' \url{http://ssrn.com/abstract=1404905}
+#' @author Manan Shah \email{mkshah@@cmu.edu}
+OUstep = function( X_0 , t , Mu , Th , Sig )
+{
+  NumSimul = nrow( X_0 )
+  N = ncol( X_0 )
+  
+  # location
+  ExpM = expm( -Th * t )
+  
+  # repmat = function(X,m,n) - R equivalent of repmat (matlab)
+  X = t( Mu - ExpM %*% Mu )
+  mx = dim( X )[1]
+  nx = dim( X )[2]
+  Mu_t = matrix( t ( matrix( X , mx , nx*1 ) ), mx * NumSimul, nx * 1, byrow = T ) + X_0 %*% ExpM
+  
+  # scatter
+  TsT = kronecker( Th , diag( N ) ) + kronecker( diag( N ) , Th )
+  
+  VecSig = Sig
+  dim( VecSig ) = c( N^2 , 1 )
+  VecSig_t = solve( TsT ) %*% ( diag( N^2 ) - expm( -TsT * t ) ) %*% VecSig
+  Sig_t = VecSig_t
+  dim( Sig_t ) = c( N , N )
+  Sig_t = ( Sig_t + t( Sig_t ) ) / 2
+  
+  Eps = mvrnorm( NumSimul, rep( 0 , N ), Sig_t )
+  
+  X_t = Mu_t + Eps
+  Mu_t = t( colMeans( Mu_t ) )
+  
+  return( list( X_t = X_t, Mu_t = Mu_t, Sig_t = Sig_t ) )
+}
+
+#' Generate the next element of a multivariate Ornstein-Uhlenbeck process using antithetic variates and assuming an
+#' Euler discretization of the Brownian motion
+#' 
+#' @param X_0   a matrix containing the starting value of each process
+#' @param Dt     a numeric containing the timestep   
+#' @param Mu    a vector containing the unconditional expectation of the process
+#' @param Th    a transition matrix, i.e., a fully generic square matrix that steers the deterministic portion
+#'              of the evolution of the process
+#' @param Sig   a square matrix that drives the dispersion of the process
+#'
+#' @return        a list containing
+#' @return X_t    a vector containing the value of the process after the given timestep
+#' @return Mu_t   a vector containing the conditional expectation of the process
+#' @return Sig_t  a matrix containing the covariance after the time step
+#'
+#' \deqn{ X_{t+ \tau } =  \big(I- e^{- \theta  \tau } \big)  \mu  +  e^{- \theta  \tau } X_{t} +   \epsilon _{t, \tau } }
+#' @references 
+#' A. Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate Ornstein-Uhlenbeck" - Formula (2)
+#' \url{http://ssrn.com/abstract=1404905}
+#' @author Manan Shah \email{mkshah@@cmu.edu}
+OUstepEuler = function( X_0 , Dt , Mu , Th , Sig )
+{
+  NumSimul = nrow( X_0 )
+  N = ncol( X_0 )
+  
+  # location
+  ExpM = expm( as.matrix( -Th * Dt ) )
+  
+  # repmat = function(X,m,n) - R equivalent of repmat (matlab)
+  X = t( Mu - ExpM %*% Mu )
+  mx = dim( X )[1]
+  nx = dim( X )[2]
+  Mu_t = matrix( t ( matrix( X , mx , nx*1 ) ), mx * NumSimul, nx * 1, byrow = T ) + X_0 %*% ExpM
+  
+  # scatter
+  Sig_t = Sig * Dt
+  Eps = mvrnorm( NumSimul / 2, rep( 0 , N ) , Sig_t )
+  Eps = rbind( Eps, -Eps)
+  
+  X_t = Mu_t + Eps
+  Mu_t = t( colMeans( X_t ) )
+  
+  return( list( X_t = X_t, Mu_t = Mu_t, Sig_t = Sig_t ) )
+}
+
+#' Fit a multivariate Ornstein-Uhlenbeck process to the observed path Y, sampled at the given timestep.
+#' 
+#' @param Y       a matrix containing the value of a process at various time steps.
+#' @param tau     a numeric containing the timestep   
+#'
+#' @return        a list containing
+#' @return Mu     a vector containing the expectation of the process
+#' @return Sig    a matrix containing the covariance of the resulting fitted OU process
+#' @return Th     a transition matrix required for defining the fitted OU process
+#'
+#' \deqn{  x_{t+ \tau } =  \big(I- e^{- \theta  \tau } \big)  \mu  +  e^{- \theta  \tau } x_{t},
+#' vec \big(  \Sigma _{ \tau } \big)  \equiv    \big( \Theta  \oplus  \Theta \big) ^{-1}  \big(I- e^{( \Theta   \oplus   \Theta ) \tau } \big)  vec \big(  \Sigma \big) }
+#' @references 
+#' A. Meucci - "Review of Statistical Arbitrage, Cointegration, and Multivariate Ornstein-Uhlenbeck" - Formula (8),(9)
+#' \url{http://ssrn.com/abstract=1404905}
+#' @author Manan Shah \email{mkshah@@cmu.edu}
+FitOU = function ( Y, tau )
+{
+  library(expm)
+  T = nrow( Y )
+  N = ncol( Y )
+  
+  X = Y[ -1 , ]
+  F = cbind( rep( 1, T-1 ), Y[ 1:( T - 1 ) , ] )
+  E_XF = t( X ) %*% F / T
+  E_FF = t( F ) %*% F / T
+  B = E_XF %*% solve( E_FF )
+  
+  Th = -logm ( B [ , -1 ] ) / tau
+  Mu = solve( diag( N ) - B[ , -1 ] ) %*% B[ , 1 ]
+  
+  U = F %*% t( B ) - X
+  Sig_tau = cov( U )
+  
+  N = length( Mu )
+  TsT = kronecker( Th , diag( N ) ) + kronecker( diag( N ) , Th )
+  
+  VecSig_tau = Sig_tau
+  dim( VecSig_tau ) = c( N^2 , 1 )
+  VecSig = solve( diag( N^2 ) - expm( as.matrix( -TsT * tau ) ) ) %*% TsT %*% VecSig_tau
+  Sig = VecSig
+  dim( Sig ) = c( N , N )
+  
+  return( list( Mu = Mu, Th = Th, Sig = Sig ) )
+}
\ No newline at end of file
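
A short usage sketch for FitOU and OUstep (not part of the committed code; the data and parameter values are illustrative assumptions). Note that OUstep and OUstepEuler call expm() and mvrnorm() without loading their packages themselves, so expm and MASS need to be attached first:

    library( expm )   # expm(), logm()
    library( MASS )   # mvrnorm()
    set.seed( 10 )
    N = 2 ; T_ = 500 ; tau = 1 / 52
    Y = matrix( 0 , T_ , N )                      # two toy mean-reverting series
    for ( i in 2:T_ ) Y[ i , ] = 0.95 * Y[ i - 1 , ] + rnorm( N , sd = 0.01 )
    fit = FitOU( Y , tau )                        # estimated Mu, Th and Sig of the OU process
    nxt = OUstep( Y[ T_ , , drop = FALSE ] , tau , fit$Mu , fit$Th , fit$Sig )
    nxt$Mu_t                                      # conditional expectation one step ahead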

Copied: pkg/Meucci/demo/00index (from rev 2255, pkg/PerformanceAnalytics/sandbox/Meucci/demo/00index)
===================================================================
--- pkg/Meucci/demo/00index	                        (rev 0)
+++ pkg/Meucci/demo/00index	2012-09-11 13:34:29 UTC (rev 2284)
@@ -0,0 +1,20 @@
+AnalyticalvsNumerical       This example script compares the numerical and the analytical solution of entropy-pooling
+ButterflyTrading            This example script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci
+DetectOutliersviaMVE        This example script detects outliers in two-asset and multi-asset case
+FullyFlexibleBayesNets       This case study uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management
+HermiteGrid_CaseStudy       This script estimates the prior of a hedge fund return and processes extreme views on CVaR according to Entropy Pooling
+HermiteGrid_CVaR_Recursion  This script illustrates the discrete Newton recursion  to process views on CVaR according to Entropy Pooling
+HermiteGrid_demo            This script compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views
+InvariantProjection         This script projects summary statistics to arbitrary horizons under i.i.d. assumption
+logToArithmeticCovariance   This example script generates arithmetic returns and an arithmetic covariance matrix given a distribution of log returns
+Prior2Posterior             This example script compares the numerical and the analytical solution of entropy-pooling
+RankingInformation          This script performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci
+RobustBayesianAllocation    This script replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M
+S_plotGaussHermite          This example script displays mesh points based on Gaussian-Hermite quadrature
+S_SnPCaseStudy              This script replicates the example from Meucci's MATLAB script S_SnPCaseStudy.M
+S_ToyExample                This toy example illustrates the use of Entropy Pooling to compute Fully Flexible Bayesian networks
+S_FitProjectRates           This script fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution
+S_CheckDiagonalization      This script verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the transition matrix of an OU process
+S_CovarianceEvolution       This script represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid
+S_DeterministicEvolution    This script animates the evolution of the deterministic component of an OU process
+MeanDiversificationFrontier This script computes the mean-diversification efficient frontier
\ No newline at end of file
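
For orientation, the Entropy Pooling step used throughout these demos (EntropyProg in the code below) computes posterior probabilities by minimizing the relative entropy to the prior p subject to the view constraints, roughly p_ = argmin_x sum_j x_j ( ln x_j - ln p_j ) subject to A x <= b and Aeq x = beq, as in the "Fully Flexible Extreme Views" reference cited in the demo headers; the demos read the optimizationPerformance$ml value back as the attained Kullback-Leibler divergence (their KLdiv variable).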

Copied: pkg/Meucci/demo/HermiteGrid_CVaR_Recursion.R (from rev 2255, pkg/PerformanceAnalytics/sandbox/Meucci/demo/HermiteGrid_CVaR_Recursion.R)
===================================================================
--- pkg/Meucci/demo/HermiteGrid_CVaR_Recursion.R	                        (rev 0)
+++ pkg/Meucci/demo/HermiteGrid_CVaR_Recursion.R	2012-09-11 13:34:29 UTC (rev 2284)
@@ -0,0 +1,134 @@
+# This script illustrates the discrete Newton recursion  to process views on CVaR according to Entropy Pooling
+# This script complements the article
+#	"Fully Flexible Extreme Views"
+#	by A. Meucci, D. Ardia, S. Keel
+#	available at www.ssrn.com
+# The most recent version of this code is available at
+# MATLAB Central - File Exchange
+
+# Prior market model (normal) on grid
+emptyMatrix = matrix( nrow=0 , ncol=0 )
+market.mu   = 0.0
+market.sig2 = 1.0
+market.pdf = function(x) dnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+market.cdf = function(x) pnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+market.rnd = function(x) rnorm( x , mean = market.mu , sd = sqrt(market.sig2) ) 
+market.inv = function(x) qnorm( x , mean = market.mu , sd = sqrt(market.sig2) )
+market.VaR95 = market.inv(0.05)
+market.CVaR95 = integrate( function( x ) ( x * market.pdf( x ) ), -100, market.VaR95)$val / 0.05
+
+tmp = ( ghqx - min( ghqx ) )/( max( ghqx ) - min( ghqx ) ) # rescale GH zeros so they belong to [0,1]
+epsilon = 1e-10
+Lower = market.inv( epsilon )
+Upper = market.inv( 1 - epsilon )
+X = Lower + tmp * ( Upper - Lower ) # rescale mesh
+
+p = integrateSubIntervals( X, market.cdf )
+p = normalizeProb( p )
+J = nrow( X )
+
+# Entropy posterior from extreme view on CVaR: brute-force approach
+  
+# view of the analyst
+view.CVaR95 = -3.0
+
+# Iterate over different VaR95 levels
+nVaR95 = 100
+VaR95  = seq(view.CVaR95, market.VaR95, len=nVaR95)
+p_     = matrix(NaN, nrow = J, ncol = nVaR95 )
+s_     = matrix(NaN, nrow = nVaR95, ncol = 1 )
+KLdiv  = matrix(NaN, nrow = nVaR95, ncol = 1)
+
+for ( i in 1:nVaR95 ) {
+  idx = as.matrix( X <= VaR95[i] )
+  s_[i] = sum(idx)
+  posteriorEntropy = EntropyProg(p, t( idx ),  as.matrix( 0.05 ), rbind( rep(1, J), t( idx * X ) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  p_[,i] = posteriorEntropy$p_
+  KLdiv[i] = posteriorEntropy$optimizationPerformance$ml
+}
+                                 
+# Display results
+plot( s_, KLdiv )
+dummy = min( KLdiv )
+idxMin = which.min( KLdiv ) 
+plot( s_[idxMin], KLdiv[idxMin] )
+
+tmp = p_[, idxMin]
+tmp = tmp / sum( tmp )
+plot( X, tmp )
+x = seq(min(X), max(X), len = J);
+tmp = market.pdf(x)
+tmp = tmp / sum(tmp)
+plot(x, tmp)
+plot(market.CVaR95, 0)
+plot(view.CVaR95, 0)
+
+# Entropy posterior from extreme view on CVaR: Newton Raphson approach
+
+s = emptyMatrix
+
+# initial value
+idx = as.matrix( cumsum(p) <= 0.05 )
+s[1] = sum(idx)
+posteriorEntropy = EntropyProg(p, t( idx ), as.matrix( 0.05 ), rbind( rep(1, J), t( idx * X ) ), rbind( 1, 0.05 * view.CVaR95) )                                 
+KLdiv = as.matrix( posteriorEntropy$optimizationPerformance$ml )
+p_ = posteriorEntropy$p_
+                                                                               
+# iterate
+doStop = 0
+i = 1
+while ( !doStop ) {
+  i = i + 1
+  
+  idx = cbind( matrix(1, 1, s[i - 1] ), matrix(0, 1, J - s[i-1] ) )
+  posteriorEntropy1 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( t(idx) * X) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s]  = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  
+  idx = cbind( matrix(1, 1, s[i - 1] + 1 ), matrix(0, 1, J - s[i - 1] - 1) )
+  posteriorEntropy2 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( t(idx) * X) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s1] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  
+  idx = cbind( matrix(1, 1, s[i - 1] + 2 ), matrix(0, 1, J - s[i - 1] - 2) )
+  posteriorEntropy3 = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( t(idx) * X) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  # [dummy, KLdiv_s2] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  
+  # first difference
+  DE  = posteriorEntropy2$optimizationPerformance$ml - posteriorEntropy1$optimizationPerformance$ml
+  
+  # second difference
+  D2E = posteriorEntropy3$optimizationPerformance$ml - 2 * posteriorEntropy2$optimizationPerformance$ml + posteriorEntropy1$optimizationPerformance$ml
+  
+  # optimal s
+  s = rbind( s, round( s[i - 1] - (DE / D2E) ) ) 
+  
+  tmp = emptyMatrix
+  idx  = cbind( matrix( 1, 1, s[i] ), matrix( 0, 1, J - s[i] ) )
+  tempEntropy = EntropyProg(p, idx, as.matrix( 0.05 ), rbind( matrix(1, 1, J), t( t(idx) * X) ), rbind( 1, 0.05 * view.CVaR95 ) )
+  # [tmp.p_, tmp.KLdiv] = optimizeEntropy(p, [idx'; (idx .* X)'], [0.05; 0.05 * view.CVaR95], [ones(1, J); X'], [1; view.mu]);
+  p_ = cbind( p_, tempEntropy$p_ )
+  KLdiv = rbind( KLdiv, tempEntropy$optimizationPerformance$ml ) #ok<*AGROW>
+  
+  # if change in KLdiv less than one percent, stop
+  if( abs( ( KLdiv[i] - KLdiv[i - 1] ) / KLdiv[i - 1] )  < 0.01 ) { doStop = 1 }
+}
+
+# Display results
+
+N = length(s)
+
+plot(1:N, KLdiv)
+x = seq(min(X), max(X), len = J)
+tmp = market.pdf(x)
+tmp = tmp / sum(tmp)
+plot( t( X ), tmp )
+plot( t( X ), p_[, ncol(p_)] )
+plot( market.CVaR95, 0.0 )
+plot( view.CVaR95, 0.0 )
+
+# zoom here
+plot( t( X ), tmp )
+plot( t( X ), p_[, 1] )
[TRUNCATED]
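
For readers following the recursion above: for a candidate tail size s, the CVaR view enters EntropyProg as one inequality (the probability of the s left-most grid points is capped at 5%) plus two equalities (the probabilities sum to one, and the expectation restricted to those points equals 5% of the CVaR view). A minimal sketch of that constraint construction, assuming p, X, J and view.CVaR95 as defined in the demo and a hypothetical tail size s_try (not part of the committed code):

    s_try = 5                                       # hypothetical tail size
    idx   = as.matrix( seq_len( J ) <= s_try )      # indicator of the s_try left-most grid points
    A     = t( idx )                                # inequality A %*% p_ <= b:
    b     = as.matrix( 0.05 )                       #   tail probability capped at 5%
    Aeq   = rbind( rep( 1 , J ) , t( idx * X ) )    # equalities Aeq %*% p_ = beq:
    beq   = rbind( 1 , 0.05 * view.CVaR95 )         #   sum to one, tail expectation = 5% of the view
    # post = EntropyProg( p , A , b , Aeq , beq )   # posterior probabilities in post$p_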

To get the complete diff run:
    svnlook diff /svnroot/returnanalytics -r 2284


More information about the Returnanalytics-commits mailing list