From noreply at r-forge.r-project.org Sun Sep 1 00:34:32 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 00:34:32 +0200 (CEST) Subject: [Returnanalytics-commits] r2959 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes Message-ID: <20130831223432.D5319185C4B@r-forge.r-project.org> Author: braverock Date: 2013-09-01 00:34:32 +0200 (Sun, 01 Sep 2013) New Revision: 2959 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.Rnw Log: - fix source lines in vignettes Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.rnw 2013-08-31 21:49:13 UTC (rev 2958) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.rnw 2013-08-31 22:34:32 UTC (rev 2959) @@ -49,7 +49,7 @@ @ <>= -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R') +require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R') @ \section{Methodology} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.Rnw =================================================================== 
--- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.Rnw 2013-08-31 21:49:13 UTC (rev 2958) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.Rnw 2013-08-31 22:34:32 UTC (rev 2959) @@ -50,7 +50,7 @@ @ <>= -source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R") +require(noniid.sm) #source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R") @ \section{Background} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.Rnw 2013-08-31 21:49:13 UTC (rev 2958) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.Rnw 2013-08-31 22:34:32 UTC (rev 2959) @@ -48,8 +48,8 @@ @ <>= -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.GLM.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/na.skip.R') +require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.GLM.R') +require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/na.skip.R') @ \section{Methodology} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.Rnw 2013-08-31 21:49:13 UTC (rev 2958) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.Rnw 2013-08-31 22:34:32 UTC (rev 2959) @@ -49,7 +49,7 @@ @ <>= 
-source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R') +require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R') @ \section{Background} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.Rnw 2013-08-31 21:49:13 UTC (rev 2958) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.Rnw 2013-08-31 22:34:32 UTC (rev 2959) @@ -27,7 +27,7 @@ <>= -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R') +require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R') @ <>= Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.rnw 2013-08-31 21:49:13 UTC (rev 2958) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.rnw 2013-08-31 22:34:32 UTC (rev 2959) @@ -52,9 +52,9 @@ @ <>= -source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CalmarRatio.Norm.R") -source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/SterlingRatio.Norm.R") -source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/QP.Norm.R") +require(noniid.sm) #source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CalmarRatio.Norm.R") +require(noniid.sm) #source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/SterlingRatio.Norm.R") 
+require(noniid.sm) #source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/QP.Norm.R") @ \section{Background} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.Rnw 2013-08-31 21:49:13 UTC (rev 2958) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.Rnw 2013-08-31 22:34:32 UTC (rev 2959) @@ -49,7 +49,7 @@ @ <>= -source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.Okunev.R") +require(noniid.sm) #source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.Okunev.R") @ \section{Methodology} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.Rnw 2013-08-31 21:49:13 UTC (rev 2958) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.Rnw 2013-08-31 22:34:32 UTC (rev 2959) @@ -52,7 +52,7 @@ @ <>= -source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R") +require(noniid.sm) #source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R") @ \section{Background} From noreply at r-forge.r-project.org Sun Sep 1 00:43:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 00:43:27 +0200 (CEST) Subject: [Returnanalytics-commits] r2960 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes Message-ID: <20130831224327.9D1C7185C4B@r-forge.r-project.org> Author: shubhanm Date: 2013-09-01 00:43:27 +0200 (Sun, 01 Sep 2013) New Revision: 2960 Modified: 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.pdf Log: ./ Source remove clean vignette commit Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf =================================================================== --- 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf 2013-08-31 22:34:32 UTC (rev 2959) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf 2013-08-31 22:43:27 UTC (rev 2960) @@ -1,10 +1,10 @@ %PDF-1.5 %???? 1 0 obj << -/Length 191 +/Length 187 >> stream -concordance:OkunevWhite.tex:OkunevWhite.Rnw:1 44 1 1 5 1 4 44 1 1 2 1 0 3 1 5 0 1 1 5 0 1 2 6 0 1 1 5 0 1 2 1 0 1 1 1 2 1 0 1 2 1 0 1 2 5 0 1 2 1 1 1 2 1 0 4 1 1 2 1 0 1 2 1 0 1 2 6 0 1 3 1 1 +concordance:OkunevWhite.tex:OkunevWhite.Rnw:1 44 1 1 6 44 1 1 2 1 0 3 1 5 0 1 1 5 0 1 2 6 0 1 1 5 0 1 2 1 0 1 1 1 2 1 0 1 2 1 0 1 2 5 0 1 2 1 1 1 2 1 0 4 1 1 2 1 0 1 2 1 0 1 2 6 0 1 3 1 1 endstream endobj 4 0 obj << @@ -29,7 +29,7 @@ endstream endobj 20 0 obj << -/Length 2157 +/Length 2159 /Filter /FlateDecode >> stream @@ -51,7 +51,7 @@ 8??>2??"?8p????????u????Z?yGT?!?H????? ???KFej?U?AI??,\???5%??????8a??fi???[%d9?xx???_?H?wQ??CKM??xe??z?&???5f?l?@W?}}????Y?? j9???/K#? '??? {oVQCHx?????XTH?@???{?m????`??BNV?????N?e?%?m???>?l???t*??2?WX???1?G.?C?K ?[?C?????????O??????;????? {?'??@~!???? $JN???J????3??P?O??????fd???Hp?OA?z????-K??;E??? -?a???17??r??$(?t?[cA:=\?k?D????*=|.{??W???_=???>\9???????????!}?h?!?Y~C???.S4#S.??c?~???=c.???e(??b?????.?izC??9??w???? ??>:?????@????q?|??t?/3Z: m???????D?V??t?????5?H?K?.??0?m2???B?_?Is? +?a???17??r??$(?t?[cA:=\?k?D????*=|.{??W???_=???>\9???????????!}?h?!?Y~C???.S4#S.??c?~???=c.???e(??b?????.?izC??9??w???? ??>:?????@????q?|??t?/3Z: m???????D?V??t?s? ?Y?C!iV?$??"_?Po?>??x)?/?t? endstream endobj 26 0 obj << @@ -61,13 +61,11 @@ stream x??UKo?0 ??W???U?,;>l??+??0????!??6Hj??a???D?t????m??E??Q??lr|?PLJ?j?l??\E1???'z???]???:T? ???`tt ?8?9n????v? -7?D?@PT??4n?????????&V}???mWn)0?InE X?U?????O\sw??v=*?e?? 6??"????T??~M|Z???m?_]??$???J ??(&\??1?K?.U c?p#H??0?Rt??\$???`>?s2????u -???Q)?????l??0???woJ???????Y???/??^up}:?4????Mo?(?cu??g?|??? -?A?? -? -i?%??#?/??.??@?/??q}$?5?x9?uD?C????????????~6?!???{???7d?_ at fM?i?????:&}5?6?? 
-???w?O???????????T???z| -?3 C?U??\?O??2??????%E +7?D?@PT??4n?????????&V}???mWn)0?InE X?U?????O\sw??v=*?e?? 6??"????T??~M|Z???m?_]??$???J ??(&\??1?K?.U c?p#H??0?Rt??\$???`>?s2????#??N??????l{z????]?N???V+29V??? M?As???????X?? +q???P???t??pw>??Zw??sc??-^B?T'?:=??Dc??Qb?\c??|????7%?? +uo???????lE?:??z??g???X??????3H?}Dz?????\???4?}???op?`???X??>???b???:"?!????????f?hZ?{????G????v?2?/ ?&?4?W??{? +???H???????;?'r[j?w??qW?^b?m=> +????*?l???'????7'o???c%F endstream endobj 29 0 obj << @@ -101,76 +99,57 @@ >>/ColorSpace << /sRGB 33 0 R >>>> -/Length 8958 +/Length 8573 /Filter /FlateDecode >> stream -x??]??%?q??W????????$??2 C??? ? A???!?CofFd?{Nw?v??????+*?|?<n????n|???????q?b?????w??????o????_???????N>n??????w??y???o?.v??g???#??8????y???????????v???_ ?S???%?~??W???/????Xn??????}???Z?~?KV??C?I?1???w??w????? e%w#????-?p?z??%;-'-????qb????]0??????'?r??w????q???p???????_?+??????\??{K?x/M??????1-??/??w??G}f?? ?m5???k?z/???????V????????uy??p??????z??4??7??Cu??m??1?jl?????77?un?9/?V????????Vh??????=?u\g?rW??-??????6O,??79?{????????????????nh6n???????????SB[O??;?vOe=?|h?h?{??4??! -}[?}???.?z?O;?dO???e????i??????#??z??G?j??{?G?|??4?? ????s?J?7?;?k/vn:?????.??FH????T??go????X?|,???E^_8Ka??>`????????,???B??b????a{>???eg??'??y?X????x????.=5?]?S??wc~?=5????2???'?6?T???{x????X?????yfl;??1???sn?Q?s?4 y??4?????????O[;????j?{?m????t??s?b~?m?????s[??8?????  ??^?|??Zv?gc?9??f??s??q?x6???o?,?is?????.;??#?????????>e([????{??nW??q?3??q?????????#?c?#_GP*???#??O?????????????D%?a???>;Q?????J??y??a?g"?I????????????st??#??#??>???????:?j?G??X???Yxn:*???????[?u;??????|????y?s???'.?x6??"??????D?#?? <.???I95??U?^T?A?ZY???j??? -W?vW?????Gw?3.Z???l?Z??p?A??]?????W??bIB -3???????8??[d5aQb'\7?4??p?????@?p~7???]?X??'?C -L??^??? -b????!_?Ma?# ????=???bX?8b????q???}?a9 ???o?WML????m??r?Cf?[????@?_?hC?}?|#??????DG???????ts?C????00?g?Hf???????????U???)??????????T?@?2/????????5?1\??t?R?6?????,?????M??>??*;?%G?b???? -Oe???? -?? ??%?{?V???????F?`??????tR?28{???|?f?????:Y{F\7?k? -?R?;??8>? 
-?h??Mv}??m??l?6;???z???0~F??x???-,P??a????i;~dP#??n}??^i?n???O4?????'5??B??{?x??T??????]?????????>????S?n?b????l?Fq?0c?K???IGa?kr????H6|?????????d?z????'?~??4#F?D? 7????????Xk????hE??5????????wR7?X?????K??'?????_??w????-??=???=?[?Qq???DZqv?????o?3? ?w???T???o????F?%????????s???k?5O?G?$W|?GGO??_4JK??o??'?^?O?u?????zW6z?T??8????.????~x??(+^?x?n&??8a?<+^??i?m??j ???????/?????\W\???y ?W???U6?d?K6?d?K6???W???_u??!????W??j??.?7????<??t??+_:??/|?M_??????/??a?k?????u[?? _m?????? _m?W?S?x?k???????K?=n?+^?u at K\????KD^????/ -???W?6}?q>??|m???????o??-yEX???U?R?????/?c????????B]??W??5p_???<$?K_?k?q9?r?|9f^}b? ???c??;_??r???c???X?????v??8??c?}io??:?e~Qq-???|??|???????????/?u??/??|??|??|1??????{]?1??8Aq???:1K?? ?S_???z!f?~b????eq?qNW0O>?ub??7??????'? ?C?pP_?y???????????90?$?k`????~?}??????}i??+_=?n4??)F??%??G?? ?????v?7w[%?|?D=$????R?:,?&`?k??v???O?6??+B_#??????.??\?????}??l|U???4lA?c`????&?K?!?:??-?X?nx8_&,??u????H?m?V1??T}???uK????d2?Czs_????V7=d+h???W??B ?ZW?:y6!*f???n?I?y!???? -???-w?mQ'?Y;???@?|??ub???E??m`KX? -?| -???|a??V?S ?????x????u?luE[?????:# -??K$??0???????"C_?;V?67?X?R7V???????/???g????X???i??^?V???*?W ?#[???6b?k`?7???p????????#?+?/???????m)?A?|)v??}?+???? 8?j?????Q???=??u?K?'???,?W?x?@_???Ru????y?+?|?W<??w1??z????????V6?>+_?_?g????/???M??X??y? ?*?OL???2????Y???? -??:V?????BX ??@c|?????2}]??Z?G??x?Z`??L?y*?'k|?y,Kl?,??3???????2qg|?????_????????yT??5c??????4???|?:???G??3??|^?b)???n/?G????|???'??5?f|??????9^T?L?????h??V+?????bQ??D???y?????6??mu#??G+?T?'?2????????>*??kG=??Q??e?N??y?v?? ??}i~P???A_???s?zm\???g?? -qF~?|R_????3?4???<~j???/???'?5?/??N??B?-?<%?s??0-?b???=??y?p??"??:?????0\? -??|???^?y?F}E_?Wt>??CB???r???/?O??"???[;?9???C??,?????S}?u_??x@;?\_???L?_'??B????T?????'=z???\???k??M??~?3??:0?? ?v?^?C??g? ??????41e??????W/??SMTYo??>?????????y?Y??? ???^??^??4? ????>???????/?kul?'wb?'7b?'c??X?????,?^Ov??d??????????X???7?Fp9>??k???7???#O??V?????????? -Y????o~?????q??=?c?????v?????m???????????X/???w?y_?d???c??c???>4aS?c????&l?l?O[? -?? 
??????O?n???!E?lV?????a%?|???w?O?Tt??7?O????>?/=?-??F?Gz???.???sa?D?????N?aw?|?e?????b??c????y?????????6??/?{????n*y?8?V????kNv?t?Nv??N???g?/??.?????_?}?f???????N~?n???}?N???m???hk}%??l??p???CR?-?C?T??S;??b?0???H?}1???.Q}&p$1?>r?0 -??*?????2?S8?????p?S?????UV& ??X1?`?!?????]J??b???P??#P?[??9??K?|^1????0?T?UX-???SN - ??nQ??]????0 -????N+??@?b??Y???)?t?b??N??@!?Q??n%?q?$???T??rF???ZL9?-I -?2H>?)G??a??7P?-?????#????r??LMR???bin????[????b t&LH?#a??j cE??_.R?z?9???PNq6?%? ?51U?r -? ~?N -G?1DD??l?E<]??pn#9?k#9?sm?}???Frz?Nr?:?_ru??FBH???H?` ^X ??,*9?q? ? ??????k W?:?B?c?? Q??m?+?hU?Rq#?m?,WBV??X???!????c?M??phe.,"???B)'???V?? |l???=??????????D???k????????*??C?=?????5?????d?????????h;?O;?d???=r?v????c??%/v?b?????z??/;?b????,v??????k[?u??j??p???????#??N???M???e~??^??>6?kZ?u?6?^v??n??e\?a????b?i?gY?Y6???????X??x?}m_[???eZ?u{??????$???8g;?c]??nv??]?f?e????????9$O?"???f???%m?z,?zl??{[?o? p??GL??{N?]iLv???????????e???>??7??p????k?Ky?G??a?`0?S???????,??v?z??????k??y????>_?????o??????????.??-DZ?}????c???????=Y?9??~?????GV?d?O??E???~xd??????.????????????f????_????{?j_?}??????????z~[??b?=???????????M?9~ \?=.?w{Z?i??j_?c;?/;?j??????????p?/???????b/????o???f_??;???^}? ??/?b/?????j??^W{??s??{??m?o?????????g???#?j???o?y|$\?K??????_???d?'??K???~,?????k]?uo_[?????Oy?????^????=????n??.?}?*?}??????????????????6????o???K???????O??>?y???C(?]??-K??e??r]?w???????????`?????[??i???>????????8????.?9?+i??J^??d?K|Kx?e?d?$.??p????]?????p?? ?e???X?g=??Y??????t;?Yn??[?O??1?X??p????? -?[M?????????????????????/???? ?????q -??(?>??s????????{???[{???_?{bkO7???POl???? y?? ????+w?????&??-a~??????????O/L??c????d?????]???S???d?????M??S????#g?R?Ok?'^_i?w????C|????????6???g?????;????????m5???>k?)?v?Y[M????[M??dl&% ??@??v>A8????O?Nv]e_e?????u???O?`??? ??M>?????xo?3[Ua???????????????mm<_xf+??R?????'??x???V?Y?\3????{/nL???8???(?????? 
!??V?????rk?j??imZU???{???x???-?Sp?x??E?t/~n:?a}?q????????????????????L???P?^??`?p??????gN???i?Z???u?h?????>???????N?5?????_n_?8Z????Wp??X?9??????????????~???q???????co??h?p???????;7???j? +??p???s??8???_??iT???^??5?K?~j??$=?????????u bz??/1??3??%^G??#F??tx>"?????|?0 ?#??(?u??,??wb9???0??-????t????g?18???}?hvc????? +d??>???f?I?T[???^????"?? ?-n?????????` +????w?2??R??????J?,?'????f?d?? SG??,??\????f\u??np}q????F~/?Zh?`?v?q????)??Q?g?j?b???P?@w?K?h??n????r?`????%??-?i????E????lL????8g?B?\y?????9??\n*[?? +?i$?\?]?;??|?8?? +??-?;X???G????????? ???'mh?m?]?gp?L???????????Fe?{??? +-&?8?????.Dy??`?R???P??DayKW??dp??(????????|???,??:G{?;????'???;?fd???v??????XH??U.??Sg+??K????0???????r8'+??? +?o??3?????S?j???4???^6??0FY???????^??a????????e?z????L?????l?8????(#?#????! '??(????h?????????}??????5?g???q?fG?;[????\??5?????????*9[??G?Q?/d????W4 +????5 +d?N +?' +??WT5j??!?8?Eg\?~s????????????S???????r??kZ?-?OZ??q??V?????zgYY????)?iu?U???U7???W????^??%?^??%?^M??W???W??O?+???^??+s?????U??vy?E???W k?6?j?5K?-???w?kp_??V???_mN??]/??[??!??Vv???9{??\???g?g&?9??K[?-??S/?$+??r_?{??sN+????W??e?z?,SnY??2?:?z??d49?^???N.}e?E>?????~9z?31?"'O??L???S?v?????qm&?^?????E????U?????z?8???u2?I??Y?W'?^`?????m?~,???lih?kb??/????J??w?????N??q?`?? +?8?W`y???HM?????????1N????h??G?? ???:/8?????F?<???/???ip?C9s?#???&p|?^>^?W???????>??W`?'????W?N??*????c??^h??"?????3??>U??? ?i;?d???>?p?G??????=?~??G3??d???d??&???????h??a??=????;?,?d?9???z9??d?????i?g???g?X=??^A???5?X?}???ZP?m?M????d?????????}l??C5?????6;?? ?:O?kk?:O?k?w:?????p??l?v?p??d?6?p??|?k? ???g^?^??y~?k? ?8O?k? w8O?k? ?7?V??+???Ol?????????o +[?????????Ol6?~l???'6K?>~lj~|f?????4??:w? +?n??mg>?N??r??/?g????N?q'?u??nd????1?-?6fu?c?|??? +??elg?tY????f?d? ;YhN?????v?dGaO?U???;??O???s?}???3=?}?j?? ?Wx?h?>a? +O.?`??6?[?Y?:?-??????ge]]??=+?Rtj??m?????,?1Q +3???????W?`?d??fj??,)6Q?3?6S??????(?L2?o??F?F?}V??H?N4)Q?Y ?U'???aV ??,1??R???diVI??d*Y???yV?Z?a?Y??'???J,>Q-?0?????R +'????u???????????f?F?O???D%HTBI??IT?T?L?*?W??d??g?W??? RN3?>???X?`?eZ$Q '(???DP? 
J8ah*T?rI?>?T?LP?>??P??NP$?????e??}?????+?AT?$(?????$*A???Zl???????SB??J????#???fE#U? 7?5m? s???`?O?,?4?*?5!?K??f %%+|?`??J?R% +2? +>Q0/?4e?,d?$?R5???`@?R??????? +>?=QHM????V???+?J%lU???k\Z??B? ????#??^*asWM???? Y??Y???:l?J?.R??3b?D???Z?6;?????^?#?.X??*a?@?vT??Y??????? ??m?7?uK???u?D?{@?M?F?[9????H2? +]?????? ?n?@b?????<@?? +??u?+?????mD?UC???rN)E?K?J$UBX2??`n???*,???d??H#VA??I???-???h?1AQ{?5?B6??K?"??*?0?????d}?? +G?:????yM%????? +?l/??P):???}???,C.~ Hb???M????*=??J??L?x??M?a4?6$???2?h?2%(?????pNO?$C?h??W(,???1E??F?C????8?????a????a??p-????p?????1??8*?vO???8?KQ?a??????w??????e`??C?9E?+?U.???y? +=]Q?/?y?<*?0"???*NC?UTq??@Q?ihM??????????y?????.?E?(????7 +?9q?`?????8>?y?/W???3?!?/?V??l??l??/fV?????G??Q?li?T??.?E?"??%?9?8e#)???? r? I?te@k^??1?)?#??9E?8e??tu?AtXpJX+Nk?vL???????V,}?)m?3?!,???5/???V_ ?????/??B +?91_? ?R&l.?c?K^?NKi?)-X??m???????-X??}*$6q????r,??B6??s_p??E??3????????,8???"????B?U???c??????-?x?'0???????P?V?e?t?i?VK??km??+?N?m?????n?v?3?8??\v??N????_?Y??{I????~N]??Z|?-t????l??i'.vY???m=?e?l??u?????b?i?K^???l??e?\L5?k_??/vD|?=qJy??j??????????k?WY?u????/{?m!?l/?]?b?U???_??m?9?k???????-?u??+n&??|??V????e??f?m?????j??}?Ov??V?M??_???????k?????????a????n????}??????????6???m???I???V??W????>??}?6?4?S??y?~????O???~d}????e??n_?????/%O????e??no??m?T{*?=????k^?5o????j_????q??????????????W?^??^??u}Y????jo?????????????Z??}?q????~?????????????i}?i??;??=???W?~????????????X?rl????????w????lO?f???y???b??????/??-r???}?e?e????#]1?e???O???z??????^?fo?bo?f?k??^??^????l?e??M????n_????_um???~??;wL???&N?5??{|e???=????foi??????m???????}??}|??v?q??d???%?????fo???z??}o{???????????l???{Z?=????}???i'?????????8????'{\? 
?d?y?????i?????Hy??????/?m????|p??k????1a??i?????-?'?b???????3:?}m?r?/n'Nv??????^???8???E??%,?Q??????x??kr??z?}?d?y???~?}???8#8?6V?????????7?????????,{(?}?~?????????b5O[??k????d??l?s?#[{???????V???????????????C?g????-??.???c?)??????d??m??????/???b??????{b???x,?'6?o??kb?-?O??CN???r,pk~???????\???n,?%6????Sb?5???k7oy???m???????J~E???>?V?o?=??=?????J~@??3??aO?k???/?z?a???j??????Hw0?'?|????mg%??)[+???O?Z?o?=?Z???????a?????~>??????X???_T8???n?i?uy>B?????????C?~x>?uq=??~????V,O??o??T?????V,????G,?O?????.???0VA?s??k??????67??,????`?tVC/???9?#??{T1^?i?x ????n???????ay??W???gWK???j???}??x??Q??? endstream endobj 35 0 obj @@ -215,7 +194,7 @@ /Type /XObject /Subtype /Form /FormType 1 -/PTEX.FileName (./OkunevWhite-004.pdf) +/PTEX.FileName (./OkunevWhite-003.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 40 0 R /BBox [0 0 432 432] @@ -1338,54 +1317,48 @@ endstream endobj 83 0 obj << -/Length1 1616 -/Length2 2837 +/Length1 1633 +/Length2 2976 /Length3 0 -/Length 3848 +/Length 3995 /Filter /FlateDecode >> stream -x??T 8?k?'uh$?H?&4?Y;???g+??;????Y??-?rR?(?TR????K????f??la?|/?N????????f?????????K3???)+?,<o?????5?{?,t#[9}k?|???I??O??u?E????]????6?H?G??i??2?D^??K??j?3??h???b7 ?CA???????rU???R]??? ?\???T[;??T{?.?I at W???W??*? ???y??W??y??n-%y?5V????k ?Zs???????sE?O?mq-???W??],{?Y????C'?{#|C -????,XE?????O!K?????Q??\C????B?i??????o?LJ??)?c\x??3I?#?? >????N??wv[???m??z$?W9N?????E??9????????????\??L???!%?{c?????1x?x -;???s??? -???/?/?nX?|;4bM ?W?:????Q????]??@?C??f?i? (?I?v?D???j??????4?=7????????$???????J+????7?!?lD??????Ofy?f??xzQ?????????Ow????!Z9 ?6?}Q ??C????#? B????qU??2Q??Xp??????pt????G?>3?^?}V??????Z????&hWf7W?~?|u[????L( ?V??w"3??+]n????????H?5???JT=!5?????n*m?kaZ7??q0%{?[HMd\???l??????T-?O???\? ??oy???3?{????T?_?i???Q[??3??)???u? -?7??F??y]|#???o??]H??-|??r??|?G^V^M?O6i?(?mO}}H?v???p???%E????Z2=?$?Wp|V???? -M9R?????-?"?k{_??9~??}h????W?????b_ozZ??s????+(F???5 ??-Fyz?"Tu????!wu??\w??{?$8_??y?? ->?U????~)?W??9?Z.?????? 
-?W?????Hqx??E_???,I9?prs????3?>??D???????.Q+??u?..?_?^?]??Kk?,??u?|[??uPg??{n?s?????K?W??bD?????kO+'2}zr??@??8gR__?/????Y????Fr????eex2?Hb/?\28gn??J(??????????T????a`3xI?$_j?S8U??!h?_?????dm?????u???9B,yMC?!z?[?l}??|u??<?#??W??N?p???W"? -??8??q7%?n???f????????2????^??Q?5p???? -R?{??? -m$y5n??Q????y??7_4?zl*C????7ow??-?p|F??}???x?<@y?mEQ?j?2?:??A?b???m???%?V?JX?G?_?{w\??? W?m?TV???e?r?tr??K????t??????A??q? ?}d|?^o?R???^?w?????NON} ??(??????b?l?I??????'?CR?6???8?????~(>Y??i????Ci3?{???psF?dp??{?1VhSO???!??\C??LS???pzrV??*/???)9_? -o??????? +x??t 8?k?EI????-?,?led?P?d????xg7d)?PY: +EJ?E?r,e??)k?N?d?. E????t?????k?kf?????????#?m???1????0K +???Lm?8??P?`p????????? &D????0e???X?? +{??`???:z ??`t? =???[u`/????TZ??a!y~?PD%???????? ?H?[??G2 ??J?@V?o!P>,M??p8??:???[I?@,?? X? ???i???????np??Y?A????a?????? +`O??d??U`?8V?3???R ^v&?T?`o? Q@?o?? +d???D$P?T??@?(/??|u?7??+??DDc1??eI#z) Rfs?dJ??aSp?~f$"uB?4??r??'2??K2Hl? ??l??l??@??0o?ha0?]H?@?z)?c +\6b?`DCh0?J?? 0"???`0?, +?????  ? " ??!X?Wt???H?P ??A? `?>??F&?D?)A???-F[[9?????H?i41??jX@ +?? h?4m]- ??(???????#@+w???g????w H?~?X? ???(?g??"? ?_??????/??y?]?????????P???????N??(A+ d??,d3l??~??????-H?????Z??????????x($??XD?????$:??}T&???jX ??6d??~???D??l???=?9L???V?? + B? ?/???Ev?.?6?V??,?@??d*Cp???X?? ?@?]$????q?????0 Y???B%C?:?p@e3~?Z?'????0?9:F???h???H2 T?/$  ?wMA???Q??a??? A8?_aw?? ?;?[%?lyd??)??????? Q?u;???{???s?? G???)T=?]w??\*????s??N?c\?:pz?[tw?[???$???]H?i=??K??^??!M???#L6n?????S&yKN??p?Z?6??'????, ??~??3?V?`T}O???k??????'??8????=???2???)????????>{????~sKu5?>UfxX?f?M??2X?-m?????U?o???:?-? Gy1??%diJ?? +G????#?Ec??,o??z_??:??T?vK?W~cc?????`]?>T????ytS???????=;\?FW????'?6??rjB)i?b?????????k?&????|?g?O??2?f????AA?s?id??f?vT<??[???>/? yB.??Sff?U??r?h??C????"??[? +?? 
+????'?f???)??3G b??c???????69??g?t???t???-????????????\??#]EQ?-?Ew???v???>`U ????zQ(A%??;:??lWB_??\Pxe?/?xjO??PM?????W'????&b'r?F'??Y?m/0??;m?????=???.W?o?????d?s/??????h]??l`???7???????"?{??W"a???sm?7?XVy?v_?j?]???#?:??J??,???'?l?q??zuGN?f?f?Q??MN?1Y???m +??XJ??F??+?%????3?2??a??????q???+??H????_ ???u??>???. J????SM?6k??AI>ueG?1?????2T?????}B;????H??d???N?*m??????~?T +v5?MQa???l???????d????7??x?? +t?O$?s?,??{?N? /?s?S???[???????zU::?%?Z?i?M??d e??u +=?w'?I]8???r??ef!?i??rf?Wu?]???wnO?)?????????}??=???ZL?@??]-???+r????F5mI?-??t??d0o??M]??*?????q?9rmM[yqU???R??G??_??E?Z?j??? ????>?`M ????????Z??2?????H endstream endobj 86 0 obj << /Producer (pdfTeX-1.40.14) /Creator (TeX) -/CreationDate (D:20130901053508+08'00') -/ModDate (D:20130901053508+08'00') +/CreationDate (D:20130901063854+08'00') +/ModDate (D:20130901063854+08'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.1415926-2.5-1.40.14 (TeX Live 2013/W32TeX) kpathsea version 6.1.1) >> endobj @@ -1393,32 +1366,32 @@ /Type /ObjStm /N 61 /First 475 -/Length 3097 +/Length 3095 /Filter /FlateDecode >> stream -x??ZYs7~???c\[&?k??*I??K?"?W\~????D"???_?_7??p(jE???M???n????P???????$t??? ??0Nx??$B??*}V?????R?Z????:|G'LZ{#,d???hk?pZh?pFh? ?d?E????hr???h??0ZG?h???????????*? ?[||;,?????U?C??Jx?????)-?&? -0??????"?E?G"U???????Q?I"f??H &#??g'R(??-?? +?? -,(??:XP???,m???????>??? i?`?????e ??0~L?$? 9v=????W??????#???Y5?M????:?????'?j?????t???:???w0}?y???~??%?C?x?H?} ?i???? Z?9`_!??\? jN2??3???BR??9? ??y8???xC{?B?T_f??|?7"`T?? ????Ll????i??.??,???=??[? -k.w?d???k?p??????{,toR?g?????U???? -?G!?Gj??6>?8v2?]`?H??^ -??K;?-d??????{??@?@???RfV'??T?w^?.N ?????????|???1v?? ]?o?????????6u?x??v??>wq,O??S???H)??19?????gI? -]????.?m?8?? ?mb??8??`N?????N[?7[:?v???a+??a????)?P??z6??c;Y?<?^ci6?m?????L?#0?Y????b`!?[G?F?3????M?)? ??}?B?s?3g??=???Hw }????j?@<??M"?(xR???/6? -? -??c???????R?e?'??dYR???| ?? ?????????qXG??\^???&?u?????????? ?????D?&???=??J???@A?EnL?v:?t????D??Q?gT???c??>$? ???&m?M?"?? 
^??Ig4{???y?^"?.0??ak?2K??%lF?b1?W}???c?d!?)??Z?)?d?\T??%?S??=?????}??z??? - ?u=?????7?????Ty???.???????`( ??#?xg?\?o??hor?/?J?k?a?w???bo[?EI??Zr4?????c|N?>;?|5?}8P?-m?!+C???????^???e.??&zayW??Adg?e????Z???$?k???o{UX8:p?t?#??B??j??f>?6{?p???9d?l?N7{?d?? ??M??9-y????3???qJ??6e2) -K??%'??o? ?U??dx5O?M?? ???gG?????? [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 2960 From noreply at r-forge.r-project.org Sun Sep 1 01:18:49 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 01:18:49 +0200 (CEST) Subject: [Returnanalytics-commits] r2961 - pkg/PerformanceAnalytics/sandbox/pulkit/data Message-ID: <20130831231849.859351853CF@r-forge.r-project.org> Author: pulkit Date: 2013-09-01 01:18:49 +0200 (Sun, 01 Sep 2013) New Revision: 2961 Removed: pkg/PerformanceAnalytics/sandbox/pulkit/data/data.csv pkg/PerformanceAnalytics/sandbox/pulkit/data/data1.csv pkg/PerformanceAnalytics/sandbox/pulkit/data/data3.csv Log: documented data set Deleted: pkg/PerformanceAnalytics/sandbox/pulkit/data/data.csv =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/data/data.csv 2013-08-31 22:43:27 UTC (rev 2960) +++ pkg/PerformanceAnalytics/sandbox/pulkit/data/data.csv 2013-08-31 23:18:49 UTC (rev 2961) @@ -1,152 +0,0 @@ -0.0119,0.0393,0.0178,0.0791,0.0189,0.0213,0.0191,0.0573,0.0281,0.015,0.018,-0.0166,0.0317 -0.0123,0.0298,0.0122,0.0525,0.0101,0.0084,0.0122,0.0175,-6e-04,0.0034,0.0118,0.0426,0.0106 -0.0078,-0.0021,-0.0012,-0.012,0.0016,-0.0023,0.0109,-0.0119,-0.0084,0.006,0.001,0.0778,-0.0077 -0.0086,-0.017,0.003,0.0119,0.0119,-5e-04,0.013,0.0172,0.0084,-1e-04,0.0122,-0.0129,9e-04 -0.0156,-0.0015,0.0233,0.0315,0.0189,0.0346,0.0118,0.0108,0.0394,0.0197,0.0173,-0.0737,0.0275 -0.0212,0.0085,0.0217,0.0581,0.0165,0.0258,0.0108,0.0218,0.0223,0.0231,0.0198,-0.0065,0.0225 -0.0193,0.0591,0.0234,0.056,0.0247,0.0307,0.0095,0.0738,0.0454,0.02,0.0181,-0.0429,0.0435 
-0.0134,-0.0473,0.0147,-0.0066,0.0017,0.0071,0.0087,-0.018,0.0107,0.0079,0.0103,-0.0072,0.0051 -0.0122,0.0198,0.035,0.0229,0.0202,0.0329,0.0119,0.029,0.0429,0.0197,0.0183,-0.0155,0.0334 -0.01,-0.0098,-0.0064,-0.0572,0.0095,0.0061,-0.0032,-0.0142,0.001,0.0094,0.0079,0.0572,-0.0099 -0,0.0133,0.0054,-0.0378,0.0041,0.0134,0.0053,0.0106,-0.0026,0.0223,0.0111,0.0217,-0.0034 -0.0068,0.0286,0.0073,0.016,0.0066,0.0154,0.0079,0.0264,0.0104,0.0158,0.0082,0.0161,0.0089 -0.0145,0.0104,0.0095,-0.0429,0.006,0.0055,-0.0026,-0.005,0.0013,0.0055,0.0132,0.0014,-0.0036 -0.0146,-0.0065,0.0227,0.0339,0.0135,0.0294,0.0098,0.0128,0.0342,0.0212,0.013,0.0155,0.0256 -0.0144,0.0122,0.0252,0.0318,0.0179,0.0263,0.0128,0.057,0.0336,0.0164,0.0145,0.0637,0.0373 -0.0126,-0.0296,0.0165,0.0041,0.0067,0.0104,0.0075,0.0034,0.012,0.0139,0.0145,0.0657,0.0125 -0.0056,0.0193,6e-04,-0.0825,0.008,-0.0083,0.004,0.0095,-0.0087,-9e-04,0.0053,0.1437,-0.0072 --6e-04,0.0051,-0.0047,-0.0422,0.0108,2e-04,-0.008,0.012,0.0167,0.0072,0.0026,-0.0053,0.0021 -0.006,-0.001,-0.0069,0.0019,0.0012,-0.0037,0.0106,0.0058,-6e-04,7e-04,0.0011,0.0343,-7e-04 --0.0319,0.0691,-0.0836,-0.1922,-0.0107,-0.0886,-0.0143,-0.0263,-0.0552,-0.0544,-0.0341,0.2463,-0.0616 --0.0196,0.0454,-0.0215,-0.0395,0.0061,-0.011,-0.0362,-0.0059,0.0206,0.0076,5e-04,-0.0376,-0.0037 --0.0214,4e-04,-0.0029,0.014,0.0052,0.0091,-0.0801,-0.0223,0.0169,0.0159,-0.014,-0.1077,-2e-04 -0.0269,-0.0089,0.0164,0.043,0.0158,0.0244,0.0052,0.0194,0.0291,0.022,0.0198,-0.0756,0.022 -0.0113,0.0221,0.0108,-0.0098,0.0209,0.0219,0.012,0.0233,0.0408,0.0224,0.0164,-0.0531,0.0222 -0.0219,-0.0167,0.0181,-0.012,0.0101,0.0201,0.0158,0.0086,0.0258,0.0112,0.0195,-0.0665,0.0202 -0.0082,0.0197,-0.0021,0.0102,0.0023,-0.0042,0.0208,-0.0111,-0.0169,0.0036,0.0085,0.0833,-0.0063 -0.0136,-0.0065,0.0159,0.0585,0.0033,0.0193,0.016,0.0024,0.0229,0.0133,0.0116,-0.0154,0.0213 -0.0243,0.021,0.0418,0.063,0.0107,0.0429,0.0106,0.0329,0.0312,0.0218,0.0238,-0.0375,0.04 
-0.0166,-0.015,0.0207,0.0061,0.0089,0.0215,0.0072,-0.0055,0.0095,0.021,0.0146,9e-04,0.0119 -0.0102,0.0234,0.0273,0.0654,0.0168,0.0297,0.0088,0.0214,0.0315,0.0222,0.0148,-0.0412,0.0282 -0.0101,-0.0051,0.0084,-0.0061,0.0135,0.0096,0.0051,-0.0018,0.0177,0.0147,0.011,0.0092,0.0088 -0.0048,-0.0027,0.002,-0.0147,0.0095,-0.0027,-0.0028,-0.0061,0.0022,0.005,0.0062,0.0468,0.0028 -0.0096,0.0064,-0.0041,-0.0069,0.0095,0.009,0.0092,-2e-04,0.0113,0.0116,0.0105,0.0401,0.0052 -0.0045,-0.0354,0.0027,0.0288,0.0066,0.0054,0.0087,0.0073,0.0212,0.0096,0.007,-0.013,0.013 -0.0124,0.0166,0.022,0.0692,0.0133,0.0284,0.0106,0.0405,0.0481,0.0237,0.0137,-0.1239,0.0483 -0.014,0.0142,0.03,0.123,0.0198,0.0286,0.0097,0.0612,0.0745,0.009,0.0183,-0.1137,0.0622 -0.0227,0.0128,0.0088,0.0077,0.0075,0.0088,0.0041,0.0021,0.0075,0.0143,0.0173,0.0427,0.0169 -0.0267,-0.0022,0.0421,0.0528,0.0253,0.0346,0.0097,0.0408,0.0699,0.0239,0.0185,-0.134,0.0666 -0.0243,-0.0138,0.0103,0.0318,0.0134,0.0069,-0.0061,-0.0104,6e-04,0.0131,0.0163,-0.023,0.0039 -0.0223,-0.0241,-0.0101,-0.0541,0.0168,-0.0059,-6e-04,-0.0304,-0.0201,0.0188,0.0092,0.1028,-0.0269 -0.0149,0.0114,-0.0132,-0.0433,0.0062,-0.0034,0.0107,-0.007,-0.0097,0.0146,0.008,0.0704,-0.0122 -0.0179,-0.0124,0.0203,0.0334,0.0171,0.0268,0.0058,0.0154,0.0349,0.0167,0.0176,-0.1107,0.0311 -0.0093,-0.0131,0.0064,0.0025,0.0063,0.0057,0.0018,0.0037,6e-04,0.0116,0.0084,0.0553,-0.0022 -0.0162,0.0189,0.014,0.0368,0.021,0.0173,0.0107,0.0248,0.0345,0.0157,0.0157,-0.1135,0.0267 -0.0141,-0.0208,-0.0019,-0.0462,0.0058,0.0048,0.0076,-0.0149,-0.0016,0.0137,0.0075,0.1204,-0.0069 -0.0052,0.0075,-0.0073,-0.0256,0.004,-0.0068,6e-04,-0.0024,-0.0084,0.0026,-4e-04,0.0784,-0.0104 --0.0081,0.0425,-0.0209,-0.0385,0.0045,-0.0136,0.0066,0.0125,-0.0153,0.0102,6e-04,0.1657,-0.0205 --2e-04,0.0682,1e-04,0.0116,0.016,0.0127,0.0048,0.0472,0.0248,0.0125,0.0075,0.0063,0.0133 -0.0344,0.0025,0.0308,0.0586,0.0075,0.0298,0.0163,0.0214,0.0165,0.0111,0.0333,-0.0271,0.0223 
-0.0182,-0.0016,0.01,-0.0221,0.012,0.0045,0.0054,-0.0072,-0.0264,0.0054,0.003,0.1021,-0.0089 -0.0162,0.0438,-0.0037,-0.0175,0.0108,-0.0042,0.0051,0.0038,-0.0199,-0.0061,-0.0011,0.062,-0.0068 -0.0157,-0.0362,0.0048,0.0114,0.0075,0.011,0.0094,0.0049,0.0246,0.0058,0.0174,-0.0991,0.0104 -0.0033,0.0081,0.0235,0.0278,0.0077,0.0185,0.0068,0.0032,0.0043,0.0161,0.0141,-0.013,0.008 -0.0012,-0.0077,0.036,0.016,0.0017,0.0063,0.0017,0.0017,0.0019,-0.0087,0.0019,0.011,0.0013 -0.0091,-0.004,0.0073,-0.0286,0.0031,0.0049,0.0054,-0.004,-0.0144,0.0079,0.001,0.0353,-0.004 -0.0142,0.0153,0.0106,0.003,0.0094,0.009,0.0105,6e-04,-0.0096,0.0099,-0.0031,0.0752,0.0019 -0.0078,0.0246,-0.0014,-0.0425,0.0023,-0.0254,-0.0013,-0.007,-0.0348,-0.0267,-0.0221,0.0941,-0.0142 -0.0117,0.0336,0.0103,0.0278,0.0058,0.0148,0.0134,0.0208,0.0099,0.0085,0.0164,-0.0298,0.0095 -0.008,-0.0543,0.0086,0.0483,0.0055,0.0105,-0.0024,0.0021,0.02,0.0014,0.0136,-0.0655,0.0058 --0.0094,0.0148,0.0015,0.0421,0.0056,0.0107,0.0053,0.0138,0.018,0.0045,0.0097,-0.0251,0.0099 -0.0148,-0.0072,0.0186,0.0273,0.0065,0.0078,0.0086,0.0069,-0.0037,0.0077,0.0097,0.0343,0.003 --0.0049,-0.0202,-0.0033,0.0181,-7e-04,-0.0071,0.0056,-0.0035,-0.0123,-0.0044,-0.0011,0.039,-0.0015 -0.0053,9e-04,0.0052,0.0331,0.0047,0.0153,0.0045,0.0064,0.0155,0.0073,0.0145,-0.0446,0.009 -0.0096,-0.0104,0.0139,0.0144,0.0076,0.0046,0.0113,0.0098,-0.0042,-0.0013,0.007,0.0483,0.0052 -0.0033,0.027,0.0091,1e-04,0.0053,1e-04,0.0099,0.0123,-0.0034,0,0.0031,0.0346,0.005 -4e-04,0.0655,-0.0117,-0.0292,0.0022,-0.0283,0.0069,-0.0022,-0.0249,-0.017,-0.0107,0.0548,-0.0095 --0.0159,0.0413,-0.0133,-0.0309,-0.0013,-0.03,0.0057,-0.0078,-0.0389,-0.0174,-0.0185,0.0644,-0.014 -0.005,0.022,9e-04,0.0119,0.0069,0.006,0.0097,0.0063,0.0041,0.0061,0.0058,0.0015,0.0037 -0.0146,0.0284,-0.0044,-0.0252,0.0015,-0.007,-0.0033,0.0054,-0.016,-0.0028,-0.011,0.0731,-0.0033 -0.0104,-0.0376,-0.0031,0.0154,0.0016,0.0031,-0.0063,-0.0086,0.0123,0.0032,0.0084,-0.0405,-0.0031 
-0.0251,-0.0164,0.0239,0.019,0.0025,0.0216,0.0054,0.0047,0.0224,0.0054,0.0185,-0.0547,0.0106 -0.0157,0.0489,0.0222,0.0048,0.0094,0.0044,0.0153,0.0192,-0.0149,0.0046,0.0023,0.0443,0.0077 -0.0283,0.0441,0.0243,0.0012,0.0083,0.0154,0.0106,0.0182,5e-04,0.004,0.0067,0.0162,0.0072 -0.0133,0.0402,0.0092,0.0084,0.0024,0.0026,0.0079,0.0166,-0.0037,0.0018,-4e-04,0.013,0.0031 -0.0089,-0.0445,0.0113,0.0019,0.0015,0.0083,0.0019,-0.0122,0.002,-7e-04,0.0049,-0.0075,-4e-04 -0.015,0.0065,0.0345,0.045,0.0031,0.0272,0.0091,0.0117,0.0298,0.0099,0.0186,-0.0656,0.0134 -0.0136,0.049,0.027,0.0433,0.0107,0.0301,0.0207,0.0397,0.0362,0.0154,0.0212,-0.0499,0.0205 --0.0058,-0.0192,0.0267,0.0268,0.0034,0.0181,0.0044,0.0056,0.0128,0.0048,0.0071,-0.0162,0.0068 --0.0072,-0.0171,0.0117,0.0104,-6e-04,0.0119,-0.0092,-0.0035,0.0118,0.0053,0.0041,-0.0361,0.0025 --0.0087,0.0078,0.0137,0.0374,0.0031,0.0133,0.0043,0.0202,0.0179,0.007,0.0058,-0.0354,0.0078 -0.0171,-0.0019,0.0242,0.0264,0.0078,0.0133,0.0105,0.0215,0.0094,0.0077,0.0086,0.0136,0.0121 -0.0146,0.0104,0.0267,0.0259,0.0115,0.0191,0.0035,0.0111,0.0299,0.0111,0.0159,-0.0656,0.0152 -0.0092,0.0018,0.0154,0.0096,0.0046,0.0116,0.0069,0.0031,0.013,0.0044,0.0102,-0.0136,0.007 -0.0054,0.0381,0.0198,0.0403,0.0054,0.0172,0.0101,0.0293,0.0191,0.0098,0.0127,-0.0178,0.0139 -0.0119,0.0199,0.0301,0.0251,0.0109,0.0234,0.0092,0.0117,0.0192,0.0097,0.0146,-0.009,0.0156 -0.0017,0.0529,0.0075,0.0253,0.0063,0.0113,0.0084,0.015,0.0123,0.0051,0.0057,0.0018,0.0111 -0.0061,-0.0051,0.0046,0.0172,0.0032,0.0016,3e-04,0.0064,0.0041,0.0017,0.0038,-0.0148,0.0043 -0.002,-0.0532,0.0093,-0.0252,-0.0082,2e-04,0.0062,-0.0178,-0.0165,-0.0039,-0.0045,0.0384,-0.0068 --0.0128,-0.0118,-0.001,-0.0181,0.0024,-0.0023,0.004,-0.0081,-0.0035,0,-0.0037,-0.0024,-0.0082 --0.0106,-0.0316,0.0202,0.002,0.0042,0.0113,0.0055,-0.0019,0.0091,0.0017,0.0022,-0.0051,0.0034 -0.0013,-0.0119,0.0019,-0.0027,6e-04,-0.0082,0.0062,-0.0014,-0.0154,-0.0092,7e-04,0.0638,-0.0049 
-0.004,-0.0084,0.0088,0.0133,-9e-04,0.0035,0.0036,-0.0039,-0.0022,0.0011,0.0031,0.0126,-0.001 --0.0017,0.022,0.0104,0.028,0.0085,0.0103,0.0012,8e-04,0.021,0.0042,0.0052,-0.0216,0.0099 --0.0044,0.0358,0.0143,0.0185,-5e-04,0.0124,0.0028,0.0138,0.0074,0.0074,0.004,-0.0092,0.0068 -0.0081,0.0475,0.0337,0.0328,0.014,0.0306,0.0075,0.028,0.0308,0.0164,0.0149,-0.0574,0.0244 -0.0056,0,0.0266,0.0201,0.0058,0.0244,0.006,0.0033,0.0178,0.0133,0.0099,-0.0391,0.0145 --0.0096,-0.0438,0.0037,0.0143,0.0081,4e-04,0.0044,-0.0047,-0.0017,0,0.0012,0.0387,6e-04 --0.0058,5e-04,0.0134,0.0346,0.008,0.0144,0.0085,0.0171,0.021,0.0065,0.0081,0.0118,0.0136 --0.014,-6e-04,0.0032,-0.0197,0.0019,-4e-04,0.0024,-0.0027,-0.0096,0.0032,-0.0042,0.0244,-0.0044 --0.0316,-0.0354,-0.0052,-0.0049,-0.003,-0.0128,-3e-04,-0.008,-0.0184,-0.0105,-0.0108,0.0393,-0.0141 --0.0133,0.0232,6e-04,0.0072,0.0047,0.0065,-0.001,0.0088,0.0115,0.0095,-2e-04,-0.0475,0.0018 -0.0107,0.026,0.0133,0.016,0.0081,0.0133,0.001,0.0116,0.0195,0.0085,0.0095,-0.0032,0.0131 -0.0164,-0.0013,0.0173,0.0257,0.0078,0.0215,0.0081,0.0119,0.0265,0.0115,0.0149,-0.0242,0.0134 -0.0066,0.01,0.0124,0.0152,0.0062,0.0092,0.0036,0.0083,0.0097,0.0061,0.0053,0.0259,0.0079 -0.0142,0.0079,0.0112,0.0402,0.0087,0.01,0.0062,0.0269,0.0222,0.0035,0.0122,0.0198,0.0147 --0.0015,-0.0092,-0.0032,-0.023,1e-04,-0.0173,0.0057,-0.0074,-0.0174,-0.0145,-0.0038,0.0233,-0.0149 -4e-04,0.0379,0.01,0.0279,0.0061,0.0125,0.0015,0.0164,0.0211,0.0112,0.0067,-0.03,0.016 -0.0092,-0.0153,0.0122,0.0284,0.0068,0.0142,0.0054,0.0135,0.0249,0.0138,0.0126,-0.0035,0.0191 -0.025,0.0174,0.0253,0.0526,0.0115,0.0341,0.0093,0.0258,0.0381,0.0272,0.0238,-0.0288,0.0286 -0.0116,-0.0186,0.0065,0.0161,0.0046,0.0051,0.0041,2e-04,0.0016,0.0104,0.0073,0.0064,0.0037 -0.0107,0.0284,0.0172,0.0122,0.0098,0.0185,0.0055,0.0094,0.0238,0.0144,0.0157,-0.0139,0.0164 -0.0064,0.0387,0.0193,0.0365,0.0102,0.0164,0.0121,0.0238,0.0172,0.0119,0.0126,-0.0012,0.0171 
-0.0091,-0.0146,0.0086,-0.0389,2e-04,8e-04,0.0059,-0.0155,-0.0248,9e-04,-0.0025,0.0246,-0.0133 -0.0012,-0.0142,-0.0015,-0.0097,0.0063,0.0012,0.0036,-0.0015,-0.0062,0.0087,0.0021,0.0118,-0.0028 -0.0066,-0.0216,9e-04,0.0067,0.0051,-0.0011,0.0064,6e-04,-0.0031,0.0058,0.0017,0.0173,-5e-04 -0.0098,0.002,0.0099,0.0133,-9e-04,0.0112,0.0037,-0.0039,0.0114,0.0053,0.0092,-0.0156,0.0066 -0.0093,-0.0055,0.0033,0.0011,9e-04,0.0035,0.0014,-0.0067,5e-04,0.0041,0.004,-0.0236,-3e-04 -0.0054,0.0102,0.0194,0.0257,0.0065,0.0206,0.0067,0.0097,0.0194,0.0132,0.0132,-0.038,0.0163 -0.0092,0.0226,0.0179,0.0323,0.0075,0.0182,0.006,0.0199,0.02,0.0142,0.0129,-0.0268,0.0185 -0.0127,0.0146,0.0165,0.0291,0.0107,0.0168,0.0072,0.0116,0.0153,0.0133,0.0128,0.0039,0.0175 -0.013,0.0113,0.015,0.0079,0.0083,0.0201,0.0069,0.0061,0.0121,0.0191,0.0135,-0.0107,0.0121 -0.0117,-0.0144,0.0145,0.01,0.0051,0.0207,0.0106,0.0018,0.0082,0.0255,0.0114,0.0028,0.0096 -0.006,-0.0141,0.0108,0.0185,0.0101,0.0146,0.006,0.0027,0.0115,0.0063,0.0081,-0.0051,0.0096 -0.0026,0.0241,0.0164,0.0255,0.0089,0.0197,0.0071,0.0152,0.0198,0.016,0.0134,-0.0265,0.0163 -0.011,0.023,0.018,0.027,0.0121,0.0213,0.0055,0.0192,0.0224,0.0171,0.0156,-0.0199,0.0204 -0.0011,0.0229,0.0027,0.0236,0.0077,-7e-04,0.0048,0.0107,0.0077,-0.0053,0.01,0.0236,0.0082 --0.0053,-0.0122,-0.0056,0.0275,0.0051,-0.0032,7e-04,0.0116,9e-04,-0.0054,4e-04,0.0486,0.0041 --0.0145,-0.028,-0.0118,-0.0274,-0.0094,-0.0144,-0.0048,-0.0116,-0.016,1e-04,-0.0077,0.0092,-0.0222 -0.0161,0.0469,0.0095,0.0428,0.0123,0.0134,0.0164,0.033,0.0256,0.0131,0.0153,-0.0207,0.0199 -0.0177,0.028,0.0175,0.0485,0.0168,0.0214,0.0114,0.0304,0.0281,0.0191,0.02,-0.0026,0.0303 --0.0131,-0.0016,-0.0169,-0.0237,-0.0018,-0.0202,-0.0094,-0.0063,-0.0225,-0.0149,-0.0112,0.0719,-0.0148 --0.0077,0.0117,2e-04,0.013,0.0054,7e-04,0.0036,0.0104,0.0043,-0.0025,0.0022,0.0056,0.004 --9e-04,0.0255,-0.0233,-0.0503,-0.0112,-0.0271,-0.0012,-0.001,-0.04,-0.0126,-0.0118,0.0556,-0.0272 
--0.0083,0.062,0.0014,0.028,0.012,0.0084,-0.0049,0.0312,0.014,0.006,0.0064,0.03,0.0142 --0.0317,-0.0056,-0.0126,-0.0379,-0.0049,-0.0168,-0.0306,-0.0169,-0.0236,-0.0045,-0.0162,0.0192,-0.0262 -0.0076,-0.0078,0.0088,0.019,0.0059,0.0118,0.0187,0.0078,0.0223,0.0149,0.013,-0.0461,0.0097 -0.0107,0.0162,0.0137,0.0163,0.0126,0.0176,0.0103,0.0114,0.0227,0.0136,0.0159,-0.0142,0.0172 --0.0081,0.033,-0.0031,-0.0274,0.0156,-0.0113,-0.0027,0.003,-0.0164,-0.0109,-0.0084,0.0751,-0.0068 --0.0188,-0.0333,-0.0182,-0.033,-0.01,-0.0166,-0.0023,-0.0213,-0.0261,0.0011,-0.0125,0.0072,-0.0264 --0.0066,-0.0114,-0.0072,-0.0336,-0.0135,-0.0025,-3e-04,-0.0133,-0.0146,0.0051,-0.0023,-0.0215,-0.0156 --0.1027,0.001,-0.0518,-0.0982,-0.0285,-0.0627,-0.0506,-0.0313,-0.0675,-0.0276,-0.0538,0.0378,-0.0618 --0.1237,0.0345,-0.0775,-0.1331,-0.0044,-0.0625,-0.0867,-0.0157,-0.0629,-0.0245,-0.0692,0.117,-0.06 --0.0276,0.0214,-0.0435,-0.0391,-0.0587,-0.0301,-0.0308,0.0033,-0.0188,6e-04,-0.0209,0.0428,-0.0192 -0.0177,0.014,-0.0197,-0.001,5e-04,-0.0071,-0.0035,0.0118,0.0081,0.0162,0.0031,-0.0146,-0.0119 -0.0491,-0.0016,0.0082,-0.0112,0.0079,0.0132,0.0112,0.0029,-0.0017,0.0056,0.01,0.0282,0.006 -0.0164,-0.0031,-0.0122,-0.0133,-0.0046,-0.0091,0.0065,-0.0055,-0.0161,6e-04,-0.0016,0.0328,-0.0037 -0.0235,-0.018,0.0022,0.035,0.0021,0.0117,0.0057,0.0048,0.0188,0.0125,0.01,-0.0462,8e-04 -0.05,-0.014,0.0387,0.0663,-0.0012,0.0337,0.0221,0.0127,0.0375,0.0081,0.0342,-0.082,0.0092 -0.0578,0.0213,0.0504,0.0884,0.0146,0.0442,0.0365,0.0348,0.0516,0.0107,0.0392,8e-04,0.0312 -0.0241,-0.0147,0.0198,0.0013,0.0036,0.0123,0.0126,-0.0076,9e-04,0.0104,0.0101,-0.0094,0.0024 -0.0611,-0.0012,0.0311,0.0451,0.0042,0.0291,0.0322,0.0166,0.0277,0.0068,0.026,-0.0596,0.0153 -0.0315,0.0054,0.0244,0.0166,0.007,0.0207,0.0202,0.005,0.0157,0.0102,0.0162,-0.0165,0.0113 Deleted: pkg/PerformanceAnalytics/sandbox/pulkit/data/data1.csv =================================================================== --- 
pkg/PerformanceAnalytics/sandbox/pulkit/data/data1.csv 2013-08-31 22:43:27 UTC (rev 2960) +++ pkg/PerformanceAnalytics/sandbox/pulkit/data/data1.csv 2013-08-31 23:18:49 UTC (rev 2961) @@ -1,27 +0,0 @@ -Code,Mean,StDev,Phi,Sigma,t-Stat(Phi) -HFRIFOF Index,0.005516691,0.016969081,0.35942732,0.015835089,6.246140275 -HFRIFWI Index,0.008881851,0.020176781,0.304802238,0.019216682,5.190679363 -HFRIEHI Index,0.009858566,0.026444472,0.26510117,0.025498305,4.460055655 -HFRIMI Index,0.009527016,0.021496073,0.184350274,0.021127643,3.041856755 -HFRIFOFD Index,0.005179518,0.017416384,0.353548291,0.016291569,6.129496094 -HFRIDSI Index,0.009621101,0.018800339,0.545792492,0.015753187,10.56122157 -HFRIEMNI Index,0.005182009,0.009427888,0.164396537,0.009299616,2.703456292 -HFRIFOFC Index,0.004809119,0.011620459,0.455662847,0.01034398,8.302257893 -HFRIEDI Index,0.009536151,0.019247216,0.391629021,0.01770981,6.902140563 -HFRIMTI Index,0.008528045,0.021556689,-0.0188129,0.021552874,-0.305148009 -HFRIFIHY Index,0.007177975,0.017707746,0.483806908,0.015497372,8.972011994 -HFRIFI Index,0.006855376,0.012881753,0.505908165,0.011111637,9.587381222 -HFRIRVA Index,0.008020951,0.012975483,0.452790992,0.011569158,8.242977673 -HFRIMAI Index,0.007142082,0.010437017,0.298219544,0.009962104,5.067023312 -HFRICAI Index,0.007122016,0.019973858,0.578004656,0.016299336,11.48654001 -HFRIEM Index,0.010352034,0.041000178,0.359277175,0.038262633,6.243082394 -HFRIEMA Index,0.007989882,0.038243416,0.311226738,0.036344083,5.310865179 -HFRISHSE Index,-0.001675503,0.053512968,0.090737496,0.053292219,1.477615589 -HFRIEMLA Index,0.011074013,0.05084986,0.196931418,0.04985408,3.257468873 -HFRIFOFS Index,0.006834983,0.024799788,0.323053217,0.023470043,5.536016371 -HFRIENHI Index,0.010092318,0.036682513,0.201118844,0.035932974,3.329910279 -HFRIFWIG Index,0.009382896,0.035972197,0.231372973,0.034996096,3.857301725 -HFRIFOFM Index,0.005607926,0.015907089,0.042154535,0.015892949,0.684239764 -HFRIFWIC 
Index,0.008947104,0.039009601,0.050499002,0.038959829,0.820004462 -HFRIFWIJ Index,0.008423965,0.03629762,0.0953987,0.036132072,1.554206093 -HFRISTI Index,0.011075118,0.046441033,0.160831261,0.04583646,2.642789417 Deleted: pkg/PerformanceAnalytics/sandbox/pulkit/data/data3.csv =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/data/data3.csv 2013-08-31 22:43:27 UTC (rev 2960) +++ pkg/PerformanceAnalytics/sandbox/pulkit/data/data3.csv 2013-08-31 23:18:49 UTC (rev 2961) @@ -1,68 +0,0 @@ -Date;AORDA;Benchmark -2008-01-01;0.0389;-0.0612 -2008-02-01;0.0713;-0.0348 -2008-03-01;0.0417;-0.006 -2008-04-01;0.0743;0.0475 -2008-05-01;0.0324;0.0107 -2008-06-01;-0.0013;-0.086 -2008-07-01;0.0849;-0.0099 -2008-08-01;-0.0283;0.0122 -2008-09-01;0.2821;-0.0921 -2008-10-01;0.0964;-0.1683 -2008-11-01;0.0055;-0.0748 -2008-12-01;-0.0288;0.0078 -2009-01-01;-0.0078;-0.0857 -2009-02-01;0.0684;-0.1099 -2009-03-01;0.0599;0.0854 -2009-04-01;-0.038;0.0939 -2009-05-01;0.0419;0.0531 -2009-06-01;-0.0489;2e-04 -2009-07-01;-0.1022;0.0741 -2009-08-01;-0.0166;0.0336 -2009-09-01;-0.0884;0.0357 -2009-10-01;-0.0284;-0.0198 -2009-11-01;-0.0103;0.0574 -2009-12-01;-0.0118;0.0178 -2010-01-01;0.0054;-0.037 -2010-02-01;-0.0224;0.0285 -2010-03-01;0.0085;0.0588 -2010-04-01;-0.0294;0.0148 -2010-05-01;0.1343;-0.082 -2010-06-01;0.0779;-0.0539 -2010-07-01;0.0661;0.0688 -2010-08-01;0.0313;-0.0474 -2010-09-01;0.0367;0.0876 -2010-10-01;0.003;0.0369 -2010-11-01;0.0361;-0.0023 -2010-12-01;-0.0042;0.0653 -2011-01-01;0.0346;0.0226 -2011-02-01;-0.0068;0.032 -2011-03-01;-0.03;-0.001 -2011-04-01;-0.0148;0.0285 -2011-05-01;0.0372;-0.0135 -2011-06-01;-0.0358;-0.0183 -2011-07-01;-0.0056;-0.0215 -2011-08-01;0.1628;-0.0568 -2011-09-01;-0.0504;-0.0718 -2011-10-01;-0.0354;0.1077 -2011-11-01;0.0011;-0.0051 -2011-12-01;-0.0204;0.0085 -2012-01-01;-1e-04;0.0436 -2012-02-01;5e-04;0.0406 -2012-03-01;0.0169;0.0313 -2012-04-01;0.029;-0.0075 -2012-05-01;-0.0634;-0.0627 
-2012-06-01;0.0684;0.0396 -2012-07-01;-0.0198;0.0126 -2012-08-01;0.0399;0.0198 -2012-09-01;-0.0323;0.0242 -2012-10-01;0.006;-0.0198 -2012-11-01;-0.0074;0.0028 -2012-12-01;-0.0048;0.0071 -2013-01-01;-0.0117;0.0504 -2013-02-01;-0.0173;0.0111 -2013-03-01;0.0174;0.036 -2013-04-01;-0.0367;0.0181 -2013-05-01;0.0117;0.0208 -2013-06-01;0.005;-0.015 -2013-07-01;-0.0046;0.0495 From noreply at r-forge.r-project.org Sun Sep 1 01:21:34 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 01:21:34 +0200 (CEST) Subject: [Returnanalytics-commits] r2962 - in pkg/PerformanceAnalytics/sandbox/pulkit: R data inst inst/doc man Message-ID: <20130831232134.C0B3E1853CF@r-forge.r-project.org> Author: pulkit Date: 2013-09-01 01:21:34 +0200 (Sun, 01 Sep 2013) New Revision: 2962 Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/capm_aorda.R pkg/PerformanceAnalytics/sandbox/pulkit/R/psr_python.R pkg/PerformanceAnalytics/sandbox/pulkit/R/ret.R pkg/PerformanceAnalytics/sandbox/pulkit/data/capm_aorda.csv pkg/PerformanceAnalytics/sandbox/pulkit/data/psr_python.csv pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ProbSharpe.Rnw pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/capm_aorda.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/psr_python.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/ret.Rd Log: documented data Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/capm_aorda.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/capm_aorda.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/capm_aorda.R 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,6 @@ +#' @name capm_aorda +#' @title Data to test Cdar and other related functions +#' @description This data set will be used to validate the results of the paper +#' @docType data +#' 
@usage capm_aorda +NULL Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/psr_python.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/psr_python.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/psr_python.R 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,6 @@ +#' @name psr_python +#' @title Data to test Probabilistic Sharpe Ratio +#' @description This data set will be used to validate the results of the paper +#' @docType data +#' @usage psr_python +NULL Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/ret.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/ret.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/ret.R 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,6 @@ +#' @name ret +#' @title Return Series to test Rolling Economic Drawdowns +#' @description This data set will be used to validate the results of the paper +#' @docType data +#' @usage ret +NULL Added: pkg/PerformanceAnalytics/sandbox/pulkit/data/capm_aorda.csv =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/data/capm_aorda.csv (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/data/capm_aorda.csv 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,68 @@ +Date;AORDA;Benchmark +2008-01-01;0.0389;-0.0612 +2008-02-01;0.0713;-0.0348 +2008-03-01;0.0417;-0.006 +2008-04-01;0.0743;0.0475 +2008-05-01;0.0324;0.0107 +2008-06-01;-0.0013;-0.086 +2008-07-01;0.0849;-0.0099 +2008-08-01;-0.0283;0.0122 +2008-09-01;0.2821;-0.0921 +2008-10-01;0.0964;-0.1683 +2008-11-01;0.0055;-0.0748 +2008-12-01;-0.0288;0.0078 +2009-01-01;-0.0078;-0.0857 +2009-02-01;0.0684;-0.1099 +2009-03-01;0.0599;0.0854 +2009-04-01;-0.038;0.0939 +2009-05-01;0.0419;0.0531 +2009-06-01;-0.0489;2e-04 +2009-07-01;-0.1022;0.0741 +2009-08-01;-0.0166;0.0336 +2009-09-01;-0.0884;0.0357 +2009-10-01;-0.0284;-0.0198 +2009-11-01;-0.0103;0.0574 +2009-12-01;-0.0118;0.0178 
+2010-01-01;0.0054;-0.037 +2010-02-01;-0.0224;0.0285 +2010-03-01;0.0085;0.0588 +2010-04-01;-0.0294;0.0148 +2010-05-01;0.1343;-0.082 +2010-06-01;0.0779;-0.0539 +2010-07-01;0.0661;0.0688 +2010-08-01;0.0313;-0.0474 +2010-09-01;0.0367;0.0876 +2010-10-01;0.003;0.0369 +2010-11-01;0.0361;-0.0023 +2010-12-01;-0.0042;0.0653 +2011-01-01;0.0346;0.0226 +2011-02-01;-0.0068;0.032 +2011-03-01;-0.03;-0.001 +2011-04-01;-0.0148;0.0285 +2011-05-01;0.0372;-0.0135 +2011-06-01;-0.0358;-0.0183 +2011-07-01;-0.0056;-0.0215 +2011-08-01;0.1628;-0.0568 +2011-09-01;-0.0504;-0.0718 +2011-10-01;-0.0354;0.1077 +2011-11-01;0.0011;-0.0051 +2011-12-01;-0.0204;0.0085 +2012-01-01;-1e-04;0.0436 +2012-02-01;5e-04;0.0406 +2012-03-01;0.0169;0.0313 +2012-04-01;0.029;-0.0075 +2012-05-01;-0.0634;-0.0627 +2012-06-01;0.0684;0.0396 +2012-07-01;-0.0198;0.0126 +2012-08-01;0.0399;0.0198 +2012-09-01;-0.0323;0.0242 +2012-10-01;0.006;-0.0198 +2012-11-01;-0.0074;0.0028 +2012-12-01;-0.0048;0.0071 +2013-01-01;-0.0117;0.0504 +2013-02-01;-0.0173;0.0111 +2013-03-01;0.0174;0.036 +2013-04-01;-0.0367;0.0181 +2013-05-01;0.0117;0.0208 +2013-06-01;0.005;-0.015 +2013-07-01;-0.0046;0.0495 Added: pkg/PerformanceAnalytics/sandbox/pulkit/data/psr_python.csv =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/data/psr_python.csv (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/data/psr_python.csv 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,27 @@ +Code,Mean,StDev,Phi,Sigma,t-Stat(Phi) +HFRIFOF Index,0.005516691,0.016969081,0.35942732,0.015835089,6.246140275 +HFRIFWI Index,0.008881851,0.020176781,0.304802238,0.019216682,5.190679363 +HFRIEHI Index,0.009858566,0.026444472,0.26510117,0.025498305,4.460055655 +HFRIMI Index,0.009527016,0.021496073,0.184350274,0.021127643,3.041856755 +HFRIFOFD Index,0.005179518,0.017416384,0.353548291,0.016291569,6.129496094 +HFRIDSI Index,0.009621101,0.018800339,0.545792492,0.015753187,10.56122157 +HFRIEMNI 
Index,0.005182009,0.009427888,0.164396537,0.009299616,2.703456292 +HFRIFOFC Index,0.004809119,0.011620459,0.455662847,0.01034398,8.302257893 +HFRIEDI Index,0.009536151,0.019247216,0.391629021,0.01770981,6.902140563 +HFRIMTI Index,0.008528045,0.021556689,-0.0188129,0.021552874,-0.305148009 +HFRIFIHY Index,0.007177975,0.017707746,0.483806908,0.015497372,8.972011994 +HFRIFI Index,0.006855376,0.012881753,0.505908165,0.011111637,9.587381222 +HFRIRVA Index,0.008020951,0.012975483,0.452790992,0.011569158,8.242977673 +HFRIMAI Index,0.007142082,0.010437017,0.298219544,0.009962104,5.067023312 +HFRICAI Index,0.007122016,0.019973858,0.578004656,0.016299336,11.48654001 +HFRIEM Index,0.010352034,0.041000178,0.359277175,0.038262633,6.243082394 +HFRIEMA Index,0.007989882,0.038243416,0.311226738,0.036344083,5.310865179 +HFRISHSE Index,-0.001675503,0.053512968,0.090737496,0.053292219,1.477615589 +HFRIEMLA Index,0.011074013,0.05084986,0.196931418,0.04985408,3.257468873 +HFRIFOFS Index,0.006834983,0.024799788,0.323053217,0.023470043,5.536016371 +HFRIENHI Index,0.010092318,0.036682513,0.201118844,0.035932974,3.329910279 +HFRIFWIG Index,0.009382896,0.035972197,0.231372973,0.034996096,3.857301725 +HFRIFOFM Index,0.005607926,0.015907089,0.042154535,0.015892949,0.684239764 +HFRIFWIC Index,0.008947104,0.039009601,0.050499002,0.038959829,0.820004462 +HFRIFWIJ Index,0.008423965,0.03629762,0.0953987,0.036132072,1.554206093 +HFRISTI Index,0.011075118,0.046441033,0.160831261,0.04583646,2.642789417 Added: pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ProbSharpe.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ProbSharpe.Rnw (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ProbSharpe.Rnw 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,101 @@ +\documentclass[12pt,letterpaper,english]{article} +\usepackage{times} +\usepackage[T1]{fontenc} +\IfFileExists{url.sty}{\usepackage{url}} + {\newcommand{\url}{\texttt}} + 
+\usepackage[utf8]{inputenc} +\usepackage{babel} +\usepackage{Rd} + +\usepackage{Sweave} +\SweaveOpts{engine=R,eps = FALSE} +%\VignetteIndexEntry{Probabilistic Sharpe Ratio} +%\VignetteDepends{PerformanceAnalytics} +%\VignetteKeywords{Probabilistic Sharpe Ratio,Minimum Track Record Length,risk,benchmark,portfolio} +%\VignettePackage{PerformanceAnalytics} + +\begin{document} +\SweaveOpts{concordance=TRUE} + +\title{ Probabilistic Sharpe Ratio Optimization } + +% \keywords{Probabilistic Sharpe Ratio,Minimum Track Record Length,risk,benchmark,portfolio} + +\makeatletter +\makeatother +\maketitle + +\begin{abstract} + + This vignette gives an overview of the Probabilistic Sharpe Ratio , Minimum Track Record Length and the Probabilistic Sharpe Ratio Optimization technique used to find the optimal portfolio that maximizes the Probabilistic Sharpe Ratio. It gives an overview of the usability of the functions and its application. + +A probabilistic translation of Sharpe ratio, called PSR, is proposed to account for estimation errors in an IID non-Normal framework.When assessing Sharpe ratio?s ability to evaluate skill,we find that a longer track record may be able to compensate for certain statistical shortcomings of the returns probability distribution. Stated differently, despite Sharpe ratio's well-documented deficiencies, it can still provide evidence of investment skill, as long as the user learns to require the proper track record length. + +The portfolio of hedge fund indices that maximizes Sharpe ratio can be very different from +the portfolio that delivers the highest PSR. Maximizing for PSR leads to better diversified and +more balanced hedge fund allocations compared to the concentrated outcomes of Sharpe ratio +maximization. 
+ + + +\end{abstract} + +<>= +library(PerformanceAnalytics) +data(edhec) +library(noniid.pm) +@ + + +\section{Probabilistic Sharpe Ratio} + Given a predefined benchmark Sharpe ratio $SR^\ast$ , the observed Sharpe ratio $\hat{SR}$ can be expressed in probabilistic terms as + + \deqn{\hat{PSR}(SR^\ast) = Z\biggl[\frac{(\hat{SR}-SR^\ast)\sqrt{n-1}}{\sqrt{1-\hat{\gamma_3}SR^\ast + \frac{\hat{\gamma_4}-1}{4}\hat{SR^2}}}\biggr]} + + Here $n$ is the track record length or the number of data points. It can be daily,weekly or yearly depending on the input given + + \eqn{\hat{\gamma{_3}}} and \eqn{\hat{\gamma{_4}}} are the skewness and kurtosis respectively. + It is not unusual to find strategies with irregular trading frequencies, such as weekly strategies that may not trade for a month. This poses a problem when computing an annualized Sharpe ratio, and there is no consensus as how skill should be measured in the context of irregular bets. Because PSR measures skill in probabilistic terms, it is invariant to calendar conventions. All calculations are done in the original frequency +of the data, and there is no annualization. The Reference Sharpe Ratio is also given in the non-annualized form and should be greater than the Observed Sharpe Ratio. + +<<>>= +data(edhec) +ProbSharpeRatio(edhec[,1],refSR = 0.23) +@ + +\section{Minimum Track Record Length} + +If a track record is shorter than Minimum Track Record Length(MinTRL), we do +not have enough confidence that the observed \eqn{\hat{SR}} is above the designated threshold +\eqn{SR^\ast}. Minimum Track Record Length is given by the following expression. + +\deqn{MinTRL = n^\ast = 1+\biggl[1-\hat{\gamma_3}\hat{SR}+\frac{\hat{\gamma_4}}{4}\hat{SR^2}\biggr]\biggl(\frac{Z_\alpha}{\hat{SR}-SR^\ast}\biggr)^2} + +\eqn{\gamma{_3}} and \eqn{\gamma{_4}} are the skewness and kurtosis respectively. It is important to note that MinTRL is expressed in terms of number of observations, not annual or calendar terms. 
All the values used in the above formula are non-annualized, in the same frequency as that of the returns. + +<<>>= +data(edhec) +MinTrackRecord(edhec[,1],refSR = 0.23) +@ + +\section{Probabilistic Sharpe Ratio Optimal Portfolio} + +We would like to find the vector of weights that maximize the expression + + \deqn{\hat{PSR}(SR^\ast) = Z\biggl[\frac{(\hat{SR}-SR^\ast)\sqrt{n-1}}{\sqrt{1-\hat{\gamma_3}SR^\ast + \frac{\hat{\gamma_4}-1}{4}\hat{SR^2}}}\biggr]} + +where \eqn{\sigma = \sqrt{E[(r-\mu)^2]}} ,its standard deviation.\eqn{\gamma_3=\frac{E\biggl[(r-\mu)^3\biggr]}{\sigma^3}} its skewness,\eqn{\gamma_4=\frac{E\biggl[(r-\mu)^4\biggr]}{\sigma^4}} its kurtosis and \eqn{SR = \frac{\mu}{\sigma}} its Sharpe Ratio. + +Because \eqn{\hat{PSR}(SR^\ast)=Z[\hat{Z^\ast}]} is a monotonic increasing function of +\eqn{\hat{Z^\ast}} ,it suffices to compute the vector that maximizes \eqn{\hat{Z^\ast}} + This optimal vector is invariant of the value adopted by the parameter \eqn{SR^\ast}. + + +<<>>= +data(edhec) +PsrPortfolio(edhec) +@ + +\end{document} + Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,58 @@ +\name{CDaR} +\alias{CDaR} +\alias{CDD} +\title{Calculate Uryasev's proposed Conditional Drawdown at Risk (CDD or CDaR) +measure} +\usage{ + CDaR(R, weights = NULL, geometric = TRUE, invert = TRUE, + p = 0.95, ...) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{weights}{portfolio weighting vector, default NULL, + see Details} + + \item{geometric}{utilize geometric chaining (TRUE) or + simple/arithmetic chaining (FALSE) to aggregate returns, + default TRUE} + + \item{invert}{TRUE/FALSE whether to invert the drawdown + measure. 
see Details.} + + \item{p}{confidence level for calculation, default + p=0.95} + + \item{\dots}{any other passthru parameters} +} +\description{ + For some confidence level \eqn{p}, the conditional + drawdown is the the mean of the worst \eqn{p\%} + drawdowns. +} +\examples{ +library(lpSolve) +data(edhec) +t(round(CDaR(edhec),4)) +} +\author{ + Brian G. Peterson +} +\references{ + Chekhlov, A., Uryasev, S., and M. Zabarankin. Portfolio + Optimization With Drawdown Constraints. B. Scherer (Ed.) + Asset and Liability Management Tools, Risk Books, London, + 2003 http://www.ise.ufl.edu/uryasev/drawdown.pdf +} +\seealso{ + \code{\link{ES}} \code{\link{maxDrawdown}} + \code{\link{CdarMultiPath}} \code{\link{AlphaDrawdown}} + \code{\link{MultiBetaDrawdown}} + \code{\link{BetaDrawdown}} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{ts} + Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/capm_aorda.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/capm_aorda.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/capm_aorda.Rd 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,10 @@ +\docType{data} +\name{capm_aorda} +\alias{capm_aorda} +\title{Data to test Cdar and other related functions} +\description{ + This data set will be used to validate the results of the + paper +} +\keyword{datasets} + Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,51 @@ +\name{chart.REDD} +\alias{chart.REDD} +\title{Time series of Rolling Economic Drawdown} +\usage{ + chart.REDD(R, rf, h, geometric = TRUE, legend.loc = NULL, + colorset = (1:12), ...) 
+} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeseries, + or zoo object of asset return.} + + \item{rf}{risk free rate can be vector such as government + security rate of return} + + \item{h}{lookback period} + + \item{geometric}{utilize geometric chaining (TRUE) or + simple/arithmetic chaining(FALSE) to aggregate returns, + default is TRUE.} + + \item{legend.loc}{set the legend.loc, as in + \code{\link{plot}}} + + \item{colorset}{set the colorset label, as in + \code{\link{plot}}} + + \item{\dots}{any other variable} +} +\description{ + This function plots the time series of Rolling Economic + Drawdown. For more details on rolling economic drawdown + see \code{rollDrawdown}. +} +\examples{ +data(edhec) +chart.REDD(edhec,0.08,20) +} +\author{ + Pulkit Mehrotra +} +\references{ + Yang, Z. George and Zhong, Liang, Optimal Portfolio + Strategy to Control Maximum Drawdown - The Case of Risk + Based Dynamic Asset Allocation (February 25, 2012) +} +\seealso{ + \code{\link{plot}} \code{\link{EconomicDrawdown}} + \code{\link{EDDCOPS}} \code{\link{rollDrawdown}} + \code{\link{REDDCOPS}} \code{\link{rollEconomicMax}} +} + Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/psr_python.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/psr_python.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/psr_python.Rd 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,10 @@ +\docType{data} +\name{psr_python} +\alias{psr_python} +\title{Data to test Probabilistic Sharpe Ratio} +\description{ + This data set will be used to validate the results of the + paper +} +\keyword{datasets} + Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/ret.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/ret.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/ret.Rd 2013-08-31 23:21:34 UTC (rev 2962) @@ -0,0 +1,10 @@ +\docType{data} 
+\name{ret} +\alias{ret} +\title{Return Series to test Rolling Economic Drawdowns} +\description{ + This data set will be used to validate the results of the + paper +} +\keyword{datasets} + From noreply at r-forge.r-project.org Sun Sep 1 01:51:12 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 01:51:12 +0200 (CEST) Subject: [Returnanalytics-commits] r2963 - in pkg/PerformanceAnalytics/sandbox/pulkit: . R inst/doc Message-ID: <20130831235112.AD59518532E@r-forge.r-project.org> Author: pulkit Date: 2013-09-01 01:51:12 +0200 (Sun, 01 Sep 2013) New Revision: 2963 Added: pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ProbSharpe.pdf Modified: pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R Log: removed edhec values from code Modified: pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION 2013-08-31 23:21:34 UTC (rev 2962) +++ pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION 2013-08-31 23:51:12 UTC (rev 2963) @@ -45,3 +45,6 @@ 'table.PSR.R' 'TriplePenance.R' 'TuW.R' + 'capm_aorda.R' + 'psr_python.R' + 'ret.R' Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-08-31 23:21:34 UTC (rev 2962) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-08-31 23:51:12 UTC (rev 2963) @@ -77,7 +77,7 @@ } vs = vs[1] - corr = table.Correlation(edhec,edhec) + corr = table.Correlation(R,R) corr_avg = 0 for(i in 1:(columns-1)){ for(j in (i+1):columns){ Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R 
=================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R 2013-08-31 23:21:34 UTC (rev 2962) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R 2013-08-31 23:51:12 UTC (rev 2963) @@ -46,7 +46,7 @@ } SR = SharpeRatio(x) sr_avg = mean(SR) - corr = table.Correlation(edhec,edhec) + corr = table.Correlation(R,R) corr_avg = 0 for(i in 1:(columns-1)){ for(j in (i+1):columns){ Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R 2013-08-31 23:21:34 UTC (rev 2962) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R 2013-08-31 23:51:12 UTC (rev 2963) @@ -76,7 +76,7 @@ } SR = SharpeRatio(x) sr_avg = mean(SR) - corr = table.Correlation(edhec,edhec) + corr = table.Correlation(R,R) corr_avg = 0 for(i in 1:(columns-1)){ for(j in (i+1):columns){ Added: pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ProbSharpe.pdf =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ProbSharpe.pdf ___________________________________________________________________ Added: svn:mime-type + application/octet-stream From noreply at r-forge.r-project.org Sun Sep 1 02:04:02 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 02:04:02 +0200 (CEST) Subject: [Returnanalytics-commits] r2964 - in pkg/PerformanceAnalytics/sandbox/pulkit: R man Message-ID: <20130901000402.D618618532E@r-forge.r-project.org> Author: pulkit Date: 2013-09-01 02:04:02 +0200 (Sun, 01 Sep 2013) New Revision: 2964 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd Log: error correction in BenchmarkSR Modified: 
pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-08-31 23:51:12 UTC (rev 2963) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-09-01 00:04:02 UTC (rev 2964) @@ -17,6 +17,7 @@ #' #'@param R an xts, vector, matrix, data frame, timeSeries or zoo object of #' asset returns +#'@param S Number of strategies #'@param ylab set the y-axis label, as in \code{\link{plot}} #'@param xlab set the x-axis label, as in \code{\link{plot}} #'@param main set the chart title, as in \code{\link{plot}} @@ -48,7 +49,7 @@ #' #'@export -chart.BenchmarkSR<-function(R=NULL,main=NULL,ylab = NULL,xlab = NULL,element.color="darkgrey",lwd = 2,pch = 1,cex = 1,cex.axis=0.8,cex.lab = 1,cex.main = 1,vs=c("sharpe","correlation","strategies"),xlim = NULL,ylim = NULL,...){ +chart.BenchmarkSR<-function(R=NULL,S=NULL,main=NULL,ylab = NULL,xlab = NULL,element.color="darkgrey",lwd = 2,pch = 1,cex = 1,cex.axis=0.8,cex.lab = 1,cex.main = 1,vs=c("sharpe","correlation","strategies"),xlim = NULL,ylim = NULL,...){ # DESCRIPTION: # Draws Benchmark SR vs various variables such as average sharpe , Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd 2013-08-31 23:51:12 UTC (rev 2963) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd 2013-09-01 00:04:02 UTC (rev 2964) @@ -2,9 +2,9 @@ \alias{chart.BenchmarkSR} \title{Benchmark Sharpe Ratio Plots} \usage{ - chart.BenchmarkSR(R = NULL, main = NULL, ylab = NULL, - xlab = NULL, element.color = "darkgrey", lwd = 2, - pch = 1, cex = 1, cex.axis = 0.8, cex.lab = 1, + chart.BenchmarkSR(R = NULL, S = NULL, main = NULL, + ylab = NULL, xlab = NULL, element.color = "darkgrey", + lwd = 2, pch = 1, cex = 1, cex.axis = 0.8, cex.lab = 1, cex.main 
= 1, vs = c("sharpe", "correlation", "strategies"), xlim = NULL, ylim = NULL, ...) @@ -13,6 +13,8 @@ \item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns} + \item{S}{Number of strategies} + \item{ylab}{set the y-axis label, as in \code{\link{plot}}} From noreply at r-forge.r-project.org Sun Sep 1 03:10:11 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 03:10:11 +0200 (CEST) Subject: [Returnanalytics-commits] r2965 - pkg/PortfolioAnalytics/R Message-ID: <20130901011011.EE208185622@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-01 03:10:05 +0200 (Sun, 01 Sep 2013) New Revision: 2965 Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R Log: Adding iterators to the package check in optimize.portfolio.rebalancing Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-01 00:04:02 UTC (rev 2964) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-01 01:10:05 UTC (rev 2965) @@ -1109,6 +1109,7 @@ optimize.portfolio.rebalancing <- function(R, portfolio=NULL, constraints=NULL, objectives=NULL, optimize_method=c("DEoptim","random","ROI"), search_size=20000, trace=FALSE, ..., rp=NULL, rebalance_on=NULL, training_period=NULL, trailing_periods=NULL) { stopifnot("package:foreach" %in% search() || require("foreach",quietly=TRUE)) + stopifnot("package:iterators" %in% search() || require("iterators",quietly=TRUE)) start_t<-Sys.time() if (!is.null(portfolio) & !is.portfolio(portfolio)){ From noreply at r-forge.r-project.org Sun Sep 1 03:41:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 03:41:09 +0200 (CEST) Subject: [Returnanalytics-commits] r2966 - in pkg/PerformanceAnalytics/sandbox/pulkit: R inst/doc man Message-ID: <20130901014109.8877118532E@r-forge.r-project.org> Author: pulkit Date: 
2013-09-01 03:41:03 +0200 (Sun, 01 Sep 2013) New Revision: 2966 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/GoldenSection.R pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R pkg/PerformanceAnalytics/sandbox/pulkit/inst/doc/ProbSharpe.Rnw pkg/PerformanceAnalytics/sandbox/pulkit/man/ProbSharpeRatio.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/golden_section.Rd Log: documentation changes Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/GoldenSection.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/GoldenSection.R 2013-09-01 01:10:05 UTC (rev 2965) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/GoldenSection.R 2013-09-01 01:41:03 UTC (rev 2966) @@ -6,12 +6,12 @@ #' The Golden Section Search method is used to find the maximum or minimum of a unimodal #" function. (A unimodal function contains only one minimum or maximum on the interval #' [a,b].) To make the discussion of the method simpler, let us assume that we are trying to find -#' the maximum of a function. choose three points \eqn{x_l},\eqn{x_1} and \eqn{x_u} \eqn{(x_l \textless x_1 \textless x_u)} +#' the maximum of a function. choose three points \eqn{x_l},\eqn{x_1} and \eqn{x_u} \eqn{(x_l < x_1 < x_u)} #' along the x-axis with the corresponding values of the function \eqn{f(x_l)},\eqn{f(x_1)} and \eqn{f(x_u)}, respectively. Since -#' \eqn{f(x_1)\textgreater f(x_l)} and \eqn{f(x_1) \textgreater f(x_u)}, the maximum must lie between \eqn{x_l} and \eqn{x_u}. Now +#' \eqn{f(x_1)< f(x_l)} and \eqn{f(x_1)< f(x_u)}, the maximum must lie between \eqn{x_l} and \eqn{x_u}. Now #' a fourth point denoted by \eqn{x_2} is chosen to be between the larger of the two intervals of \eqn{[x_l,x_1]} and \eqn{[x_1,x_u]}/ #' Assuming that the interval \eqn{[x_l,x_1]} is larger than the interval \eqn{[x_1,x_u]} we would choose \eqn{[x_l,x_1]} as the interval -#' in which \eqn{x_2} is chosen. 
If \eqn{f(x_2)>f(x_1)} then the new three points would be \eqn{x_l \textless x_2 \textless x_1} else if +#' in which \eqn{x_2} is chosen. If \eqn{f(x_2)>f(x_1)} then the new three points would be \eqn{x_l > x_2 > x_u} else if #' \eqn{f(x_2)f(x_1)} then the new three points - would be \eqn{x_l \textless x_2 \textless x_1} else if + would be \eqn{x_l > x_2 > x_u} else if \eqn{f(x_2) Author: braverock Date: 2013-09-01 03:48:57 +0200 (Sun, 01 Sep 2013) New Revision: 2967 Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd Removed: pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd Log: - restore CDaR.Rd and chart.REDD.Rd, they got lost somehow Deleted: pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd 2013-09-01 01:41:03 UTC (rev 2966) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd 2013-09-01 01:48:57 UTC (rev 2967) @@ -1,58 +0,0 @@ -\name{CDaR} -\alias{CDaR} -\alias{CDD} -\title{Calculate Uryasev's proposed Conditional Drawdown at Risk (CDD or CDaR) -measure} -\usage{ - CDaR(R, weights = NULL, geometric = TRUE, invert = TRUE, - p = 0.95, ...) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of asset returns} - - \item{weights}{portfolio weighting vector, default NULL, - see Details} - - \item{geometric}{utilize geometric chaining (TRUE) or - simple/arithmetic chaining (FALSE) to aggregate returns, - default TRUE} - - \item{invert}{TRUE/FALSE whether to invert the drawdown - measure. see Details.} - - \item{p}{confidence level for calculation, default - p=0.95} - - \item{\dots}{any other passthru parameters} -} -\description{ - For some confidence level \eqn{p}, the conditional - drawdown is the the mean of the worst \eqn{p\%} - drawdowns. 
-} -\examples{ -library(lpSolve) -data(edhec) -t(round(CDaR(edhec),4)) -} -\author{ - Brian G. Peterson -} -\references{ - Chekhlov, A., Uryasev, S., and M. Zabarankin. Portfolio - Optimization With Drawdown Constraints. B. Scherer (Ed.) - Asset and Liability Management Tools, Risk Books, London, - 2003 http://www.ise.ufl.edu/uryasev/drawdown.pdf -} -\seealso{ - \code{\link{ES}} \code{\link{maxDrawdown}} - \code{\link{CdarMultiPath}} \code{\link{AlphaDrawdown}} - \code{\link{MultiBetaDrawdown}} - \code{\link{BetaDrawdown}} -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{ts} - Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/CDaR.Rd 2013-09-01 01:48:57 UTC (rev 2967) @@ -0,0 +1,58 @@ +\name{CDaR} +\alias{CDaR} +\alias{CDD} +\title{Calculate Uryasev's proposed Conditional Drawdown at Risk (CDD or CDaR) +measure} +\usage{ + CDaR(R, weights = NULL, geometric = TRUE, invert = TRUE, + p = 0.95, ...) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{weights}{portfolio weighting vector, default NULL, + see Details} + + \item{geometric}{utilize geometric chaining (TRUE) or + simple/arithmetic chaining (FALSE) to aggregate returns, + default TRUE} + + \item{invert}{TRUE/FALSE whether to invert the drawdown + measure. see Details.} + + \item{p}{confidence level for calculation, default + p=0.95} + + \item{\dots}{any other passthru parameters} +} +\description{ + For some confidence level \eqn{p}, the conditional + drawdown is the the mean of the worst \eqn{p\%} + drawdowns. +} +\examples{ +library(lpSolve) +data(edhec) +t(round(CDaR(edhec),4)) +} +\author{ + Brian G. Peterson +} +\references{ + Chekhlov, A., Uryasev, S., and M. Zabarankin. Portfolio + Optimization With Drawdown Constraints. 
B. Scherer (Ed.) + Asset and Liability Management Tools, Risk Books, London, + 2003 http://www.ise.ufl.edu/uryasev/drawdown.pdf +} +\seealso{ + \code{\link{ES}} \code{\link{maxDrawdown}} + \code{\link{CdarMultiPath}} \code{\link{AlphaDrawdown}} + \code{\link{MultiBetaDrawdown}} + \code{\link{BetaDrawdown}} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{ts} + Deleted: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd 2013-09-01 01:41:03 UTC (rev 2966) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd 2013-09-01 01:48:57 UTC (rev 2967) @@ -1,51 +0,0 @@ -\name{chart.REDD} -\alias{chart.REDD} -\title{Time series of Rolling Economic Drawdown} -\usage{ - chart.REDD(R, rf, h, geometric = TRUE, legend.loc = NULL, - colorset = (1:12), ...) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeseries, - or zoo object of asset return.} - - \item{rf}{risk free rate can be vector such as government - security rate of return} - - \item{h}{lookback period} - - \item{geometric}{utilize geometric chaining (TRUE) or - simple/arithmetic chaining(FALSE) to aggregate returns, - default is TRUE.} - - \item{legend.loc}{set the legend.loc, as in - \code{\link{plot}}} - - \item{colorset}{set the colorset label, as in - \code{\link{plot}}} - - \item{\dots}{any other variable} -} -\description{ - This function plots the time series of Rolling Economic - Drawdown. For more details on rolling economic drawdown - see \code{rollDrawdown}. -} -\examples{ -data(edhec) -chart.REDD(edhec,0.08,20) -} -\author{ - Pulkit Mehrotra -} -\references{ - Yang, Z. 
George and Zhong, Liang, Optimal Portfolio - Strategy to Control Maximum Drawdown - The Case of Risk - Based Dynamic Asset Allocation (February 25, 2012) -} -\seealso{ - \code{\link{plot}} \code{\link{EconomicDrawdown}} - \code{\link{EDDCOPS}} \code{\link{rollDrawdown}} - \code{\link{REDDCOPS}} \code{\link{rollEconomicMax}} -} - Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.REDD.Rd 2013-09-01 01:48:57 UTC (rev 2967) @@ -0,0 +1,51 @@ +\name{chart.REDD} +\alias{chart.REDD} +\title{Time series of Rolling Economic Drawdown} +\usage{ + chart.REDD(R, rf, h, geometric = TRUE, legend.loc = NULL, + colorset = (1:12), ...) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeseries, + or zoo object of asset return.} + + \item{rf}{risk free rate can be vector such as government + security rate of return} + + \item{h}{lookback period} + + \item{geometric}{utilize geometric chaining (TRUE) or + simple/arithmetic chaining(FALSE) to aggregate returns, + default is TRUE.} + + \item{legend.loc}{set the legend.loc, as in + \code{\link{plot}}} + + \item{colorset}{set the colorset label, as in + \code{\link{plot}}} + + \item{\dots}{any other variable} +} +\description{ + This function plots the time series of Rolling Economic + Drawdown. For more details on rolling economic drawdown + see \code{rollDrawdown}. +} +\examples{ +data(edhec) +chart.REDD(edhec,0.08,20) +} +\author{ + Pulkit Mehrotra +} +\references{ + Yang, Z. 
George and Zhong, Liang, Optimal Portfolio + Strategy to Control Maximum Drawdown - The Case of Risk + Based Dynamic Asset Allocation (February 25, 2012) +} +\seealso{ + \code{\link{plot}} \code{\link{EconomicDrawdown}} + \code{\link{EDDCOPS}} \code{\link{rollDrawdown}} + \code{\link{REDDCOPS}} \code{\link{rollEconomicMax}} +} + From noreply at r-forge.r-project.org Sun Sep 1 20:38:41 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 20:38:41 +0200 (CEST) Subject: [Returnanalytics-commits] r2968 - in pkg/PortfolioAnalytics: . R man sandbox Message-ID: <20130901183841.88CCD1812A2@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-01 20:38:41 +0200 (Sun, 01 Sep 2013) New Revision: 2968 Added: pkg/PortfolioAnalytics/R/charts.groups.R pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd pkg/PortfolioAnalytics/man/extractGroups.Rd pkg/PortfolioAnalytics/sandbox/testing_groups.R Modified: pkg/PortfolioAnalytics/DESCRIPTION pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/extractstats.R pkg/PortfolioAnalytics/R/portfolio.R Log: Adding functions to extract and chart weights by groups. 
Modified: pkg/PortfolioAnalytics/DESCRIPTION =================================================================== --- pkg/PortfolioAnalytics/DESCRIPTION 2013-09-01 01:48:57 UTC (rev 2967) +++ pkg/PortfolioAnalytics/DESCRIPTION 2013-09-01 18:38:41 UTC (rev 2968) @@ -54,3 +54,4 @@ 'chart.RiskReward.R' 'charts.efficient.frontier.R' 'charts.risk.R' + 'charts.groups.R' Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-01 01:48:57 UTC (rev 2967) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-01 18:38:41 UTC (rev 2968) @@ -8,6 +8,7 @@ export(chart.EfficientFrontier.optimize.portfolio) export(chart.EfficientFrontier) export(chart.EfficientFrontierOverlay) +export(chart.GroupWeights) export(chart.RiskBudget) export(chart.RiskReward.optimize.portfolio.DEoptim) export(chart.RiskReward.optimize.portfolio.GenSA) @@ -50,6 +51,7 @@ export(diversification) export(extract.efficient.frontier) export(extractEfficientFrontier) +export(extractGroups) export(extractObjectiveMeasures) export(extractStats.optimize.portfolio.DEoptim) export(extractStats.optimize.portfolio.GenSA) Added: pkg/PortfolioAnalytics/R/charts.groups.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.groups.R (rev 0) +++ pkg/PortfolioAnalytics/R/charts.groups.R 2013-09-01 18:38:41 UTC (rev 2968) @@ -0,0 +1,81 @@ +#' Chart weights by group or category +#' +#' @param object object of class \code{optimize.portfolio} +#' @param ... 
passthrough parameters to \code{\link{plot}} +#' @param grouping +#' \itemize{ +#' \item{groups: }{group the weights group constraints} +#' \item{category_labels: }{group the weights by category_labels in portfolio object} +#' } +#' @param main an overall title for the plot: see \code{\link{title}} +#' @param las numeric in \{0,1,2,3\}; the style of axis labels +#' \describe{ +#' \item{0:}{always parallel to the axis [\emph{default}],} +#' \item{1:}{always horizontal,} +#' \item{2:}{always perpendicular to the axis,} +#' \item{3:}{always vertical.} +#' } +#' @param xlab a title for the x axis: see \code{\link{title}} +#' @param cex.lab The magnification to be used for x and y labels relative to the current setting of \code{cex} +#' @param element.color color for the default border and axis +#' @param cex.axis The magnification to be used for x and y axis relative to the current setting of \code{cex} +#' @author Ross Bennett +#' @export +chart.GroupWeights <- function(object, ..., grouping=c("groups", "category"), main="Group Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ + if(!inherits(object, "optimize.portfolio.ROI")) stop("object must be of class 'optimize.portfolio'") + + constraints <- get_constraints(object$portfolio) + tmp <- extractGroups(object) + grouping <- grouping[1] + + if(grouping == "groups"){ + weights <- tmp$group_weights + if(is.null(weights)) stop("No weights detected for groups") + if(any(is.infinite(constraints$cUP)) | any(is.infinite(constraints$cLO))){ + # set ylim based on weights if box constraints contain Inf or -Inf + ylim <- range(weights) + } else { + # set ylim based on the range of box constraints min and max + ylim <- range(c(constraints$cLO, constraints$cUP)) + } + } + + if(grouping == "category"){ + weights <- tmp$category_weights + if(is.null(weights)) stop("No weights detected for category") + ylim <- range(weights) + } + + columnnames = names(weights) + numgroups = length(columnnames) + + 
if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + plot(weights, axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, ...) + if(grouping == "groups"){ + if(!any(is.infinite(constraints$cLO))){ + points(constraints$cLO, type="b", col="darkgray", lty="solid", lwd=2, pch=24) + } + if(!any(is.infinite(constraints$cUP))){ + points(constraints$cUP, type="b", col="darkgray", lty="solid", lwd=2, pch=25) + } + } + axis(2, cex.axis = cex.axis, col = element.color) + axis(1, labels=columnnames, at=1:numgroups, las=las, cex.axis = cex.axis, col = element.color) + box(col = element.color) +} Modified: pkg/PortfolioAnalytics/R/extractstats.R =================================================================== --- pkg/PortfolioAnalytics/R/extractstats.R 2013-09-01 01:48:57 UTC (rev 2967) +++ pkg/PortfolioAnalytics/R/extractstats.R 2013-09-01 18:38:41 UTC (rev 2968) @@ -366,3 +366,56 @@ return(out) } +#' Extract the group and/or category weights +#' +#' This function extracts the weights by group and/or category from an object +#' of class \code{optimize.portfolio} +#' +#' @param object object of class \code{optimize.portfolio} +#' @param ... passthrough parameters. 
Not currently used +#' @return a list with two elements +#' \itemize{ +#' \item{weights: }{Optimal set of weights from the \code{optimize.portfolio} object} +#' \item{category_weights: }{Weights by category if category_labels are supplied in the \code{portfolio} object} +#' \item{group_weights: }{Weights by group if group is a constraint type} +#' } +#' @author Ross Bennett +#' @export +extractGroups <- function(object, ...){ + if(!inherits(object, "optimize.portfolio")) stop("object must be of class 'optimize.portfolio'") + + # Check category_labels in portfolio object + category_labels <- object$portfolio$category_labels + + # Get the constraints to check for group constraints + constraints <- get_constraints(object$portfolio) + + groups <- constraints$groups + + cat_weights <- NULL + group_weights <- NULL + + if(!is.null(category_labels)){ + cat_names <- names(category_labels) + ncats <- length(category_labels) + cat_weights <- rep(0, ncats) + for(i in 1:ncats){ + cat_weights[i] <- sum(object$weights[category_labels[[i]]]) + } + names(cat_weights) <- cat_names + } + + if(!is.null(groups)){ + n.groups <- length(groups) + group_weights <- rep(0, n.groups) + for(i in 1:n.groups){ + group_weights[i] <- sum(object$weights[groups[[i]]]) + } + names(group_weights) <- constraints$group_labels + } + return(list(weights=object$weights, + category_weights=cat_weights, + group_weights=group_weights) + ) +} + Modified: pkg/PortfolioAnalytics/R/portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/portfolio.R 2013-09-01 01:48:57 UTC (rev 2967) +++ pkg/PortfolioAnalytics/R/portfolio.R 2013-09-01 18:38:41 UTC (rev 2968) @@ -88,6 +88,13 @@ if(length(category_labels) != length(assets)) { stop("length(category_labels) must be equal to length(assets)") } + # Turn category_labels into a list that can be used with group constraints + unique_labels <- unique(category_labels) + tmp <- list() + for(i in 1:length(unique_labels)){ + 
tmp[[unique_labels[i]]] <- which(category_labels == unique_labels[i]) + } + category_labels <- tmp } ## now structure and return Added: pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd 2013-09-01 18:38:41 UTC (rev 2968) @@ -0,0 +1,47 @@ +\name{chart.GroupWeights} +\alias{chart.GroupWeights} +\title{Chart weights by group or category} +\usage{ + chart.GroupWeights(object, ..., + grouping = c("groups", "category"), + main = "Group Weights", las = 3, xlab = NULL, + cex.lab = 1, element.color = "darkgray", + cex.axis = 0.8) +} +\arguments{ + \item{object}{object of class \code{optimize.portfolio}} + + \item{...}{passthrough parameters to \code{\link{plot}}} + + \item{grouping}{\itemize{ \item{groups: }{group the + weights group constraints} \item{category_labels: }{group + the weights by category_labels in portfolio object} }} + + \item{main}{an overall title for the plot: see + \code{\link{title}}} + + \item{las}{numeric in \{0,1,2,3\}; the style of axis + labels \describe{ \item{0:}{always parallel to the axis + [\emph{default}],} \item{1:}{always horizontal,} + \item{2:}{always perpendicular to the axis,} + \item{3:}{always vertical.} }} + + \item{xlab}{a title for the x axis: see + \code{\link{title}}} + + \item{cex.lab}{The magnification to be used for x and y + labels relative to the current setting of \code{cex}} + + \item{element.color}{color for the default border and + axis} + + \item{cex.axis}{The magnification to be used for x and y + axis relative to the current setting of \code{cex}} +} +\description{ + Chart weights by group or category +} +\author{ + Ross Bennett +} + Added: pkg/PortfolioAnalytics/man/extractGroups.Rd =================================================================== --- pkg/PortfolioAnalytics/man/extractGroups.Rd (rev 0) +++ 
pkg/PortfolioAnalytics/man/extractGroups.Rd 2013-09-01 18:38:41 UTC (rev 2968) @@ -0,0 +1,28 @@ +\name{extractGroups} +\alias{extractGroups} +\title{Extract the group and/or category weights} +\usage{ + extractGroups(object, ...) +} +\arguments{ + \item{object}{object of class \code{optimize.portfolio}} + + \item{...}{passthrough parameters. Not currently used} +} +\value{ + a list with two elements \itemize{ \item{weights: + }{Optimal set of weights from the + \code{optimize.portfolio} object} \item{category_weights: + }{Weights by category if category_labels are supplied in + the \code{portfolio} object} \item{group_weights: + }{Weights by group if group is a constraint type} } +} +\description{ + This function extracts the weights by group and/or + category from an object of class + \code{optimize.portfolio} +} +\author{ + Ross Bennett +} + Added: pkg/PortfolioAnalytics/sandbox/testing_groups.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/testing_groups.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/testing_groups.R 2013-09-01 18:38:41 UTC (rev 2968) @@ -0,0 +1,59 @@ +library(PortfolioAnalytics) +library(ROI) +library(ROI.plugin.quadprog) +library(ROI.plugin.glpk) + + +# data(edhec) +# R <- edhec[, 1:4] +# colnames(R) <- c("CA", "CTAG", "DS", "EM") +# funds <- colnames(R) + +load("~/Desktop/Testing/crsp.short.Rdata") +R <- cbind(microcap.ts[, 1:2], + smallcap.ts[, 1:2], + midcap.ts[, 1:2], + largecap.ts[, 1:2]) + +funds <- colnames(R) + +cap_labels <- c(rep("MICRO", 2), rep("SMALL", 2), + rep("MID", 2), rep("LARGE", 2)) + +# Create initial portfolio object with category_labels +init <- portfolio.spec(assets=funds, category_labels=cap_labels) +# Add some weight constraints +init <- add.constraint(portfolio=init, type="full_investment") +init <- add.constraint(portfolio=init, type="long_only") +# Add objective to minimize variance +minvar <- add.objective(portfolio=init, type="risk", name="var") + +# Specify 
group constraints by passing in category_labels from initial +# portfolio object +group1 <- add.constraint(portfolio=init, type="group", + groups=init$category_labels, + group_min=c(0.15, 0.25, 0.15, 0.2), + group_max=c(0.4, 0.4, 0.6, 0.6)) + +# Alternative way by specifying a list for group constraints +group2 <- add.constraint(portfolio=init, type="group", + groups=list(MICRO=c(1, 2), + SMALL=c(3, 4), + MID=c(5, 6), + LARGE=c(7, 8)), + group_min=c(0.2, 0.1, 0.2, 0.2), + group_max=c(0.4, 0.4, 0.4, 0.45)) +group2$category_labels <- NULL + +all.equal(group1$constraints[[3]]$groups, group2$constraints[[3]]$groups) + +opt_group1 <- optimize.portfolio(R=R, portfolio=group1, optimize_method="ROI") +extractGroups(opt_group1) +chart.GroupWeights(opt_group1, type="b", col="blue", pch=15, lty=2) + +opt_group2 <- optimize.portfolio(R=R, portfolio=group2, optimize_method="ROI") +extractGroups(opt_group2) +chart.GroupWeights(opt_group2, type="b", col="black", pch=21, bg="gray") + + + \ No newline at end of file From noreply at r-forge.r-project.org Sun Sep 1 23:28:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 1 Sep 2013 23:28:27 +0200 (CEST) Subject: [Returnanalytics-commits] r2969 - pkg/PortfolioAnalytics/vignettes Message-ID: <20130901212827.DFA091844FA@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-01 23:28:27 +0200 (Sun, 01 Sep 2013) New Revision: 2969 Modified: pkg/PortfolioAnalytics/vignettes/risk_budget_optimization.Rnw pkg/PortfolioAnalytics/vignettes/risk_budget_optimization.pdf Log: Modifying risk_budget vignette to add visualizations and completely compile with R code. 
Modified: pkg/PortfolioAnalytics/vignettes/risk_budget_optimization.Rnw =================================================================== --- pkg/PortfolioAnalytics/vignettes/risk_budget_optimization.Rnw 2013-09-01 18:38:41 UTC (rev 2968) +++ pkg/PortfolioAnalytics/vignettes/risk_budget_optimization.Rnw 2013-09-01 21:28:27 UTC (rev 2969) @@ -46,9 +46,9 @@ \section{General information} -Risk budgets are a central tool to estimate and manage the portfolio risk allocation. They decompose total portfolio risk into the risk contribution of each position. \citet{ BoudtCarlPeterson2010} propose several portfolio allocation strategies that use an appropriate transformation of the portfolio Conditional Value at Risk (CVaR) budget as an objective or constraint in the portfolio optimization problem. This document explains how risk allocation optimized portfolios can be obtained under general constraints in the \verb"PortfolioAnalytics" package of \citet{PortAnalytics}. +Risk budgets are a central tool to estimate and manage the portfolio risk allocation. They decompose total portfolio risk into the risk contribution of each position. \citet{ BoudtCarlPeterson2010} propose several portfolio allocation strategies that use an appropriate transformation of the portfolio Conditional Value at Risk (CVaR) budget as an objective or constraint in the portfolio optimization problem. This document explains how risk allocation optimized portfolios can be obtained under general constraints in the \verb"PortfolioAnalytics" package of \citet{PortfolioAnalytics}. -\verb"PortfolioAnalytics" is designed to provide numerical solutions for portfolio problems with complex constraints and objective sets comprised of any R function. It can e.g.~construct portfolios that minimize a risk objective with (possibly non-linear) per-asset constraints on returns and drawdowns \citep{CarlPetersonBoudt2010}. 
The generality of possible constraints and objectives is a distinctive characteristic of the package with respect to RMetrics \verb"fPortfolio" of \citet{fPortfolioBook}. For standard Markowitz optimization problems, use of \verb"fPortfolio" rather than \verb"PortfolioAnalytics" is recommended. +\verb"PortfolioAnalytics" is designed to provide numerical solutions for portfolio problems with complex constraints and objective sets comprised of any R function. It can e.g.~construct portfolios that minimize a risk objective with (possibly non-linear) per-asset constraints on returns and drawdowns \citep{CarlPetersonBoudt2010}. The generality of possible constraints and objectives is a distinctive characteristic of the package with respect to RMetrics \verb"fPortfolio" of \citet{fPortfolioBook}. For standard Markowitz optimization problems, use of \verb"fPortfolio" rather than \verb"PortfolioAnalytics" is recommended. \verb"PortfolioAnalytics" solves the following type of problem \begin{equation} \min_w g(w) \ \ s.t. \ \ @@ -65,16 +65,17 @@ Its principal functions are: \begin{itemize} -\item \verb"constraint(assets,min,max,min_sum,max_sum)": the portfolio optimization specification starts with specifying the shape of the weight vector through the function \verb"constraint". The weights have to be between \verb"min} and \verb"max" and their sum between \verb"min_sum" and \verb"max_sum". The first argument \verb"assets" is either a number indicating the number of portfolio assets or a vector holding the names of the assets. +\item \verb"portfolio.spec(assets)": the portfolio specification starts with creating a \verb"portfolio" object with information about the assets. The first argument \verb"assets" is either a number indicating the number of portfolio assets or a vector holding the names of the assets. The \verb"portfolio" object is a list holding the constraints and objectives. 
-\item \verb"add.objective(constraints, type, name)": \verb"constraints" is a list holding the objective to be minimized and the constraints. New elements to this list are added by the function \verb"add.objective". Many common risk budget objectives and constraints are prespecified and can be identified by specifying the \verb"type" and \verb"name". +\item \verb"add.constraint(portfolio, type)": Constraints are added to the \verb"portfolio" object by the function \verb"add.constraint". Basic constraint types include leverage constraints that specify the sum of the weights have to be between \verb"min_sum" and \verb"max_sum" and box constraints where the asset weights have to be between \verb"min" and \verb"max". +\item \verb"add.objective(portfolio, type, name)": New objectives are added to the \verb"portfolio" object with the function \verb"add.objective". Many common risk budget objectives and constraints are prespecified and can be identified by specifying the \verb"type" and \verb"name". -\item \verb"constrained_objective(w, R, constraints)": given the portfolio weight and return data, it evaluates the penalty augmented objective function in (\ref{eq:constrainedobj}). +\item \verb"constrained_objective(w, R, portfolio)": given the portfolio weight and return data, it evaluates the penalty augmented objective function in (\ref{eq:constrainedobj}). -\item \verb"optimize.portfolio(R,constraints)": this function returns the portfolio weight that solves the problem in (\ref{optimproblem}). {\it R} is the multivariate return series of the portfolio components. +\item \verb"optimize.portfolio(R, portfolio)": this function returns the portfolio weight that solves the problem in (\ref{optimproblem}). {\it R} is the multivariate return series of the portfolio components. -\item \verb"optimize.portfolio.rebalancing(R,constraints,rebalance_on,trailing_periods": this function solves the multiperiod optimization problem.
It returns for each rebalancing period the optimal weights and allows the estimation sample to be either from inception or a moving window. +\item \verb"optimize.portfolio.rebalancing(R, portfolio, rebalance_on, trailing_periods)": this function solves the multiperiod optimization problem. It returns for each rebalancing period the optimal weights and allows the estimation sample to be either from inception or a moving window. \end{itemize} @@ -82,7 +83,7 @@ in the dataset \verb"indexes". The first step is to load the package \verb"PortfolioAnalytics" and the dataset. An important first note is that some of the functions (especially \verb" optimize.portfolio.rebalancing") requires the dataset to be a \verb"xts" object \citep{xts}. -<>= +<>= options(width=80) @ @@ -102,8 +103,8 @@ \subsection{Weight constraints} <>=| -# create the portfolio specification object -Wcons <- portfolio.spec( assets = colnames(indexes[,1:4]) ) +# Create the portfolio specification object +Wcons <- portfolio.spec( assets = colnames(indexes) ) # Add box constraints Wcons <- add.constraint( portfolio=Wcons, type='box', min = 0, max=1 ) # Add the full investment constraint that specifies the weights must sum to 1. @@ -112,9 +113,10 @@ Given the weight constraints, we can call the value of the function to be minimized. We consider the case of no violation and a case of violation. By default, \verb"normalize=TRUE" which means that if the sum of weights exceeds \verb"max_sum", the weight vector is normalized by multiplying it with \verb"sum(weights)/max_sum" such that the weights evaluated in the objective function satisfy the \verb"max_sum" constraint. 
<>=| -constrained_objective( w = rep(1/4,4) , R = indexes[,1:4] , portfolio = Wcons) -constrained_objective( w = rep(1/3,4) , R = indexes[,1:4] , portfolio = Wcons) -constrained_objective( w = rep(1/3,4) , R = indexes[,1:4] , portfolio = Wcons, normalize=FALSE) +constrained_objective( w = rep(1/4,4) , R = indexes, portfolio = Wcons) +constrained_objective( w = rep(1/3,4) , R = indexes, portfolio = Wcons) +constrained_objective( w = rep(1/3,4) , R = indexes, portfolio = Wcons, + normalize=FALSE) @ The latter value can be recalculated as penalty times the weight violation, that is: $10000 \times 1/3.$ @@ -125,42 +127,42 @@ <>=| ObjSpec = add.objective( portfolio = Wcons , type="risk",name="CVaR", -arguments=list(p=0.95), enabled=TRUE) + arguments=list(p=0.95), enabled=TRUE) @ The value of the objective function is: <>=| -constrained_objective( w = rep(1/4,4) , R = indexes[,1:4] , portfolio = ObjSpec) +constrained_objective( w = rep(1/4,4) , R = indexes, portfolio = ObjSpec) @ + This is the CVaR of the equal-weight portfolio as computed by the function \verb"ES" in the \verb"PerformanceAnalytics" package of \citet{ Carl2007} <>=| library(PerformanceAnalytics) -out<-ES(indexes[,1:4],weights = rep(1/4,4),p=0.95, portfolio_method="component") +out<-ES(indexes, weights = rep(1/4,4),p=0.95, portfolio_method="component") out$MES @ All arguments in the function \verb"ES" can be passed on through \verb"arguments". E.g. to reduce the impact of extremes on the portfolio results, it is recommended to winsorize the data using the option clean="boudt". 
<>=| -out<-ES(indexes[,1:4],weights = rep(1/4,4),p=0.95,clean="boudt", portfolio_method="component") +out<-ES(indexes, weights = rep(1/4,4),p=0.95, clean="boudt", + portfolio_method="component") out$MES @ - - For the formulation of the objective function, this implies setting: <>=| ObjSpec = add.objective( portfolio = Wcons , type="risk",name="CVaR", -arguments=list(p=0.95,clean="boudt"), enabled=TRUE) + arguments=list(p=0.95,clean="boudt"), enabled=TRUE) constrained_objective( w = rep(1/4,4) , R = indexes[,1:4] , portfolio = ObjSpec) @ -An additional argument that is not available for the moment in \verb"ES" is to estimate the conditional covariance matrix trough -the constant conditional correlation model of \citet{Bollerslev90}. +An additional argument that is not available for the moment in \verb"ES" is to estimate the conditional covariance matrix through the constant conditional correlation model of \citet{Bollerslev90}. For the formulation of the objective function, this implies setting: <>=| ObjSpec = add.objective( portfolio = Wcons , type="risk",name="CVaR", -arguments=list(p=0.95,clean="boudt"), enabled=TRUE, garch=TRUE) + arguments=list(p=0.95,clean="boudt"), + enabled=TRUE, garch=TRUE) constrained_objective( w = rep(1/4,4) , R = indexes[,1:4] , portfolio = ObjSpec) @ @@ -168,28 +170,32 @@ Add the minimum 95\% CVaR concentration objective to the objective function: <>=| -ObjSpec = add.objective( portfolio = Wcons , type="risk_budget_objective",name="CVaR", -arguments=list(p=0.95,clean="boudt"), min_concentration=TRUE, enabled=TRUE) +ObjSpec = add.objective( portfolio = Wcons , type="risk_budget_objective", + name="CVaR", arguments=list(p=0.95, clean="boudt"), + min_concentration=TRUE, enabled=TRUE) @ + The value of the objective function is: <>=| -constrained_objective( w = rep(1/4,4) , R = indexes[,1:4] , portfolio = ObjSpec) +constrained_objective( w = rep(1/4,4) , R = indexes, portfolio = ObjSpec, + trace=TRUE) @ + We can verify that this is 
effectively the largest CVaR contribution of that portfolio as follows: <>=| -ES(indexes[,1:4],weights = rep(1/4,4),p=0.95,clean="boudt", portfolio_method="component") +ES(indexes[,1:4],weights = rep(1/4,4),p=0.95,clean="boudt", + portfolio_method="component") @ \subsection{Risk allocation constraints} -We see that in the equal-weight portfolio, the international equities and commodities investment -cause more than 30\% of total risk. We could specify as a constraint that no asset can contribute -more than 30\% to total portfolio risk. This involves the construction of the following objective function: +We see that in the equal-weight portfolio, the international equities and commodities investment cause more than 30\% of total risk. We could specify as a constraint that no asset can contribute more than 30\% to total portfolio risk with the argument \verb"max_prisk=0.3". This involves the construction of the following objective function: <>=| -ObjSpec = add.objective( portfolio = Wcons , type="risk_budget_objective",name="CVaR", max_prisk = 0.3, -arguments=list(p=0.95,clean="boudt"), enabled=TRUE) -constrained_objective( w = rep(1/4,4) , R = indexes[,1:4] , portfolio = ObjSpec) +ObjSpec = add.objective( portfolio = Wcons , type="risk_budget_objective", + name="CVaR", max_prisk = 0.3, + arguments=list(p=0.95,clean="boudt"), enabled=TRUE) +constrained_objective( w = rep(1/4,4) , R = indexes, portfolio = ObjSpec) @ This value corresponds to the penalty parameter which has by default the value of 10000 times the exceedances: $ 10000*(0.045775103+0.054685023)\approx 1004.601.$ @@ -198,167 +204,148 @@ The penalty augmented objective function is minimized through Differential Evolution. Two parameters are crucial in tuning the optimization: \verb"search_size" and \verb"itermax". 
The optimization routine \begin{enumerate} -\item First creates the initial generation of \verb"NP= search_size/itermax" guesses for the optimal value of the parameter vector, using the \verb"random_portfolios" function generating random weights satisfying the weight constraints. +\item First creates the initial generation of \verb"NP = search_size/itermax" guesses for the optimal value of the parameter vector, using the \verb"random_portfolios" function generating random weights satisfying the weight constraints. \item Then DE evolves over this population of candidate solutions using alteration and selection operators in order to minimize the objective function. It restarts \verb"itermax" times. -\end{enumerate} It is important that \verb"search_size/itermax" is high enough. It is generally recommended that this ratio is at least ten times the length of the weight vector. For more details on the use of DE strategy in portfolio allocation, we refer the +\end{enumerate} +It is important that \verb"search_size/itermax" is high enough. It is generally recommended that this ratio is at least ten times the length of the weight vector. For more details on the use of DE strategy in portfolio allocation, we refer the reader to \citet{Ardia2010}. 
\subsection{Minimum CVaR portfolio under an upper 40\% CVaR allocation constraint} -The functions needed to obtain the minimum CVaR portfolio under an upper 40\% CVaR allocation constraint are the following: -\begin{verbatim} -> ObjSpec <- constraint(assets = colnames(indexes[,1:4]),min = rep(0,4), -+ max=rep(1,4), min_sum=1,max_sum=1 ) -> ObjSpec <- add.objective_v1( constraints = ObjSpec, type="risk", -+ name="CVaR", arguments=list(p=0.95,clean="boudt"),enabled=TRUE) -> ObjSpec <- add.objective_v1( constraints = ObjSpec, -+ type="risk_budget_objective", name="CVaR", max_prisk = 0.4, -+ arguments=list(p=0.95,clean="boudt"), enabled=TRUE) -> set.seed(1234) -> out = optimize.portfolio_v1(R= indexes[,1:4],constraints=ObjSpec, -+ optimize_method="DEoptim",itermax=10, search_size=2000) -\end{verbatim} -After the call to these functions it starts to explore the feasible space iteratively: -\begin{verbatim} -Iteration: 1 bestvalit: 0.029506 bestmemit: 0.810000 0.126000 0.010000 0.140000 -Iteration: 2 bestvalit: 0.029506 bestmemit: 0.810000 0.126000 0.010000 0.140000 -Iteration: 3 bestvalit: 0.029272 bestmemit: 0.758560 0.079560 0.052800 0.112240 -Iteration: 4 bestvalit: 0.029272 bestmemit: 0.758560 0.079560 0.052800 0.112240 -Iteration: 5 bestvalit: 0.029019 bestmemit: 0.810000 0.108170 0.010000 0.140000 -Iteration: 6 bestvalit: 0.029019 bestmemit: 0.810000 0.108170 0.010000 0.140000 -Iteration: 7 bestvalit: 0.029019 bestmemit: 0.810000 0.108170 0.010000 0.140000 -Iteration: 8 bestvalit: 0.028874 bestmemit: 0.692069 0.028575 0.100400 0.071600 -Iteration: 9 bestvalit: 0.028874 bestmemit: 0.692069 0.028575 0.100400 0.071600 -Iteration: 10 bestvalit: 0.028874 bestmemit: 0.692069 0.028575 0.100400 0.071600 -elapsed time:1.85782111114926 -\end{verbatim} +The portfolio object and functions needed to obtain the minimum CVaR portfolio under an upper 40\% CVaR allocation objective are the following: +<>= +# Create the portfolio specification object +ObjSpec <- 
portfolio.spec(assets=colnames(indexes[,1:4])) +# Add box constraints +ObjSpec <- add.constraint(portfolio=ObjSpec, type='box', min = 0, max=1) +# Add the full investment constraint that specifies the weights must sum to 1. +ObjSpec <- add.constraint(portfolio=ObjSpec, type="full_investment") +# Add objective to minimize CVaR +ObjSpec <- add.objective(portfolio=ObjSpec, type="risk", name="CVaR", + arguments=list(p=0.95, clean="boudt")) +# Add objective for an upper 40% CVaR allocation +ObjSpec <- add.objective(portfolio=ObjSpec, type="risk_budget_objective", + name="CVaR", max_prisk=0.4, + arguments=list(p=0.95, clean="boudt")) +@ -If \verb"TRACE=FALSE" the only output in \verb"out" is the weight vector that optimizes the objective function. +After the call to these functions it starts to explore the feasible space iteratively and is shown in the output. Iterations are given as intermediate output and by default every iteration will be printed. We set \verb"traceDE=5" to print every 5 iterations and \verb"itermax=50" for a maximum of 50 iterations. -\begin{verbatim} -> out[[1]] - US Bonds US Equities Int'l Equities Commodities - 0.77530240 0.03201150 0.11247491 0.08021119 \end{verbatim} +<>= +set.seed(1234) +out <- optimize.portfolio(R=indexes, portfolio=ObjSpec, + optimize_method="DEoptim", search_size=2000, + traceDE=5, itermax=50, trace=TRUE) +print(out) +@ -If \verb"TRACE=TRUE" additional information is given such as the value of the objective function and the different constraints. +If \verb"trace=TRUE" in \verb"optimize.portfolio", additional output from the DEoptim solver is included in the \verb"out" object created by \verb"optimize.portfolio". The additional elements in the output are \verb"DEoptim_objective_results" and \verb"DEoutput". The \verb"DEoutput" element contains output from the function \verb"DEoptim". The \verb"DEoptim_objective_results" element contains the weights, value of the objective measures, and other data at each iteration. 
+ +<>= +names(out) +# View the DEoptim_objective_results information at the last iteration +out$DEoptim_objective_results[[601]] + +# Extract stats from the out object into a matrix +xtract <- extractStats(out) +dim(xtract) +head(xtract) +@ + +It can be seen from the charts that although US Bonds has a higher weight allocation, the percentage contribution to risk is the lowest of all four indexes. +<<>>= +chart.Weights(out) +chart.RiskBudget(out, risk.type="pct_contrib", col="blue", pch=18) +@ + + \subsection{Minimum CVaR concentration portfolio} The functions needed to obtain the minimum CVaR concentration portfolio are the following: -\begin{verbatim} -> ObjSpec <- constraint(assets = colnames(indexes[,1:4]) ,min = rep(0,4), -+ max=rep(1,4), min_sum=1,max_sum=1 ) -> ObjSpec <- add.objective_v1( constraints = ObjSpec, -+ type="risk_budget_objective", name="CVaR", -+ arguments=list(p=0.95,clean="boudt"), -+ min_concentration=TRUE,enabled=TRUE) -> set.seed(1234) -> out = optimize.portfolio_v1(R= indexes[,1:4],constraints=ObjSpec, -+ optimize_method="DEoptim",itermax=50, search_size=5000) -\end{verbatim} -The iterations are as follows: -\begin{verbatim} -Iteration: 1 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 2 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 3 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 4 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 5 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 45 bestvalit: 0.008209 bestmemit: 0.976061 0.151151 0.120500 0.133916 -Iteration: 46 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -Iteration: 47 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -Iteration: 48 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -Iteration: 49 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -Iteration: 50 
bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -elapsed time:4.1324522222413 -\end{verbatim} -This portfolio has the equal risk contribution characteristic: -\begin{verbatim} -> out[[1]] - US Bonds US Equities Int'l Equities Commodities - 0.70528537 0.11118139 0.08610905 0.09742419 -> ES(indexes[,1:4],weights = out[[1]],p=0.95,clean="boudt", -+ portfolio_method="component") -$MES - [,1] -[1,] 0.03246264 +<>= +# Create the portfolio specification object +ObjSpec <- portfolio.spec(assets=colnames(indexes)) +# Add box constraints +ObjSpec <- add.constraint(portfolio=ObjSpec, type='box', min = 0, max=1) +# Add the full investment constraint that specifies the weights must sum to 1. +ObjSpec <- add.constraint(portfolio=ObjSpec, type="full_investment") +# Add objective for min CVaR concentration +ObjSpec <- add.objective(portfolio=ObjSpec, type="risk_budget_objective", + name="CVaR", arguments=list(p=0.95, clean="boudt"), + min_concentration=TRUE) -$contribution - US Bonds US Equities Int'l Equities Commodities - 0.008169565 0.008121930 0.008003228 0.008167917 +set.seed(1234) +out <- optimize.portfolio(R=indexes, portfolio=ObjSpec, + optimize_method="DEoptim", search_size=5000, + itermax=50, traceDE=5, trace=TRUE) +@ -$pct_contrib_MES - US Bonds US Equities Int'l Equities Commodities - 0.2516605 0.2501931 0.2465366 0.2516098 \end{verbatim} +This portfolio has the near equal risk contribution characteristic: +<>= +print(out) +# Verify results with ES function +ES(indexes[,1:4], weights=out$weights, p=0.95, clean="boudt", + portfolio_method="component") +@ +The 95\% CVaR percent contribution to risk is near equal for all four indexes. The neighbor portfolios can be plotted to view other near optimal portfolios. Alternatively, the contribution to risk in absolute terms can plotted by setting \verb"risk.type="absolute". 
+<<>>= +chart.RiskBudget(out, neighbors=25, risk.type="pct_contrib", col="blue", pch=18) +@ + \subsection{Dynamic optimization} -Dynamic rebalancing of the risk budget optimized portfolio is possible through the function \verb"optimize.portfolio.rebalancing". Additional arguments are \verb"rebalance\_on} which indicates the rebalancing frequency (years, quarters, months). The estimation is either done from inception (\verb"trailing\_periods=0") or through moving window estimation, where each window has \verb"trailing_periods" observations. The minimum number of observations in the estimation sample is specified by \verb"training_period". Its default value is 36, which corresponds to three years for monthly data. +Dynamic rebalancing of the risk budget optimized portfolio is possible through the function \verb"optimize.portfolio.rebalancing". Additional arguments are \verb"rebalance_on" which indicates the rebalancing frequency (years, quarters, months). The estimation is either done from inception (\verb"trailing_periods=0") or through moving window estimation, where each window has \verb"trailing_periods" observations. The minimum number of observations in the estimation sample is specified by \verb"training_period". Its default value is 36, which corresponds to three years for monthly data. -As an example, consider the minimum CVaR concentration portfolio, with estimation from in inception and monthly rebalancing. Since we require a minimum estimation length of total number of observations -1, we can optimize the portfolio only for the last two months. +As an example, consider the minimum CVaR concentration portfolio, with estimation from inception and monthly rebalancing. Since we require a minimum estimation length of total number of observations -1, we can optimize the portfolio only for the last two months. 
-\begin{verbatim} -> set.seed(1234) -> out = optimize.portfolio.rebalancing_v1(R= indexes,constraints=ObjSpec, rebalance_on ="months", -+ optimize_method="DEoptim",itermax=50, search_size=5000, training_period = nrow(indexes)-1 ) -\end{verbatim} +<>= +library(iterators) +set.seed(1234) +out <- optimize.portfolio.rebalancing(R=indexes, portfolio=ObjSpec, + optimize_method="DEoptim", search_size=5000, + rebalance_on="months", + training_period=nrow(indexes)-1, + traceDE=10) +@ -For each of the optimization, the iterations are given as intermediate output: -\begin{verbatim} -Iteration: 1 bestvalit: 0.010655 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 2 bestvalit: 0.010655 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 49 bestvalit: 0.008207 bestmemit: 0.787525 0.124897 0.098001 0.108258 -Iteration: 50 bestvalit: 0.008195 bestmemit: 0.774088 0.122219 0.095973 0.104338 -elapsed time:4.20546416666773 -Iteration: 1 bestvalit: 0.011006 bestmemit: 0.770000 0.050000 0.090000 0.090000 -Iteration: 2 bestvalit: 0.010559 bestmemit: 0.498333 0.010000 0.070000 0.080000 -Iteration: 49 bestvalit: 0.008267 bestmemit: 0.828663 0.126173 0.100836 0.114794 -Iteration: 50 bestvalit: 0.008267 bestmemit: 0.828663 0.126173 0.100836 0.114794 -elapsed time:4.1060591666566 -overall elapsed time:8.31152777777778 -\end{verbatim} -The output is a list holding for each rebalancing period the output of the optimization, such as portfolio weights. 
-\begin{verbatim} -> out[[1]]$weights - US Bonds US Equities Int'l Equities Commodities - 0.70588695 0.11145087 0.08751686 0.09514531 -> out[[2]]$weights - US Bonds US Equities Int'l Equities Commodities - 0.70797640 0.10779728 0.08615059 0.09807574 -\end{verbatim} -But also the value of the objective function: -\begin{verbatim} -> out[[1]]$out -[1] 0.008195072 -> out[[2]]$out -[1] 0.008266844 -\end{verbatim} -The first and last observation from the estimation sample: -\begin{verbatim} -> out[[1]]$data_summary -$first - US Bonds US Equities Int'l Equities Commodities -1980-01-31 -0.0272 0.061 0.0462 0.0568 -$last - US Bonds US Equities Int'l Equities Commodities -2009-11-30 0.0134 0.0566 0.0199 0.015 +The output of \verb"optimize.portfolio.rebalancing" is a list of objects created by \verb"optimize.portfolio", one for each rebalancing period. -> out[[2]]$data_summary -$first - US Bonds US Equities Int'l Equities Commodities -1980-01-31 -0.0272 0.061 0.0462 0.0568 +<>= +names(out) +names(out[[1]]) +print(out) +@ -$last - US Bonds US Equities Int'l Equities Commodities -2009-12-31 -0.0175 0.0189 0.0143 0.0086 -\end{verbatim} +The optimal weights for each rebalancing period can be extracted from the object with the following function: -Of course, DE is a stochastic optimizaer and typically will only find a near-optimal solution that depends on the seed. The function \verb"optimize.portfolio.parallel" in \verb"PortfolioAnalytics" allows to run an arbitrary number of portfolio sets in parallel in order to develop "confidence bands" around your solution. It is based on Revolution's \verb"foreach" package \citep{foreach}.
+<>= +extractWeights(out) +@ + +Also the value of the objective function at each rebalancing period: +<>= +out[[1]]$out +out[[2]]$out +@ + + +The first and last observation from the estimation sample: +<>= +out[[1]]$data_summary +out[[2]]$data_summary +@ + + +Of course, DE is a stochastic optimizer and typically will only find a near-optimal solution that depends on the seed. The function \verb"optimize.portfolio.parallel" in \verb"PortfolioAnalytics" allows to run an arbitrary number of portfolio sets in parallel in order to develop "confidence bands" around your solution. It is based on Revolution's \verb"foreach" package \citep{foreach}. + \bibliography{PA} Modified: pkg/PortfolioAnalytics/vignettes/risk_budget_optimization.pdf =================================================================== --- pkg/PortfolioAnalytics/vignettes/risk_budget_optimization.pdf 2013-09-01 18:38:41 UTC (rev 2968) +++ pkg/PortfolioAnalytics/vignettes/risk_budget_optimization.pdf 2013-09-01 21:28:27 UTC (rev 2969) @@ -1,1374 +1,988 @@ %PDF-1.5 %???? -1 0 obj << -/Length 372 +3 0 obj << +/Length 1348 +/Filter /FlateDecode >> stream -concordance:risk_budget_optimization.tex:risk_budget_optimization.Rnw:1 32 1 1 0 51 1 1 4 1 2 1 0 2 1 5 0 2 1 7 0 1 1 8 0 1 2 6 1 1 3 2 0 1 2 1 0 1 2 4 0 1 2 1 1 1 2 6 0 1 1 5 0 1 1 6 0 1 2 6 1 1 3 5 0 1 2 1 1 1 2 8 0 2 2 1 0 2 1 7 0 1 2 1 1 1 2 1 0 1 1 7 0 1 2 3 1 1 3 2 0 1 1 7 0 1 2 4 1 1 3 2 0 1 1 7 0 1 2 3 1 1 3 5 0 2 2 7 0 2 2 17 0 1 2 6 1 1 3 2 0 1 1 6 0 1 2 172 1 +x??XKs?4??W?B?? + ?????[,E?VH-?????????S????????{fB 'I-???????m?.????????Y?3&x???M??b??A?s???f??U??Xk?^E*N??+??mg7m]??H?"|s??????U??Sew4?z??tX\??vXo??W?????K?,??`y??c?g?x?????lU?{ rw?N?"G2gZ????`????)??$????Hg???Z)?5?H?(?y,D?? +??j?[??-??g??/??????8??{8???NPM +????,K?iS[???8?$???#??????? YX?)z?????i??D6=???????y_4??s?~va???$: +p4?L???????)?}46?s??|{|FK?:??8}????Y?????x???@P???Z?VFS?????(G??O??o?X:V??&??I?M;??l?@rj@??L_??N4??? ???? 
??????~lh?o?f}(?;?K?*?4???F????5|?mm???9?u_?* +?????????|?\????????YavjU5?b??\]??t??$$@??yk?I??@e??|?L?)?????$?$y??????q?t?????????9Kg??LWb(?H{??Y at 2v?????*??XP?S??R?? ?3%?? m?d ????&l???T??c?6Dh?y0]U"?H?????h?????81l??? +???T?_?O?c????fy???q~??????Vr? endstream endobj -4 0 obj << -/Length 1596 +14 0 obj << +/Length 2667 /Filter /FlateDecode >> stream -x??XIo?F??W?R???pf?!{)bwAS -\#=?=??$?(?T???mCR??-,?3o???ux~u???Y2J?2I?FW?Q??J??(+r;W?????8???????m?o???.z;6YT?????o}?HZ?c???Wx??\[-???l????FG???.?????Qy?o?????) -'????????Q??Ql???g?=K?j??f(?&?|?F -p?.h5!??AlU??1?m?r? ?????%??????? -??S?? -P?-_??-)CD%??g????Nz ?m9 #??N%?$??=?!??-Z????X? `N?? -??m)o?}@C?&h?'?????????X???Ds?C????? V????{\R(?sp???e?????l???3v???*?1?O?c?9??x9n?t%[N%"`oO -??R? ????A ?&????Tp?XS?W -6dCG~?7??h`?]? v??~?w4@?hp?r? ????B?u????????8=hJ1H??&??E?a`???Q?c???H??c?NB??/<7?\ -M_???4??B?r?a????M*?8???W?i??_?0GY? ??Me+s(?]O?zU??y??Z?4??P?t???M?`E? iF ??f?q??????U|*?E|??)???aaX??Y?]?P????/?=??8???}iY??c?^??C????r???^5pH#??????9??Z9+? B???)???N4<&L?A???nM???}?????(??N h??? ??^??^???w??????|???z??&?x??.?f?W?Z?Ab? ?.??????? RB*??1??D???e???+A???A7g?^b?J??5??V?w?%Sx?? i?{a?/n?? ??K????#?? ?+??+????L?VT^5?u??? -?Vb?:??x?>?? G??~??N.?$P???^N???? ?%?z?\???j?[c!?'? _?J?^??????.????|n?Q??????0?R?????R?#l?X?(&??oeF??????I?J?-e?ns?~?? -? ?-??????O!??r??? Author: ababii Date: 2013-09-02 14:46:24 +0200 (Mon, 02 Sep 2013) New Revision: 2970 Modified: pkg/PerformanceAnalytics/R/Return.portfolio.R Log: - Fix a bag with conversion of the wealth index to the xts. The function as.xts was replaced by reclass. 
Modified: pkg/PerformanceAnalytics/R/Return.portfolio.R =================================================================== --- pkg/PerformanceAnalytics/R/Return.portfolio.R 2013-09-01 21:28:27 UTC (rev 2969) +++ pkg/PerformanceAnalytics/R/Return.portfolio.R 2013-09-02 12:46:24 UTC (rev 2970) @@ -180,7 +180,7 @@ for (col in colnames(weights)){ wealthindex.weighted[,col]=weights[,col]*wealthindex.assets[,col] } - wealthindex=as.xts(apply(wealthindex.weighted,1,sum)) + wealthindex=reclass(apply(wealthindex.weighted,1,sum), R) result = wealthindex result[2:length(result)] = result[2:length(result)] / lag(result)[2:length(result)] - 1 From noreply at r-forge.r-project.org Mon Sep 2 19:24:46 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 2 Sep 2013 19:24:46 +0200 (CEST) Subject: [Returnanalytics-commits] r2971 - pkg/Meucci/R Message-ID: <20130902172446.D007D184468@r-forge.r-project.org> Author: braverock Date: 2013-09-02 19:24:46 +0200 (Mon, 02 Sep 2013) New Revision: 2971 Removed: pkg/Meucci/R/ FitOrnsteinUhlenbeck.R Log: - remove duplicate copy of the FitOrnSteinUhlenbeck.R script with a *leading* space in the filename Deleted: pkg/Meucci/R/ FitOrnsteinUhlenbeck.R =================================================================== --- pkg/Meucci/R/ FitOrnsteinUhlenbeck.R 2013-09-02 12:46:24 UTC (rev 2970) +++ pkg/Meucci/R/ FitOrnsteinUhlenbeck.R 2013-09-02 17:24:46 UTC (rev 2971) @@ -1,55 +0,0 @@ -#' Fit a multivariate OU process at estimation step tau, as described in A. 
Meucci -#' "Risk and Asset Allocation", Springer, 2005 -#' -#' @param Y : [matrix] (T x N) -#' @param tau : [scalar] time step -#' -#' @return Mu : [vector] long-term means -#' @return Th : [matrix] whose eigenvalues have positive real part / mean reversion speed -#' @return Sig : [matrix] Sig = S * S', covariance matrix of Brownian motions -#' -#' @note -#' o dY_t = -Th * (Y_t - Mu) * dt + S * dB_t where -#' o dB_t: vector of Brownian motions -#' -#' @references -#' \url{http://symmys.com/node/170} -#' See Meucci's script for "EfficientFrontierReturns.m" -#' -#' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export - -FitOrnsteinUhlenbeck = function( Y, tau ) -{ - T = nrow(Y); - N = ncol(Y); - - X = Y[ -1, ]; - F = cbind( matrix( 1, T-1, 1 ), Y[ -nrow(Y), ] ); - E_XF = t(X) %*% F / T; - E_FF = t(F) %*% F / T; - B = E_XF %*% solve( E_FF ); - if( length( B[ , -1 ] ) != 1 ) - { - Th = -logm( B[ , -1 ] ) / tau; - - }else - { - Th = -log( B[ , -1 ] ) / tau; - } - - Mu = solve( diag( 1, N ) - B[ , -1 ] ) %*% B[ , 1 ] ; - - U = F %*% t(B) - X; - - Sig_tau = cov(U); - - N = length(Mu); - TsT = kron( Th, diag( 1, N ) ) + kron( diag( 1, N ), Th ); - - VecSig_tau = matrix(Sig_tau, N^2, 1); - VecSig = ( solve( diag( 1, N^2 ) - expm( -TsT * tau ) ) %*% TsT ) %*% VecSig_tau; - Sig = matrix( VecSig, N, N ); - - return( list( Mu = Mu, Theta = Th, Sigma = Sig ) ) -} \ No newline at end of file From noreply at r-forge.r-project.org Tue Sep 3 04:35:11 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 04:35:11 +0200 (CEST) Subject: [Returnanalytics-commits] r2972 - in pkg/PortfolioAnalytics: R sandbox Message-ID: <20130903023511.227FD184BB1@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-03 04:35:10 +0200 (Tue, 03 Sep 2013) New Revision: 2972 Modified: pkg/PortfolioAnalytics/R/charts.efficient.frontier.R pkg/PortfolioAnalytics/sandbox/testing_efficient_frontier.R Log: Adding functionality to plot group weights along 
the efficient frontier. Updated testing script with examples. Modified: pkg/PortfolioAnalytics/R/charts.efficient.frontier.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.efficient.frontier.R 2013-09-02 17:24:46 UTC (rev 2971) +++ pkg/PortfolioAnalytics/R/charts.efficient.frontier.R 2013-09-03 02:35:10 UTC (rev 2972) @@ -272,6 +272,7 @@ #' @param ... passthrough parameters to \code{barplot}. #' @param n.portfolios number of portfolios to extract along the efficient frontier. #' This is only used for objects of class \code{optimize.portfolio} +#' @param by.groups TRUE/FALSE. If TRUE, the weights by group are charted. #' @param match.col match.col string name of column to use for risk (horizontal axis). #' Must match the name of an objective. #' @param main main title used in the plot. @@ -283,13 +284,13 @@ #' @param legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted. #' @author Ross Bennett #' @export -chart.Weights.EF <- function(object, colorset=NULL, ..., n.portfolios=25, match.col="ES", main="EF Weights", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray"){ +chart.Weights.EF <- function(object, colorset=NULL, ..., n.portfolios=25, by.groups=FALSE, match.col="ES", main="EF Weights", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray"){ UseMethod("chart.Weights.EF") } #' @rdname chart.Weights.EF #' @export -chart.Weights.EF.efficient.frontier <- function(object, colorset=NULL, ..., n.portfolios=25, match.col="ES", main="", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray", legend.loc="topright"){ +chart.Weights.EF.efficient.frontier <- function(object, colorset=NULL, ..., n.portfolios=25, by.groups=FALSE, match.col="ES", main="", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray", legend.loc="topright"){ 
# using ideas from weightsPlot.R in fPortfolio package if(!inherits(object, "efficient.frontier")) stop("object must be of class 'efficient.frontier'") @@ -308,6 +309,25 @@ wts_idx <- grep(pattern="^w\\.", cnames) wts <- frontier[, wts_idx] + if(by.groups){ + constraints <- get_constraints(object$portfolio) + groups <- constraints$groups + if(is.null(groups)) stop("group constraints not in portfolio object") + if(!is.null(groups)){ + groupfun <- function(weights, groups){ + # This function is to calculate weights by group given the group list + # and a matrix of weights along the efficient frontier + ngroups <- length(groups) + group_weights <- rep(0, ngroups) + for(i in 1:ngroups){ + group_weights[i] <- sum(weights[groups[[i]]]) + } + group_weights + } + wts <- t(apply(wts, 1, groupfun, groups=groups)) + } + } + # return along the efficient frontier # get the "mean" column mean.mtc <- pmatch("mean", cnames) @@ -357,7 +377,12 @@ if(legend.loc %in% c("topright", "right", "bottomright")){ # set the legend information if(is.null(legend.labels)){ - legend.labels <- gsub(pattern="^w\\.", replacement="", cnames[wts_idx]) + if(by.groups){ + legend.labels <- names(groups) + if(is.null(legend.labels)) legend.labels <- constraints$group_labels + } else { + legend.labels <- gsub(pattern="^w\\.", replacement="", cnames[wts_idx]) + } } legend(legend.loc, legend = legend.labels, bty = "n", cex = cex.legend, fill = colorset) } @@ -389,14 +414,14 @@ #' @rdname chart.Weights.EF #' @export -chart.Weights.EF.optimize.portfolio <- function(object, colorset=NULL, ..., n.portfolios=25, match.col="ES", main="", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray", legend.loc="topright"){ +chart.Weights.EF.optimize.portfolio <- function(object, colorset=NULL, ..., n.portfolios=25, by.groups=FALSE, match.col="ES", main="", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray", legend.loc="topright"){ # chart the weights 
along the efficient frontier of an objected created by optimize.portfolio if(!inherits(object, "optimize.portfolio")) stop("object must be of class optimize.portfolio") frontier <- extractEfficientFrontier(object=object, match.col=match.col, n.portfolios=n.portfolios) PortfolioAnalytics:::chart.Weights.EF(object=frontier, colorset=colorset, ..., - match.col=match.col, main=main, cex.lab=cex.lab, + match.col=match.col, by.groups=by.groups, main=main, cex.lab=cex.lab, cex.axis=cex.axis, cex.legend=cex.legend, legend.labels=legend.labels, element.color=element.color, legend.loc=legend.loc) Modified: pkg/PortfolioAnalytics/sandbox/testing_efficient_frontier.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/testing_efficient_frontier.R 2013-09-02 17:24:46 UTC (rev 2971) +++ pkg/PortfolioAnalytics/sandbox/testing_efficient_frontier.R 2013-09-03 02:35:10 UTC (rev 2972) @@ -22,7 +22,12 @@ init <- portfolio.spec(assets=funds) # initial constraints init <- add.constraint(portfolio=init, type="full_investment") -init <- add.constraint(portfolio=init, type="box", min=0, max=1) +init <- add.constraint(portfolio=init, type="box", min=0.15, max=0.45) +init <- add.constraint(portfolio=init, type="group", + groups=list(c(1, 3), + c(2, 4, 5)), + group_min=0.05, + group_max=0.7) # initial objective init <- add.objective(portfolio=init, type="return", name="mean") @@ -62,8 +67,12 @@ chart.EfficientFrontier(meanvar.ef, match.col="StdDev", type="l", tangent.line=FALSE, labels.assets=FALSE, pch.assets=1) +# Chart the asset weights along the efficient frontier chart.Weights.EF(meanvar.ef, colorset=bluemono, match.col="StdDev") +# Chart the group weights along the efficient frontier +chart.Weights.EF(meanvar.ef, colorset=bluemono, by.groups=TRUE, match.col="StdDev") + # The labels for Mean, Weight, and StdDev can be increased or decreased with # the cex.lab argument. 
The default is cex.lab=0.8 chart.Weights.EF(meanvar.ef, colorset=bluemono, match.col="StdDev", main="", cex.lab=1) @@ -93,6 +102,8 @@ # optimize.portfolio output object chart.Weights.EF(opt_meanvar, match.col="StdDev") +chart.Weights.EF(opt_meanvar, match.col="StdDev", by.groups=TRUE) + # Extract the efficient frontier and then plot it # Note that if you want to do multiple charts of the efficient frontier from # the optimize.portfolio object, it is best to extractEfficientFrontier as shown @@ -101,6 +112,7 @@ print(ef) summary(ef, digits=5) chart.Weights.EF(ef, match.col="StdDev", colorset=bluemono) +chart.Weights.EF(ef, match.col="StdDev", colorset=bluemono, by.groups=TRUE) # mean-etl efficient frontier meanetl.ef <- create.EfficientFrontier(R=R, portfolio=meanetl.portf, type="mean-ES") @@ -108,6 +120,7 @@ summary(meanetl.ef) chart.EfficientFrontier(meanetl.ef, match.col="ES", main="mean-ETL Efficient Frontier", type="l", col="blue", RAR.text="STARR") chart.Weights.EF(meanetl.ef, colorset=bluemono, match.col="ES") +chart.Weights.EF(meanetl.ef, by.groups=TRUE, colorset=bluemono, match.col="ES") # mean-etl efficient frontier using random portfolios meanetl.rp.ef <- create.EfficientFrontier(R=R, portfolio=meanetl.portf, type="random", match.col="ES") From noreply at r-forge.r-project.org Tue Sep 3 07:23:35 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 07:23:35 +0200 (CEST) Subject: [Returnanalytics-commits] r2973 - in pkg/PortfolioAnalytics: R man Message-ID: <20130903052335.D633D184F90@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-03 07:23:35 +0200 (Tue, 03 Sep 2013) New Revision: 2973 Modified: pkg/PortfolioAnalytics/R/charts.groups.R pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd Log: Adding barplot for group weights Modified: pkg/PortfolioAnalytics/R/charts.groups.R =================================================================== --- 
pkg/PortfolioAnalytics/R/charts.groups.R 2013-09-03 02:35:10 UTC (rev 2972) +++ pkg/PortfolioAnalytics/R/charts.groups.R 2013-09-03 05:23:35 UTC (rev 2973) @@ -7,6 +7,7 @@ #' \item{groups: }{group the weights group constraints} #' \item{category_labels: }{group the weights by category_labels in portfolio object} #' } +#' @param plot.type "line" or "barplot" #' @param main an overall title for the plot: see \code{\link{title}} #' @param las numeric in \{0,1,2,3\}; the style of axis labels #' \describe{ @@ -21,61 +22,95 @@ #' @param cex.axis The magnification to be used for x and y axis relative to the current setting of \code{cex} #' @author Ross Bennett #' @export -chart.GroupWeights <- function(object, ..., grouping=c("groups", "category"), main="Group Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ - if(!inherits(object, "optimize.portfolio.ROI")) stop("object must be of class 'optimize.portfolio'") +chart.GroupWeights <- function(object, ..., grouping=c("groups", "category"), plot.type="line", main="Group Weights", las=3, xlab=NULL, cex.lab=0.8, element.color="darkgray", cex.axis=0.8){ + if(!inherits(object, "optimize.portfolio")) stop("object must be of class 'optimize.portfolio'") + if(plot.type %in% c("bar", "barplot")){ + barplotGroupWeights(object=object, ...=..., grouping=grouping, main=main, + las=las, xlab=xlab, cex.lab=cex.lab, + element.color=element.color, cex.axis=cex.axis) + } else if(plot.type == "line"){ + constraints <- get_constraints(object$portfolio) + tmp <- extractGroups(object) + grouping <- grouping[1] + + if(grouping == "groups"){ + weights <- tmp$group_weights + if(is.null(weights)) stop("No weights detected for groups") + if(any(is.infinite(constraints$cUP)) | any(is.infinite(constraints$cLO))){ + # set ylim based on weights if box constraints contain Inf or -Inf + ylim <- range(weights) + } else { + # set ylim based on the range of box constraints min and max + ylim <- range(c(constraints$cLO, 
constraints$cUP)) + } + } + + if(grouping == "category"){ + weights <- tmp$category_weights + if(is.null(weights)) stop("No weights detected for category") + ylim <- range(weights) + } + + columnnames = names(weights) + numgroups = length(columnnames) + + if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + plot(weights, axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, ...) + if(grouping == "groups"){ + if(!any(is.infinite(constraints$cLO))){ + points(constraints$cLO, type="b", col="darkgray", lty="solid", lwd=2, pch=24) + } + if(!any(is.infinite(constraints$cUP))){ + points(constraints$cUP, type="b", col="darkgray", lty="solid", lwd=2, pch=25) + } + } + axis(2, cex.axis = cex.axis, col = element.color) + axis(1, labels=columnnames, at=1:numgroups, las=las, cex.axis = cex.axis, col = element.color) + box(col = element.color) + } +} + +#' barplot of group weights +#' @author Ross Bennett +barplotGroupWeights <- function(object, ..., grouping=c("groups", "category"), main="Group Weights", las=3, xlab=NULL, cex.lab=0.8, element.color="darkgray", cex.axis=0.8){ + if(!inherits(object, "optimize.portfolio")) stop("object must be of class 'optimize.portfolio'") + constraints <- get_constraints(object$portfolio) tmp <- extractGroups(object) - grouping <- grouping[1] + grouping <- "groups" if(grouping == "groups"){ weights <- tmp$group_weights if(is.null(weights)) stop("No weights detected for groups") - if(any(is.infinite(constraints$cUP)) | any(is.infinite(constraints$cLO))){ - # set ylim based on weights if box constraints contain Inf or -Inf - ylim <- range(weights) - } else 
{ - # set ylim based on the range of box constraints min and max - ylim <- range(c(constraints$cLO, constraints$cUP)) - } } if(grouping == "category"){ weights <- tmp$category_weights if(is.null(weights)) stop("No weights detected for category") - ylim <- range(weights) } columnnames = names(weights) numgroups = length(columnnames) - if(is.null(xlab)) - minmargin = 3 - else - minmargin = 5 - if(main=="") topmargin=1 else topmargin=4 - if(las > 1) {# set the bottom border to accommodate labels - bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab - if(bottommargin > 10 ) { - bottommargin<-10 - columnnames<-substr(columnnames,1,19) - } - } - else { - bottommargin = minmargin - } - par(mar = c(bottommargin, 4, topmargin, 2) +.1) - - plot(weights, axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, ...) - if(grouping == "groups"){ - if(!any(is.infinite(constraints$cLO))){ - points(constraints$cLO, type="b", col="darkgray", lty="solid", lwd=2, pch=24) - } - if(!any(is.infinite(constraints$cUP))){ - points(constraints$cUP, type="b", col="darkgray", lty="solid", lwd=2, pch=25) - } - } - axis(2, cex.axis = cex.axis, col = element.color) - axis(1, labels=columnnames, at=1:numgroups, las=las, cex.axis = cex.axis, col = element.color) - box(col = element.color) + barplot(weights, ylab = "", names.arg=columnnames, + border=element.color, cex.axis=cex.axis, main=main, las=las, + cex.names=cex.lab, xlab=xlab, ...) 
+ box(col=element.color) } Modified: pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd 2013-09-03 02:35:10 UTC (rev 2972) +++ pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd 2013-09-03 05:23:35 UTC (rev 2973) @@ -3,9 +3,9 @@ \title{Chart weights by group or category} \usage{ chart.GroupWeights(object, ..., - grouping = c("groups", "category"), + grouping = c("groups", "category"), plot.type = "line", main = "Group Weights", las = 3, xlab = NULL, - cex.lab = 1, element.color = "darkgray", + cex.lab = 0.8, element.color = "darkgray", cex.axis = 0.8) } \arguments{ @@ -17,6 +17,8 @@ weights group constraints} \item{category_labels: }{group the weights by category_labels in portfolio object} }} + \item{plot.type}{"line" or "barplot"} + \item{main}{an overall title for the plot: see \code{\link{title}}} Modified: pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd 2013-09-03 02:35:10 UTC (rev 2972) +++ pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd 2013-09-03 05:23:35 UTC (rev 2973) @@ -5,22 +5,24 @@ \title{chart the weights along the efficient frontier} \usage{ chart.Weights.EF(object, colorset = NULL, ..., - n.portfolios = 25, match.col = "ES", + n.portfolios = 25, by.groups = FALSE, match.col = "ES", main = "EF Weights", cex.lab = 0.8, cex.axis = 0.8, cex.legend = 0.8, legend.labels = NULL, element.color = "darkgray") chart.Weights.EF.efficient.frontier(object, colorset = NULL, ..., n.portfolios = 25, - match.col = "ES", main = "", cex.lab = 0.8, - cex.axis = 0.8, cex.legend = 0.8, legend.labels = NULL, - element.color = "darkgray", legend.loc = "topright") + by.groups = FALSE, match.col = "ES", main = "", + cex.lab = 0.8, cex.axis = 0.8, cex.legend = 0.8, + legend.labels = NULL, element.color = "darkgray", + legend.loc = "topright") 
chart.Weights.EF.optimize.portfolio(object, colorset = NULL, ..., n.portfolios = 25, - match.col = "ES", main = "", cex.lab = 0.8, - cex.axis = 0.8, cex.legend = 0.8, legend.labels = NULL, - element.color = "darkgray", legend.loc = "topright") + by.groups = FALSE, match.col = "ES", main = "", + cex.lab = 0.8, cex.axis = 0.8, cex.legend = 0.8, + legend.labels = NULL, element.color = "darkgray", + legend.loc = "topright") } \arguments{ \item{object}{object of class \code{efficient.frontier} @@ -34,6 +36,9 @@ the efficient frontier. This is only used for objects of class \code{optimize.portfolio}} + \item{by.groups}{TRUE/FALSE. If TRUE, the weights by + group are charted.} + \item{match.col}{match.col string name of column to use for risk (horizontal axis). Must match the name of an objective.} From noreply at r-forge.r-project.org Tue Sep 3 19:22:58 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 19:22:58 +0200 (CEST) Subject: [Returnanalytics-commits] r2974 - pkg/FactorAnalytics/R Message-ID: <20130903172258.7935F181167@r-forge.r-project.org> Author: chenyian Date: 2013-09-03 19:22:58 +0200 (Tue, 03 Sep 2013) New Revision: 2974 Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r Log: debug time index Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 05:23:35 UTC (rev 2973) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 17:22:58 UTC (rev 2974) @@ -86,12 +86,12 @@ fundName = rownames(fit$beta) attr.list <- list() - + # data <- checkData(fit$data) for (k in fundName) { fit.lm = fit$asset.fit[[k]] ## extract information from lm object - date <- index(fit$data[,k]) + date <- rownames(fit.lm$model[1]) actual.xts = xts(fit.lm$model[1], as.Date(date)) @@ -187,7 +187,7 @@ cum.spec.ret <- fit$r2 factorName = 
rownames(fit$loadings) fundName = colnames(fit$loadings) - + # create list for attribution attr.list <- list() # pca method From noreply at r-forge.r-project.org Tue Sep 3 22:16:33 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 22:16:33 +0200 (CEST) Subject: [Returnanalytics-commits] r2975 - in pkg/FactorAnalytics: . R Message-ID: <20130903201633.8DE9A181167@r-forge.r-project.org> Author: chenyian Date: 2013-09-03 22:16:33 +0200 (Tue, 03 Sep 2013) New Revision: 2975 Added: pkg/FactorAnalytics/R/summary.FM.attribution.r Modified: pkg/FactorAnalytics/NAMESPACE pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r Log: add function summary.FM.attribution.r Modified: pkg/FactorAnalytics/NAMESPACE =================================================================== --- pkg/FactorAnalytics/NAMESPACE 2013-09-03 17:22:58 UTC (rev 2974) +++ pkg/FactorAnalytics/NAMESPACE 2013-09-03 20:16:33 UTC (rev 2975) @@ -1,3 +1,4 @@ +S3method(summary.FM.attribution) export(factorModelPerformanceAttribution) export(dCornishFisher) export(factorModelCovariance) Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 17:22:58 UTC (rev 2974) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 20:16:33 UTC (rev 2975) @@ -86,42 +86,42 @@ fundName = rownames(fit$beta) attr.list <- list() - # data <- checkData(fit$data) + # data <- checkData(fit$data) for (k in fundName) { - fit.lm = fit$asset.fit[[k]] - - ## extract information from lm object - date <- rownames(fit.lm$model[1]) - - actual.xts = xts(fit.lm$model[1], as.Date(date)) - - -# attributed returns -# active portfolio management p.512 17A.9 + fit.lm = fit$asset.fit[[k]] + + ## extract information from lm object + date <- rownames(fit.lm$model[1]) + + actual.xts = xts(fit.lm$model[1], as.Date(date)) + + + # 
attributed returns + # active portfolio management p.512 17A.9 + + cum.ret <- Return.cumulative(actual.xts) + # setup initial value + attr.ret.xts.all <- xts(, as.Date(date)) + for ( i in factorName ) { + + if (is.na(fit$beta[k,i])) { + cum.attr.ret[k,i] <- NA + attr.ret.xts.all <- merge(attr.ret.xts.all,xts(rep(NA,length(date)),as.Date(date))) + } else { + attr.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[i])%*%as.matrix(fit.lm$coef[i]), + as.Date(date)) + cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) + attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) + } + } - cum.ret <- Return.cumulative(actual.xts) - # setup initial value - attr.ret.xts.all <- xts(, as.Date(date)) - for ( i in factorName ) { - - if (fit$beta[k,i]==0) { - cum.attr.ret[k,i] <- 0 - attr.ret.xts.all <- merge(attr.ret.xts.all,xts(rep(0,length(date)),as.Date(date))) - } else { - attr.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[i])%*%as.matrix(fit.lm$coef[i]), - as.Date(date)) - cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) - attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) - } - - } - - # specific returns - spec.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[,-1])%*%as.matrix(fit.lm$coef[-1]), - as.Date(date)) - cum.spec.ret[k] <- cum.ret - Return.cumulative(actual.xts-spec.ret.xts) - attr.list[[k]] <- merge(attr.ret.xts.all,spec.ret.xts) - colnames(attr.list[[k]]) <- c(factorName,"specific.returns") + + # specific returns + spec.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[,-1])%*%as.matrix(fit.lm$coef[-1]), + as.Date(date)) + cum.spec.ret[k] <- cum.ret - Return.cumulative(actual.xts-spec.ret.xts) + attr.list[[k]] <- merge(attr.ret.xts.all,spec.ret.xts) + colnames(attr.list[[k]]) <- c(factorName,"specific.returns") } Added: pkg/FactorAnalytics/R/summary.FM.attribution.r =================================================================== --- pkg/FactorAnalytics/R/summary.FM.attribution.r (rev 0) +++ 
pkg/FactorAnalytics/R/summary.FM.attribution.r 2013-09-03 20:16:33 UTC (rev 2975) @@ -0,0 +1,32 @@ +#' summary FM.attribution object. +#' +#' Generic function of summary method for factorModelPerformanceAttribution. +#' +#' +#' @param fm.attr FM.attribution object created by +#' factorModelPerformanceAttribution. +#' @author Yi-An Chen. +#' @examples +#' # load data from the database +#' data(managers.df) +#' # fit the factor model with OLS +#' fit.ts <- fitTimeSeriesFactorModel(assets.names=colnames(managers.df[,(1:6)]), +#' factors.names=c("EDHEC.LS.EQ","SP500.TR"), +#' data=managers.df,fit.method="OLS") +#' +#' fm.attr <- factorModelPerformanceAttribution(fit.ts) +#' summary(fm.attr) +#' @method summary FM.attribution +#' @export +#' +summary.FM.attribution <- function(fm.attr) { +# n <- dim(fm.attr[[1]])[1] +# k <- dim(fm.attr[[1]])[2]+1 +# table.mat <- matrix(rep(NA,n*k*2),ncol=n) + cat("\nMean of returns attributed to factors + \n") + print(sapply(fm.attr[[3]],function(x) apply(x,2,mean))) + cat("\nStandard Deviation of returns attributed to factors + \n") + print(sapply(fm.attr[[3]],function(x) apply(x,2,sd))) +} From noreply at r-forge.r-project.org Tue Sep 3 22:16:58 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 22:16:58 +0200 (CEST) Subject: [Returnanalytics-commits] r2976 - pkg/FactorAnalytics/man Message-ID: <20130903201658.11FCC181167@r-forge.r-project.org> Author: chenyian Date: 2013-09-03 22:16:57 +0200 (Tue, 03 Sep 2013) New Revision: 2976 Added: pkg/FactorAnalytics/man/summary.FM.attribution.Rd Log: add summary.FM.attribution.Rd Added: pkg/FactorAnalytics/man/summary.FM.attribution.Rd =================================================================== --- pkg/FactorAnalytics/man/summary.FM.attribution.Rd (rev 0) +++ pkg/FactorAnalytics/man/summary.FM.attribution.Rd 2013-09-03 20:16:57 UTC (rev 2976) @@ -0,0 +1,29 @@ +\name{summary.FM.attribution} +\alias{summary.FM.attribution} +\title{summary 
FM.attribution object.} +\usage{ + \method{summary}{FM.attribution} (fm.attr) +} +\arguments{ + \item{fm.attr}{FM.attribution object created by + factorModelPerformanceAttribution.} +} +\description{ + Generic function of summary method for + factorModelPerformanceAttribution. +} +\examples{ +# load data from the database + data(managers.df) + # fit the factor model with OLS + fit.ts <- fitTimeSeriesFactorModel(assets.names=colnames(managers.df[,(1:6)]), + factors.names=c("EDHEC.LS.EQ","SP500.TR"), + data=managers.df,fit.method="OLS") + + fm.attr <- factorModelPerformanceAttribution(fit.ts) + summary(fm.attr) +} +\author{ + Yi-An Chen. +} + From noreply at r-forge.r-project.org Tue Sep 3 23:01:25 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 23:01:25 +0200 (CEST) Subject: [Returnanalytics-commits] r2977 - in pkg/FactorAnalytics: . R Message-ID: <20130903210125.A7EDE1854F0@r-forge.r-project.org> Author: chenyian Date: 2013-09-03 23:01:25 +0200 (Tue, 03 Sep 2013) New Revision: 2977 Modified: pkg/FactorAnalytics/DESCRIPTION pkg/FactorAnalytics/NAMESPACE pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r Log: debug for statistical model and fundamental model Modified: pkg/FactorAnalytics/DESCRIPTION =================================================================== --- pkg/FactorAnalytics/DESCRIPTION 2013-09-03 20:16:57 UTC (rev 2976) +++ pkg/FactorAnalytics/DESCRIPTION 2013-09-03 21:01:25 UTC (rev 2977) @@ -7,5 +7,5 @@ Maintainer: Yi-An Chen Description: An R package for estimation and risk analysis of linear factor models for asset returns and portfolios. It contains three major fitting method for the factor models: fitting macroeconomic factor model, fitting fundamental factor model and fitting statistical factor model and some risk analysis tools like VaR, ES to use the result of the fitting method. 
It also provides the different type of distribution to fit the fat-tail behavior of the financial returns, including edgeworth expansion type distribution. License: GPL-2 -Depends: robust, robustbase, leaps, lars, zoo, MASS, PerformanceAnalytics, ff, sn, tseries, strucchange,xts,ellipse +Depends: robust, robustbase, leaps, lars,ff, MASS, PerformanceAnalytics, sn, tseries, strucchange,xts,ellipse LazyLoad: yes \ No newline at end of file Modified: pkg/FactorAnalytics/NAMESPACE =================================================================== --- pkg/FactorAnalytics/NAMESPACE 2013-09-03 20:16:57 UTC (rev 2976) +++ pkg/FactorAnalytics/NAMESPACE 2013-09-03 21:01:25 UTC (rev 2977) @@ -1,4 +1,4 @@ -S3method(summary.FM.attribution) +S3method(summary,FM.attribution) export(factorModelPerformanceAttribution) export(dCornishFisher) export(factorModelCovariance) Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 20:16:57 UTC (rev 2976) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 21:01:25 UTC (rev 2977) @@ -142,8 +142,13 @@ #cumulative return attributed to factors + if (factor.names[1] == "(Intercept)") { cum.attr.ret <- matrix(,nrow=length(ticker),ncol=length(factor.names), - dimnames=list(ticker,factor.names)) + dimnames=list(ticker,factor.names))[,-1] # discard intercept + } else { + cum.attr.ret <- matrix(,nrow=length(ticker),ncol=length(factor.names), + dimnames=list(ticker,factor.names)) + } cum.spec.ret <- rep(0,length(ticker)) names(cum.spec.ret) <- ticker @@ -164,7 +169,7 @@ attr.factor <- exposure * coredata(factor.returns) specific.returns <- returns - apply(attr.factor,1,sum) - attr <- cbind(returns,attr.factor,specific.returns) + attr <- cbind(attr.factor,specific.returns) attr.list[[k]] <- xts(attr,as.Date(dates)) cum.attr.ret[k,] <- apply(attr.factor,2,Return.cumulative) 
cum.spec.ret[k] <- Return.cumulative(specific.returns) From noreply at r-forge.r-project.org Tue Sep 3 23:22:24 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 23:22:24 +0200 (CEST) Subject: [Returnanalytics-commits] r2978 - pkg/FactorAnalytics/R Message-ID: <20130903212225.0590C1854E9@r-forge.r-project.org> Author: chenyian Date: 2013-09-03 23:22:24 +0200 (Tue, 03 Sep 2013) New Revision: 2978 Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r Log: debug Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 21:01:25 UTC (rev 2977) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 21:22:24 UTC (rev 2978) @@ -86,13 +86,12 @@ fundName = rownames(fit$beta) attr.list <- list() - # data <- checkData(fit$data) + for (k in fundName) { fit.lm = fit$asset.fit[[k]] ## extract information from lm object - date <- rownames(fit.lm$model[1]) - + date <- index(na.omit(fit$data[,k])) actual.xts = xts(fit.lm$model[1], as.Date(date)) From noreply at r-forge.r-project.org Tue Sep 3 23:32:50 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 23:32:50 +0200 (CEST) Subject: [Returnanalytics-commits] r2979 - in pkg/PerformanceAnalytics/sandbox/pulkit: R man Message-ID: <20130903213250.DE57F1854E9@r-forge.r-project.org> Author: pulkit Date: 2013-09-03 23:32:50 +0200 (Tue, 03 Sep 2013) New Revision: 2979 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R pkg/PerformanceAnalytics/sandbox/pulkit/R/CDaRMultipath.R pkg/PerformanceAnalytics/sandbox/pulkit/R/CdaR.R pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R pkg/PerformanceAnalytics/sandbox/pulkit/R/Drawdownalpha.R pkg/PerformanceAnalytics/sandbox/pulkit/R/PSRopt.R pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R 
pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.REDD.R pkg/PerformanceAnalytics/sandbox/pulkit/man/AlphaDrawdown.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/BetaDrawdown.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/CdarMultiPath.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd Log: na in psr optimization Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-09-03 21:32:50 UTC (rev 2979) @@ -34,7 +34,7 @@ #'@param xlim set the xlim value, as in \code{\link{plot}} #'@param \dots any other passthru variable #'@author Pulkit Mehrotra -#'@seealso \code{\link{BenchmarkSR}} \code{\link{chart.SRIndifference}} \code{\link{plot}} +#'@seealso \code{\link{BenchmarkSR}} \code{\link{chart.SRIndifference}} #'@references #'Bailey, David H. and Lopez de Prado, Marcos, The Strategy Approval Decision: #'A Sharpe Ratio Indifference Curve Approach (January 2013). Algorithmic Finance, Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/CDaRMultipath.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/CDaRMultipath.R 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/CDaRMultipath.R 2013-09-03 21:32:50 UTC (rev 2979) @@ -29,8 +29,8 @@ #'@param \dots any other passthru parameters #' #'@author Pulkit Mehrotra -#' @seealso \code{\link{ES}} \code{\link{maxDrawdown}} \code{\link{CDaR}} -#'\code{\link{AlphaDrawdown}} \code{\link{MultiBetaDrawdown}} \code{\link{BetaDrawdown}} +#' @seealso \code{\link{CDaR}} \code{\link{AlphaDrawdown}} \code{\link{MultiBetaDrawdown}} +#'\code{\link{BetaDrawdown}} #'@references #'Zabarankin, M., Pavlikov, K., and S. Uryasev. 
Capital Asset Pricing Model (CAPM) Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/CdaR.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/CdaR.R 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/CdaR.R 2013-09-03 21:32:50 UTC (rev 2979) @@ -17,8 +17,8 @@ #' @param p confidence level for calculation, default p=0.95 #' @param \dots any other passthru parameters #' @author Brian G. Peterson -#' @seealso \code{\link{ES}} \code{\link{maxDrawdown}} \code{\link{CdarMultiPath}} -#'\code{\link{AlphaDrawdown}} \code{\link{MultiBetaDrawdown}} \code{\link{BetaDrawdown}} +#' @seealso \code{\link{CdarMultiPath}} \code{\link{AlphaDrawdown}} +#'\code{\link{MultiBetaDrawdown}} \code{\link{BetaDrawdown}} #' @references Chekhlov, A., Uryasev, S., and M. Zabarankin. Portfolio #' Optimization With Drawdown Constraints. B. Scherer (Ed.) Asset and Liability #' Management Tools, Risk Books, London, 2003 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R 2013-09-03 21:32:50 UTC (rev 2979) @@ -40,7 +40,7 @@ #'@param \dots any passthru variable. 
#' #'@author Pulkit Mehrotra -#' @seealso \code{\link{ES}} \code{\link{maxDrawdown}} \code{\link{CdarMultiPath}} +#' @seealso \code{\link{CdarMultiPath}} #'\code{\link{AlphaDrawdown}} \code{\link{MultiBetaDrawdown}} \code{\link{CDaR}} #' Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/Drawdownalpha.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/Drawdownalpha.R 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/Drawdownalpha.R 2013-09-03 21:32:50 UTC (rev 2979) @@ -26,7 +26,7 @@ #'@param \dots any passthru variable #' #'@author Pulkit Mehrotra -#' @seealso \code{\link{ES}} \code{\link{maxDrawdown}} \code{\link{CdarMultiPath}} +#' @seealso \code{\link{CdarMultiPath}} #'\code{\link{CDaR}} \code{\link{MultiBetaDrawdown}} \code{\link{BetaDrawdown}} #'@references Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/PSRopt.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/PSRopt.R 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/PSRopt.R 2013-09-03 21:32:50 UTC (rev 2979) @@ -211,7 +211,7 @@ } get_Moments<-function(series,order,mean = 0){ sum = 0 - mat = as.matrix(series) + mat = na.omit(as.matrix(series)) sum = .Call("sums_m",mat,mean,order) # for(i in series){ # sum = sum + (i-mean)^order Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R 2013-09-03 21:32:50 UTC (rev 2979) @@ -33,8 +33,8 @@ #'@param xlim set the xlim value, as in \code{\link{plot}} #'@param \dots Any other pass thru variable #'@author Pulkit Mehrotra -#'@seealso \code{\link{plot}} \code{\link{table.Penance}} \code{\link{MaxDD}} \code{\link{TuW}} -#'@keywords ts 
multivariate distribution models hplot +#'@seealso \code{\link{table.Penance}} \code{\link{MaxDD}} \code{\link{TuW}} +#'@keywords ts multivariate distribution models plot #'@examples #'data(edhec) #'chart.Penance(edhec,0.95) Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.REDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.REDD.R 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.REDD.R 2013-09-03 21:32:50 UTC (rev 2979) @@ -13,7 +13,7 @@ #'@param colorset set the colorset label, as in \code{\link{plot}} #'@param \dots any other variable #'@author Pulkit Mehrotra -#'@seealso \code{\link{plot}} \code{\link{EconomicDrawdown}} \code{\link{EDDCOPS}} +#'@seealso \code{\link{EconomicDrawdown}} \code{\link{EDDCOPS}} #'\code{\link{rollDrawdown}} \code{\link{REDDCOPS}} \code{\link{rollEconomicMax}} #'@references Yang, Z. George and Zhong, Liang, Optimal Portfolio Strategy to #'Control Maximum Drawdown - The Case of Risk Based Dynamic Asset Allocation (February 25, 2012) Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/AlphaDrawdown.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/AlphaDrawdown.Rd 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/AlphaDrawdown.Rd 2013-09-03 21:32:50 UTC (rev 2979) @@ -65,7 +65,6 @@ 2012. } \seealso{ - \code{\link{ES}} \code{\link{maxDrawdown}} \code{\link{CdarMultiPath}} \code{\link{CDaR}} \code{\link{MultiBetaDrawdown}} \code{\link{BetaDrawdown}} Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/BetaDrawdown.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/BetaDrawdown.Rd 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/BetaDrawdown.Rd 2013-09-03 21:32:50 UTC (rev 2979) @@ -76,7 +76,6 @@ 2012. 
} \seealso{ - \code{\link{ES}} \code{\link{maxDrawdown}} \code{\link{CdarMultiPath}} \code{\link{AlphaDrawdown}} \code{\link{MultiBetaDrawdown}} \code{\link{CDaR}} } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/CdarMultiPath.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/CdarMultiPath.Rd 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/CdarMultiPath.Rd 2013-09-03 21:32:50 UTC (rev 2979) @@ -61,7 +61,6 @@ September 2012 } \seealso{ - \code{\link{ES}} \code{\link{maxDrawdown}} \code{\link{CDaR}} \code{\link{AlphaDrawdown}} \code{\link{MultiBetaDrawdown}} \code{\link{BetaDrawdown}} Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd 2013-09-03 21:32:50 UTC (rev 2979) @@ -90,7 +90,7 @@ } \seealso{ \code{\link{BenchmarkSR}} - \code{\link{chart.SRIndifference}} \code{\link{plot}} + \code{\link{chart.SRIndifference}} \code{\link{plot}} } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd 2013-09-03 21:22:24 UTC (rev 2978) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd 2013-09-03 21:32:50 UTC (rev 2979) @@ -82,12 +82,12 @@ Rule(January 1, 2013). 
} \seealso{ - \code{\link{plot}} \code{\link{table.Penance}} - \code{\link{MaxDD}} \code{\link{TuW}} + \code{\link{table.Penance}} \code{\link{MaxDD}} + \code{\link{TuW}} } \keyword{distribution} -\keyword{hplot} \keyword{models} \keyword{multivariate} +\keyword{plot} \keyword{ts} From noreply at r-forge.r-project.org Tue Sep 3 23:43:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 3 Sep 2013 23:43:27 +0200 (CEST) Subject: [Returnanalytics-commits] r2980 - pkg/FactorAnalytics/R Message-ID: <20130903214327.8CB2B1854E9@r-forge.r-project.org> Author: chenyian Date: 2013-09-03 23:43:27 +0200 (Tue, 03 Sep 2013) New Revision: 2980 Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r Log: clean up codes. Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 21:32:50 UTC (rev 2979) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-03 21:43:27 UTC (rev 2980) @@ -91,7 +91,8 @@ fit.lm = fit$asset.fit[[k]] ## extract information from lm object - date <- index(na.omit(fit$data[,k])) + data <- checkData(fit$data) + date <- index(na.omit(data[,k])) actual.xts = xts(fit.lm$model[1], as.Date(date)) From noreply at r-forge.r-project.org Wed Sep 4 02:02:34 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 02:02:34 +0200 (CEST) Subject: [Returnanalytics-commits] r2981 - in pkg/FactorAnalytics: . 
R man Message-ID: <20130904000234.B7D70185C3B@r-forge.r-project.org> Author: chenyian Date: 2013-09-04 02:02:34 +0200 (Wed, 04 Sep 2013) New Revision: 2981 Added: pkg/FactorAnalytics/R/plot.FM.attribution.r pkg/FactorAnalytics/man/plot.FM.attribution.Rd Modified: pkg/FactorAnalytics/NAMESPACE Log: add new function plot.FM.attribution.r and .Rd file plot.FM.attribution.Rd Modified: pkg/FactorAnalytics/NAMESPACE =================================================================== --- pkg/FactorAnalytics/NAMESPACE 2013-09-03 21:43:27 UTC (rev 2980) +++ pkg/FactorAnalytics/NAMESPACE 2013-09-04 00:02:34 UTC (rev 2981) @@ -1,3 +1,4 @@ +S3method(plot,FM.attribution) S3method(summary,FM.attribution) export(factorModelPerformanceAttribution) export(dCornishFisher) Added: pkg/FactorAnalytics/R/plot.FM.attribution.r =================================================================== --- pkg/FactorAnalytics/R/plot.FM.attribution.r (rev 0) +++ pkg/FactorAnalytics/R/plot.FM.attribution.r 2013-09-04 00:02:34 UTC (rev 2981) @@ -0,0 +1,128 @@ +#' plot FM.attribution class +#' +#' Generic function of plot method for factorModelPerformanceAttribution. +#' Either plot all fit models or choose a single asset to plot. +#' +#' +#' @param fm.attr FM.attribution object created by +#' factorModelPerformanceAttribution. +#' @param which.plot integer indicating which plot to create: "none" will +#' create a menu to choose. Defualt is none. 1 = attributed cumulative returns, +#' 2 = attributed returns on date selected by user, 3 = time series of +#' attributed returns +#' @param max.show Maximum assets to plot. Default is 6. +#' @param date date indicates for attributed returns, the date format should be +#' xts compatible. +#' @param plot.single Plot a single asset of lm class. Defualt is FALSE. +#' @param fundName Name of the portfolio to be plotted. +#' @param which.plot.single integer indicating which plot to create: "none" +#' will create a menu to choose. Defualt is none. 
1 = attributed cumulative +#' returns, 2 = attributed returns on date selected by user, 3 = time series of +#' attributed returns +#' @param ... more arguements for \code{chart.TimeSeries} used for plotting +#' time series +#' @author Yi-An Chen. +#' @examples +#' \dontrun{ +#' data(managers.df) +#' fit.ts <- fitTimeSeriesFactorModel(assets.names=colnames(managers.df[,(1:6)]), +#' factors.names=c("EDHEC.LS.EQ","SP500.TR"), +#' data=managers.df,fit.method="OLS") +#' fm.attr <- factorModelPerformanceAttribution(fit.ts) +#' # plot all +#' plot(fm.attr,legend.loc="topleft",max.show=6) +#' dev.off() +#' # plot only one assets "HAM1 +#' plot(fm.attr,plot.single=TRUE,fundName="HAM1") +#' } +#' @method plot FM.attribution +#' @export +#' +plot.FM.attribution <- function(fm.attr, which.plot=c("none","1L","2L","3L"),max.show=6, + date=NULL,plot.single=FALSE,fundName, + which.plot.single=c("none","1L","2L","3L"),...) { + # ... for chart.TimeSeries + require(PerformanceAnalytics) + if (is.null(date)){ + date = index(fm.attr[[3]][[1]])[1] + } + + # plot single assets + if (plot.single==TRUE){ + + which.plot.single<-which.plot.single[1] + + if (which.plot.single=="none") + which.plot.single<-menu(c("attributed cumulative returns", + paste("attributed returns","on",date,sep=" "), + "Time series of attributed returns"), + title="performance attribution plot \nMake a plot selection (or 0 to exit):\n") + switch(which.plot.single, + "1L" = { + bar <- c(fm.attr$cum.spec.ret[fundName],fm.attr$cum.ret.attr.f[fundName,]) + names(bar)[1] <- "specific.returns" + barplot(bar,horiz=TRUE,main="cumulative attributed returns",las=1) + }, + "2L" ={ + bar <- coredata(fm.attr$attr.list[[fundName]][as.Date(date)]) + tryCatch( {barplot(bar,horiz=TRUE,main=fundName,las=1) + },error=function(e){cat("\nthis date is not available for this assets.\n")}) + }, + "3L" = { + chart.TimeSeries(fm.attr$attr.list[[fundName]], + main=paste("Time series of attributed returns of ",fundName,sep=""),... 
) + }, + invisible()) + } + # plot all assets + else { + which.plot<-which.plot[1] + fundnames <- rownames(fm.attr$cum.ret.attr.f) + n <- length(fundnames) + + if(which.plot=='none') + which.plot<-menu(c("attributed cumulative returns", + paste("attributed returns","on",date,sep=" "), + "time series of attributed returns"), + title="performance attribution plot \nMake a plot selection (or 0 to exit):\n") + if (n >= max.show) { + cat(paste("numbers of assets are greater than",max.show,", show only first", + max.show,"assets",sep=" ")) + n <- max.show + } + switch(which.plot, + + "1L" = { + par(mfrow=c(2,n/2)) + for (i in fundnames[1:n]) { + bar <- c(fm.attr$cum.spec.ret[i],fm.attr$cum.ret.attr.f[i,]) + names(bar)[1] <- "specific.returns" + barplot(bar,horiz=TRUE,main=i,las=1) + } + par(mfrow=c(1,1)) + }, + "2L" ={ + par(mfrow=c(2,n/2)) + for (i in fundnames[1:n]) { + tryCatch({ + bar <- coredata(fm.attr$attr.list[[i]][as.Date(date)]) + barplot(bar,horiz=TRUE,main=i,las=1) + }, error=function(e) { + cat("\nDate for some assets returns is not available.\n") + dev.off() + } ) + } + par(mfrow=c(1,1)) + }, + "3L" = { + par(mfrow=c(2,n/2)) + for (i in fundnames[1:n]) { + chart.TimeSeries(fm.attr$attr.list[[i]],main=i,...) + } + par(mfrow=c(1,1)) + }, + invisible() + ) + + } +} Added: pkg/FactorAnalytics/man/plot.FM.attribution.Rd =================================================================== --- pkg/FactorAnalytics/man/plot.FM.attribution.Rd (rev 0) +++ pkg/FactorAnalytics/man/plot.FM.attribution.Rd 2013-09-04 00:02:34 UTC (rev 2981) @@ -0,0 +1,61 @@ +\name{plot.FM.attribution} +\alias{plot.FM.attribution} +\title{plot FM.attribution class} +\usage{ + \method{plot}{FM.attribution} (fm.attr, + which.plot = c("none", "1L", "2L", "3L"), max.show = 6, + date = NULL, plot.single = FALSE, fundName, + which.plot.single = c("none", "1L", "2L", "3L"), ...) 
+} +\arguments{ + \item{fm.attr}{FM.attribution object created by + factorModelPerformanceAttribution.} + + \item{which.plot}{integer indicating which plot to + create: "none" will create a menu to choose. Defualt is + none. 1 = attributed cumulative returns, 2 = attributed + returns on date selected by user, 3 = time series of + attributed returns} + + \item{max.show}{Maximum assets to plot. Default is 6.} + + \item{date}{date indicates for attributed returns, the + date format should be xts compatible.} + + \item{plot.single}{Plot a single asset of lm class. + Defualt is FALSE.} + + \item{fundName}{Name of the portfolio to be plotted.} + + \item{which.plot.single}{integer indicating which plot to + create: "none" will create a menu to choose. Defualt is + none. 1 = attributed cumulative returns, 2 = attributed + returns on date selected by user, 3 = time series of + attributed returns} + + \item{...}{more arguements for \code{chart.TimeSeries} + used for plotting time series} +} +\description{ + Generic function of plot method for + factorModelPerformanceAttribution. Either plot all fit + models or choose a single asset to plot. +} +\examples{ +\dontrun{ +data(managers.df) +fit.ts <- fitTimeSeriesFactorModel(assets.names=colnames(managers.df[,(1:6)]), + factors.names=c("EDHEC.LS.EQ","SP500.TR"), + data=managers.df,fit.method="OLS") + fm.attr <- factorModelPerformanceAttribution(fit.ts) +# plot all +plot(fm.attr,legend.loc="topleft",max.show=6) +dev.off() +# plot only one assets "HAM1 +plot(fm.attr,plot.single=TRUE,fundName="HAM1") +} +} +\author{ + Yi-An Chen. +} + From noreply at r-forge.r-project.org Wed Sep 4 02:23:36 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 02:23:36 +0200 (CEST) Subject: [Returnanalytics-commits] r2982 - in pkg/FactorAnalytics: . 
R man vignettes Message-ID: <20130904002336.C9E6E185D2B@r-forge.r-project.org> Author: chenyian Date: 2013-09-04 02:23:36 +0200 (Wed, 04 Sep 2013) New Revision: 2982 Added: pkg/FactorAnalytics/R/print.FM.attribution.r pkg/FactorAnalytics/man/print.FM.attribution.Rd Modified: pkg/FactorAnalytics/NAMESPACE pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw Log: add new function print.FM.attribution.r and print.FM.attribution.Rd Modified: pkg/FactorAnalytics/NAMESPACE =================================================================== --- pkg/FactorAnalytics/NAMESPACE 2013-09-04 00:02:34 UTC (rev 2981) +++ pkg/FactorAnalytics/NAMESPACE 2013-09-04 00:23:36 UTC (rev 2982) @@ -1,3 +1,4 @@ +S3method(print,FM.attribution) S3method(plot,FM.attribution) S3method(summary,FM.attribution) export(factorModelPerformanceAttribution) Added: pkg/FactorAnalytics/R/print.FM.attribution.r =================================================================== --- pkg/FactorAnalytics/R/print.FM.attribution.r (rev 0) +++ pkg/FactorAnalytics/R/print.FM.attribution.r 2013-09-04 00:23:36 UTC (rev 2982) @@ -0,0 +1,28 @@ +#' Print FM.attribution object. +#' +#' Generic function of print method for factorModelPerformanceAttribution. +#' +#' +#' @param fm.attr FM.attribution object created by +#' factorModelPerformanceAttribution. +#' @author Yi-An Chen. 
+#' @examples +#' \dontrun{ +#' # load data from the database +#' data(managers.df) +#' # fit the factor model with OLS +#' fit.ts <- fitTimeSeriesFactorModel(assets.names=colnames(managers.df[,(1:6)]), +#' factors.names=c("EDHEC.LS.EQ","SP500.TR"), +#' data=managers.df,fit.method="OLS") +#' +#' fm.attr <- factorModelPerformanceAttribution(fit.ts) +#' print(fm.attr) +#' } +#' @method print FM.attribution +#' @export +#' +print.FM.attribution <- function(fm.attr) { + cat("\nMean of returns attributed to factors + \n") + print(sapply(fm.attr[[3]],function(x) apply(x,2,mean))) + } Added: pkg/FactorAnalytics/man/print.FM.attribution.Rd =================================================================== --- pkg/FactorAnalytics/man/print.FM.attribution.Rd (rev 0) +++ pkg/FactorAnalytics/man/print.FM.attribution.Rd 2013-09-04 00:23:36 UTC (rev 2982) @@ -0,0 +1,31 @@ +\name{print.FM.attribution} +\alias{print.FM.attribution} +\title{Print FM.attribution object.} +\usage{ + \method{print}{FM.attribution} (fm.attr) +} +\arguments{ + \item{fm.attr}{FM.attribution object created by + factorModelPerformanceAttribution.} +} +\description{ + Generic function of print method for + factorModelPerformanceAttribution. +} +\examples{ +\dontrun{ +# load data from the database + data(managers.df) + # fit the factor model with OLS + fit.ts <- fitTimeSeriesFactorModel(assets.names=colnames(managers.df[,(1:6)]), + factors.names=c("EDHEC.LS.EQ","SP500.TR"), + data=managers.df,fit.method="OLS") + + fm.attr <- factorModelPerformanceAttribution(fit.ts) + print(fm.attr) + } +} +\author{ + Yi-An Chen. 
+} + Modified: pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw =================================================================== --- pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw 2013-09-04 00:02:34 UTC (rev 2981) +++ pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw 2013-09-04 00:23:36 UTC (rev 2982) @@ -389,7 +389,7 @@ ts.attr <- factorModelPerformanceAttribution(fit.time) names(ts.attr) @ -There are 3 items generated by the function. \verb at cum.ret.attr.f@ will return a N x K matrix with cummulative returns attributed to factors. \verb at cum.spec.ret@ will return a N x 1 matrix with cummulative specific returns. \verb at attr.list@ will return a list which contains returns atttribution to each factors and specific returns asset by asset. +There are 3 items generated by the function. \verb at cum.ret.attr.f@ will return a N x K matrix with cummulative returns attributed to factors. \verb at cum.spec.ret@ will return a N x 1 matrix with cummulative specific returns. \verb at attr.list@ will return a list which contains returns atttribution to each factors and specific returns asset by asset. In addition, a \emph{FM.attribution} class will be generated and generic function \verb at print()@, \verb at summary()@ and \verb at plot()@ can be applied to it. \end{document} \ No newline at end of file From noreply at r-forge.r-project.org Wed Sep 4 02:29:08 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 02:29:08 +0200 (CEST) Subject: [Returnanalytics-commits] r2983 - in pkg/Meucci: . 
man Message-ID: <20130904002909.06C1C185D2B@r-forge.r-project.org> Author: braverock Date: 2013-09-04 02:29:08 +0200 (Wed, 04 Sep 2013) New Revision: 2983 Modified: pkg/Meucci/ pkg/Meucci/DESCRIPTION pkg/Meucci/man/FitOrnsteinUhlenbeck.Rd Log: - fix collate and doc Property changes on: pkg/Meucci ___________________________________________________________________ Added: svn:ignore + .Rproj.user .Rhistory .RData Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2013-09-04 00:23:36 UTC (rev 2982) +++ pkg/Meucci/DESCRIPTION 2013-09-04 00:29:08 UTC (rev 2983) @@ -98,6 +98,4 @@ 'BlackLittermanFormula.R' 'Log2Lin.R' 'PlotCompositionEfficientFrontier.R' - ' - FitOrnsteinUhlenbeck.R' 'MaxRsqTS.R' Modified: pkg/Meucci/man/FitOrnsteinUhlenbeck.Rd =================================================================== --- pkg/Meucci/man/FitOrnsteinUhlenbeck.Rd 2013-09-04 00:23:36 UTC (rev 2982) +++ pkg/Meucci/man/FitOrnsteinUhlenbeck.Rd 2013-09-04 00:29:08 UTC (rev 2983) @@ -4,17 +4,11 @@ "Risk and Asset Allocation", Springer, 2005} \usage{ FitOrnsteinUhlenbeck(Y, tau) - - FitOrnsteinUhlenbeck(Y, tau) } \arguments{ \item{Y}{: [matrix] (T x N)} \item{tau}{: [scalar] time step} - - \item{Y}{: [matrix] (T x N)} - - \item{tau}{: [scalar] time step} } \value{ Mu : [vector] long-term means @@ -24,41 +18,21 @@ Sig : [matrix] Sig = S * S', covariance matrix of Brownian motions - - Mu : [vector] long-term means - - Th : [matrix] whose eigenvalues have positive real part / - mean reversion speed - - Sig : [matrix] Sig = S * S', covariance matrix of - Brownian motions } \description{ Fit a multivariate OU process at estimation step tau, as described in A. Meucci "Risk and Asset Allocation", Springer, 2005 - - Fit a multivariate OU process at estimation step tau, as - described in A. 
Meucci "Risk and Asset Allocation", - Springer, 2005 } \note{ o dY_t = -Th * (Y_t - Mu) * dt + S * dB_t where o dB_t: vector of Brownian motions - - o dY_t = -Th * (Y_t - Mu) * dt + S * dB_t where o dB_t: - vector of Brownian motions } \author{ Xavier Valls \email{flamejat at gmail.com} - - Xavier Valls \email{flamejat at gmail.com} } \references{ \url{http://symmys.com/node/170} See Meucci's script for "FitOrnsteinUhlenbeck.m" - - \url{http://symmys.com/node/170} See Meucci's script for - "EfficientFrontierReturns.m" } From noreply at r-forge.r-project.org Wed Sep 4 06:31:31 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 06:31:31 +0200 (CEST) Subject: [Returnanalytics-commits] r2984 - in pkg/PortfolioAnalytics: . R man sandbox Message-ID: <20130904043131.BC572183E3A@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-04 06:31:31 +0200 (Wed, 04 Sep 2013) New Revision: 2984 Added: pkg/PortfolioAnalytics/man/HHI.Rd pkg/PortfolioAnalytics/man/weight_concentration_objective.Rd pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/constraints.R pkg/PortfolioAnalytics/R/generics.R pkg/PortfolioAnalytics/R/objective.R pkg/PortfolioAnalytics/R/objectiveFUN.R pkg/PortfolioAnalytics/R/optFUN.R pkg/PortfolioAnalytics/R/optimize.portfolio.R pkg/PortfolioAnalytics/man/diversification_constraint.Rd pkg/PortfolioAnalytics/man/gmv_opt.Rd Log: Adding functionality for weight_concentration objective as penalty in quadratic objective for QP problems based on King paper. 
Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-04 04:31:31 UTC (rev 2984) @@ -131,4 +131,5 @@ export(update_constraint_v1tov2) export(update.constraint) export(var.portfolio) +export(weight_concentration_objective) export(weight_sum_constraint) Modified: pkg/PortfolioAnalytics/R/constraints.R =================================================================== --- pkg/PortfolioAnalytics/R/constraints.R 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/R/constraints.R 2013-09-04 04:31:31 UTC (rev 2984) @@ -784,8 +784,6 @@ #' #' @param type character type of the constraint #' @param div_target diversification target value -#' @param conc_aversion concentration aversion parameter. Penalizes over -#' concentration for quadratic utility and minimum variance problems. #' @param enabled TRUE/FALSE #' @param message TRUE/FALSE. The default is message=FALSE. Display messages if TRUE. #' @param \dots any other passthru parameters to specify box and/or group constraints @@ -799,10 +797,9 @@ #' #' pspec <- add.constraint(portfolio=pspec, type="diversification", div_target=0.7) #' @export -diversification_constraint <- function(type="diversification", div_target=NULL, conc_aversion=NULL, enabled=TRUE, message=FALSE, ...){ +diversification_constraint <- function(type="diversification", div_target=NULL, enabled=TRUE, message=FALSE, ...){ Constraint <- constraint_v2(type, enabled=enabled, constrclass="diversification_constraint", ...) 
Constraint$div_target <- div_target - Constraint$conc_aversion <- conc_aversion return(Constraint) } Modified: pkg/PortfolioAnalytics/R/generics.R =================================================================== --- pkg/PortfolioAnalytics/R/generics.R 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/R/generics.R 2013-09-04 04:31:31 UTC (rev 2984) @@ -256,6 +256,18 @@ for(i in 1:length(objective_measures)){ print(tmp_obj[i], digits=4) cat("\n") + if(length(objective_measures[[i]]) > 1){ + # This will be the case for any objective measures with HHI for QP problems + for(j in 2:length(objective_measures[[i]])){ + tmpl <- objective_measures[[i]][j] + cat(names(tmpl), ":\n") + tmpv <- unlist(tmpl) + # names(tmpv) <- names(x$weights) + print(tmpv) + cat("\n") + } + } + cat("\n") } cat("\n") } Modified: pkg/PortfolioAnalytics/R/objective.R =================================================================== --- pkg/PortfolioAnalytics/R/objective.R 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/R/objective.R 2013-09-04 04:31:31 UTC (rev 2984) @@ -106,6 +106,12 @@ arguments=arguments, ...=...) }, + weight_conc=, weight_concentration = + {tmp_objective = weight_concentration_objective(name=name, + enabled=enabled, + arguments=arguments, + ...=...) + }, null = {return(constraints)} # got nothing, default to simply returning @@ -182,6 +188,12 @@ portfolio$objectives <- c(portfolio$objectives, tmp_objective) return(portfolio) }, + weight_conc=, weight_concentration = + {tmp_objective = weight_concentration_objective(name=name, + enabled=enabled, + arguments=arguments, + ...=...) + }, null = {return(portfolio)} # got nothing, default to simply returning ) # end objective type switch @@ -410,6 +422,51 @@ return(qu) } # end quadratic utility objective constructor +#' Constructor for weight concentration objective +#' +#' This function penalizes weight concentration using the Herfindahl-Hirschman Index +#' as a measure of concentration. 
+#' +#' The \code{conc_aversion} argument can be a scalar or vector of concentration +#' aversion values If \code{conc_aversion} is a scalar and \code{conc_groups} is +#' \code{NULL}, then the concentration aversion value will be applied to the overall +#' weights. +#' +#' If \code{conc_groups} is specified as an argument, then the concentration +#' aversion value(s) will be applied to each group. +#' +#' @param name name of concentration measure, currently only "HHI" is supported. +#' @param conc_aversion concentration aversion value(s) +#' @param conc_groups list of vectors specifying the groups of the assets. Similar +#' to \code{groups} in \code{\link{group_constraint}} +#' @param arguments default arguments to be passed to an objective function when executed +#' @param enabled TRUE/FALSE +#' @param \dots any other passthru parameters +#' @author Ross Bennett +#' @export +weight_concentration_objective <- function(name, conc_aversion, conc_groups=NULL, arguments=NULL, enabled=TRUE, ...){ + # TODO: write HHI function to be used by global solvers in constrained_objective + + # check if conc_groups is specified as an argument + if(!is.null(conc_groups)){ + arguments$groups <- conc_groups + if(!is.list(conc_groups)) stop("conc_groups must be passed in as a list") + + if(length(conc_aversion) == 1){ + # if conc_aversion is a scalar, replicate to the number of groups + conc_aversion <- rep(conc_aversion, length(conc_groups)) + } + # length of conc_aversion must be equal to the length of conc_groups + if(length(conc_aversion) != length(conc_groups)) stop("length of conc_aversion must be equal to length of groups") + } else if(is.null(conc_groups)){ + if(length(conc_aversion) != 1) stop("conc_aversion must be a scalar value when conc_groups are not specified") + } + Objective <- objective(name=name, enabled=enabled, arguments=arguments, objclass=c("weight_concentration_objective","objective"), ... 
) + Objective$conc_aversion <- conc_aversion + Objective$conc_groups <- conc_groups + return(Objective) +} + #' Insert a list of objectives into the objectives slot of a portfolio object #' #' @param portfolio object of class 'portfolio' Modified: pkg/PortfolioAnalytics/R/objectiveFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/objectiveFUN.R 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/R/objectiveFUN.R 2013-09-04 04:31:31 UTC (rev 2984) @@ -36,3 +36,29 @@ weights <- matrix(weights, ncol=1) return(as.numeric(t(weights) %*% var(R) %*% weights)) } + +#' Function to calculate weight concentration +#' +#' This function calculates the concentration of weights using the +#' Herfindahl?Hirschman Index as a measure of concentration +#' +#' @param weights +#' @param groups +#' @author Ross Bennett +HHI <- function(weights, groups=NULL){ + + # calculate overall HHI + hhi <- sum(weights^2) + + # calculate group HHI + if(!is.null(groups)){ + ngroups <- length(groups) + group_hhi <- rep(0, ngroups) + for(i in 1:ngroups){ + group_hhi[i] <- sum(weights[groups[[i]]]^2) + } + return(list(hhi=hhi, group_hhi=group_hhi)) + } else { + return(hhi) + } +} \ No newline at end of file Modified: pkg/PortfolioAnalytics/R/optFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/optFUN.R 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/R/optFUN.R 2013-09-04 04:31:31 UTC (rev 2984) @@ -10,9 +10,10 @@ #' @param lambda risk_aversion parameter #' @param target target return value #' @param lambda_hhi concentration aversion parameter +#' @param conc_groups list of vectors specifying the groups of the assets. 
#' @author Ross Bennett -gmv_opt <- function(R, constraints, moments, lambda, target, lambda_hhi){ - +gmv_opt <- function(R, constraints, moments, lambda, target, lambda_hhi, conc_groups){ + N <- ncol(R) # Applying box constraints bnds <- list(lower=list(ind=seq.int(1L, N), val=as.numeric(constraints$min)), @@ -59,8 +60,24 @@ } # set up the quadratic objective - if(!is.null(constraints$conc_aversion)){ - ROI_objective <- Q_objective(Q=2*lambda*moments$var + lambda_hhi * diag(N), L=-moments$mean) + if(!is.null(lambda_hhi)){ + if(length(lambda_hhi) == 1 & is.null(conc_groups)){ + ROI_objective <- Q_objective(Q=2*lambda*moments$var + lambda_hhi * diag(N), L=-moments$mean) + } else if(!is.null(conc_groups)){ + # construct the matrix with concentration aversion values by group + hhi_mat <- matrix(0, nrow=N, ncol=N) + vec <- 1:N + for(i in 1:length(conc_groups)){ + tmpI <- diag(N) + tmpvec <- conc_groups[[i]] + zerovec <- setdiff(vec, tmpvec) + for(j in 1:length(zerovec)){ + tmpI[zerovec[j], ] <- rep(0, N) + } + hhi_mat <- hhi_mat + lambda_hhi[i] * tmpI + } + ROI_objective <- Q_objective(Q=2*lambda*moments$var + hhi_mat, L=-moments$mean) + } } else { ROI_objective <- Q_objective(Q=2*lambda*moments$var, L=-moments$mean) } Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-04 04:31:31 UTC (rev 2984) @@ -704,15 +704,17 @@ } else { target <- NA } - if(!is.null(constraints$conc_aversion)){ - lambda_hhi <- constraints$conc_aversion - } else { - lambda_hhi <- 0 - } + # comment out so concentration aversion can only be specified as an objective + # because it is added to the quadratic objective term for QP problems (minvar and qu) + # if(!is.null(constraints$conc_aversion)){ + # lambda_hhi <- constraints$conc_aversion + #} else { + # lambda_hhi <- 0 + #} lambda <- 1 
for(objective in portfolio$objectives){ if(objective$enabled){ - if(!any(c(objective$name == "mean", objective$name == "var", objective$name == "CVaR", objective$name == "ES", objective$name == "ETL"))) + if(!any(c(objective$name == "HHI", objective$name == "mean", objective$name == "var", objective$name == "CVaR", objective$name == "ES", objective$name == "ETL"))) stop("ROI only solves mean, var, or sample ETL/ES/CVaR type business objectives, choose a different optimize_method.") # I'm not sure what changed, but moments$mean used to be a vector of the column means # now it is a scalar value of the mean of the entire R object @@ -724,6 +726,8 @@ target <- ifelse(!is.null(objective$target), objective$target, target) alpha <- ifelse(!is.null(objective$alpha), objective$alpha, alpha) lambda <- ifelse(!is.null(objective$risk_aversion), objective$risk_aversion, lambda) + if(!is.null(objective$conc_aversion)) lambda_hhi <- objective$conc_aversion else lambda_hhi <- NULL + if(!is.null(objective$conc_groups)) conc_groups <- objective$conc_groups else conc_groups <- NULL } } if("var" %in% names(moments)){ @@ -735,7 +739,7 @@ obj_vals <- constrained_objective(w=weights, R=R, portfolio, trace=TRUE, normalize=FALSE)$objective_measures out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=roi_result$out, call=call) } else { - roi_result <- gmv_opt(R=R, constraints=constraints, moments=moments, lambda=lambda, target=target, lambda_hhi=lambda_hhi) + roi_result <- gmv_opt(R=R, constraints=constraints, moments=moments, lambda=lambda, target=target, lambda_hhi=lambda_hhi, conc_groups=conc_groups) weights <- roi_result$weights obj_vals <- constrained_objective(w=weights, R=R, portfolio, trace=TRUE, normalize=FALSE)$objective_measures out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=roi_result$out, call=call) Added: pkg/PortfolioAnalytics/man/HHI.Rd =================================================================== --- 
pkg/PortfolioAnalytics/man/HHI.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/HHI.Rd 2013-09-04 04:31:31 UTC (rev 2984) @@ -0,0 +1,20 @@ +\name{HHI} +\alias{HHI} +\title{Function to calculate weight concentration} +\usage{ + HHI(weights, groups = NULL) +} +\arguments{ + \item{weights}{} + + \item{groups}{} +} +\description{ + This function calculates the concentration of weights + using the Herfindahl?Hirschman Index as a measure of + concentration +} +\author{ + Ross Bennett +} + Modified: pkg/PortfolioAnalytics/man/diversification_constraint.Rd =================================================================== --- pkg/PortfolioAnalytics/man/diversification_constraint.Rd 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/man/diversification_constraint.Rd 2013-09-04 04:31:31 UTC (rev 2984) @@ -3,18 +3,14 @@ \title{constructor for diversification_constraint} \usage{ diversification_constraint(type = "diversification", - div_target = NULL, conc_aversion = NULL, - enabled = TRUE, message = FALSE, ...) + div_target = NULL, enabled = TRUE, message = FALSE, + ...) } \arguments{ \item{type}{character type of the constraint} \item{div_target}{diversification target value} - \item{conc_aversion}{concentration aversion parameter. - Penalizes over concentration for quadratic utility and - minimum variance problems.} - \item{enabled}{TRUE/FALSE} \item{message}{TRUE/FALSE. The default is message=FALSE. 
Modified: pkg/PortfolioAnalytics/man/gmv_opt.Rd =================================================================== --- pkg/PortfolioAnalytics/man/gmv_opt.Rd 2013-09-04 00:29:08 UTC (rev 2983) +++ pkg/PortfolioAnalytics/man/gmv_opt.Rd 2013-09-04 04:31:31 UTC (rev 2984) @@ -3,7 +3,7 @@ \title{Optimization function to solve minimum variance or maximum quadratic utility problems} \usage{ gmv_opt(R, constraints, moments, lambda, target, - lambda_hhi) + lambda_hhi, conc_groups) } \arguments{ \item{R}{xts object of asset returns} @@ -19,6 +19,9 @@ \item{target}{target return value} \item{lambda_hhi}{concentration aversion parameter} + + \item{conc_groups}{list of vectors specifying the groups + of the assets.} } \description{ This function is called by optimize.portfolio to solve Added: pkg/PortfolioAnalytics/man/weight_concentration_objective.Rd =================================================================== --- pkg/PortfolioAnalytics/man/weight_concentration_objective.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/weight_concentration_objective.Rd 2013-09-04 04:31:31 UTC (rev 2984) @@ -0,0 +1,44 @@ +\name{weight_concentration_objective} +\alias{weight_concentration_objective} +\title{Constructor for weight concentration objective} +\usage{ + weight_concentration_objective(name, conc_aversion, + conc_groups = NULL, arguments = NULL, enabled = TRUE, + ...) +} +\arguments{ + \item{name}{name of concentration measure, currently only + "HHI" is supported.} + + \item{conc_aversion}{concentration aversion value(s)} + + \item{conc_groups}{list of vectors specifying the groups + of the assets. Similar to \code{groups} in + \code{\link{group_constraint}}} + + \item{arguments}{default arguments to be passed to an + objective function when executed} + + \item{enabled}{TRUE/FALSE} + + \item{\dots}{any other passthru parameters} +} +\description{ + This function penalizes weight concentration using the + Herfindahl-Hirschman Index as a measure of concentration. 
+} +\details{ + The \code{conc_aversion} argument can be a scalar or + vector of concentration aversion values If + \code{conc_aversion} is a scalar and \code{conc_groups} + is \code{NULL}, then the concentration aversion value + will be applied to the overall weights. + + If \code{conc_groups} is specified as an argument, then + the concentration aversion value(s) will be applied to + each group. +} +\author{ + Ross Bennett +} + Added: pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R 2013-09-04 04:31:31 UTC (rev 2984) @@ -0,0 +1,50 @@ +library(PortfolioAnalytics) +library(ROI) +library(ROI.plugin.quadprog) + +# data(edhec) +# R <- edhec[, 1:4] +# colnames(R) <- c("CA", "CTAG", "DS", "EM") +# funds <- colnames(R) + +load("~/Desktop/Testing/crsp.short.Rdata") +R <- cbind(microcap.ts[, 1:2], + smallcap.ts[, 1:2], + midcap.ts[, 1:2], + largecap.ts[, 1:2]) + +funds <- colnames(R) + +cap_labels <- c(rep("MICRO", 2), rep("SMALL", 2), + rep("MID", 2), rep("LARGE", 2)) + +# Create initial portfolio object with category_labels +init <- portfolio.spec(assets=funds, category_labels=cap_labels) +# Add some weight constraints +init <- add.constraint(portfolio=init, type="full_investment") +init <- add.constraint(portfolio=init, type="long_only") +# Add objective to minimize variance +init <- add.objective(portfolio=init, type="risk", name="var") + +# Run the optimization with var as the only objective +opt1 <- optimize.portfolio(R=R, portfolio=init, optimize_method="ROI", trace=TRUE) +opt1 + +# Add the weight_concentration objective +# Set the conc_aversion values to 0 so that we should get the same value as min var +conc <- add.objective(portfolio=init, type="weight_concentration", name="HHI", + conc_aversion=0, conc_groups=init$category_labels) + +opt2 <- optimize.portfolio(R=R, 
portfolio=conc, optimize_method="ROI", trace=TRUE) +opt2 +all.equal(opt1$weights, opt2$weights) + +# Now change the conc_aversion values to give highest penalty to small cap stocks +conc$objectives[[2]]$conc_aversion <- c(0.05, 1, 0.1, 0) +opt3 <- optimize.portfolio(R=R, portfolio=conc, optimize_method="ROI", trace=TRUE) +opt3 + +# If all the conc_aversion values are very high, this should result in an equal weight portfolio +conc$objectives[[2]]$conc_aversion <- rep(1e6, 4) +opt4 <- optimize.portfolio(R=R, portfolio=conc, optimize_method="ROI", trace=TRUE) +opt4 From noreply at r-forge.r-project.org Wed Sep 4 12:49:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 12:49:09 +0200 (CEST) Subject: [Returnanalytics-commits] r2985 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: R vignettes Message-ID: <20130904104909.449A3185C11@r-forge.r-project.org> Author: shubhanm Date: 2013-09-04 12:49:08 +0200 (Wed, 04 Sep 2013) New Revision: 2985 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.pdf Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.pdf Log: .r and .rnw modification Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R 2013-09-04 04:31:31 UTC (rev 2984) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R 2013-09-04 10:49:08 UTC (rev 2985) @@ -40,7 +40,7 @@ T= 36 j=1 dt=1/T 
-nsim=3; +nsim=30; thres=4; r=matrix(0,nsim,T+1) monthly = 0 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R 2013-09-04 04:31:31 UTC (rev 2984) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R 2013-09-04 10:49:08 UTC (rev 2985) @@ -36,12 +36,12 @@ aa= table.Autocorrelation(R) barplot(as.matrix(aa), main="ACF Lag Plot", ylab= "Value of Coefficient", - , xlab = NULL,col=rainbow(6)) + , xlab = NULL,col=rich6equal) # Place the legend at the top-left corner with no frame # using rainbow colors legend("topright", c("1","2","3","4","5","6"), cex=0.6, - bty="n", fill=rainbow(6)); + bty="n", fill=rich6equal); Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.pdf =================================================================== (Binary files differ) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.Rnw (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.Rnw 2013-09-04 10:49:08 UTC (rev 2985) @@ -0,0 +1,226 @@ +%% no need for \DeclareGraphicsExtensions{.pdf,.eps} + +\documentclass[12pt,letterpaper,english]{article} +\usepackage{times} +\usepackage[T1]{fontenc} +\IfFileExists{url.sty}{\usepackage{url}} + {\newcommand{\url}{\texttt}} + +\usepackage{babel} +%\usepackage{noweb} +\usepackage{Rd} + +\usepackage{Sweave} +\SweaveOpts{engine=R,eps=FALSE} +%\VignetteIndexEntry{Performance Attribution from Bacon} +%\VignetteDepends{PerformanceAnalytics} +%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} +%\VignettePackage{PerformanceAnalytics} + +%\documentclass[a4paper]{article} 
+%\usepackage[noae]{Sweave} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} +%\usepackage{graphicx} +%\usepackage{graphicx, verbatim} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage{graphicx} + +\title{Commodity Index Fund Performance Analysis} +\author{Shubhankit Mohan} + +\begin{document} +\SweaveOpts{concordance=TRUE} + +\maketitle + + +\begin{abstract} +The fact that many hedge fund returns exhibit extraordinary levels of serial correlation is now well-known and generally accepted as fact. The effect of this autocorrelation on investment returns diminishes the apparent risk of such asset classes as the true returns/risk is easily \textbf{camouflaged} within a haze of illiquidity, stale prices, averaged price quotes and smoothed return reporting. We highlight the effect \emph{autocorrelation} and \emph{drawdown} has on performance analysis by investigating the results of functions developed during the Google Summer of Code 2013 on \textbf{commodity based index} . +\end{abstract} + +<>= +library(PerformanceAnalytics) +library(noniid.sm) +data(edhec) +@ + + +\section{Background} +The investigated fund index that tracks a basket of \emph{commodities} to measure their performance.The value of these indexes fluctuates based on their underlying commodities, and this value depends on the \emph{component}, \emph{methodology} and \emph{style} to cover commodity markets . + +A brief overview of the four index invested in our report are : + \begin{itemize} + \item + \textbf{DJUBS Commodity index} : is a broadly diversified index that allows investors to track commodity futures through a single, simple measure. As the index has grown in popularity since its introduction in 1998, additional versions and a full complement of subindices have been introduced. 
Together, the family offers investors a comprehensive set of tools for measuring the commodity markets. + \item + \textbf{Morningstar CLS index} : is a simple rules-based trend following index operated in commodities + \item + \textbf{Newedge CTI} : includes funds that utilize a variety of investment strategies to profit from price moves in commodity markets. +Managers typically use either (i) a trading orientated approach,involving the trading of physical commodity products and/or of commodity +derivative instruments in either directional or relative value strategies; Or (ii) Long short equity strategies focused on commodity related stocks. + \end{itemize} +%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let +%$Z = \sin(X)$. $\sqrt{X}$. + +%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ +%e^{2 \mu} = 1 +%\begin{equation} +%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ +%\end{equation} + +\section{Performance Summary Chart} + +Given a series of historical returns \((R_1,R_2, . . .,R_T)\) from \textbf{January-2001} to \textbf{December-2009}, create a wealth index chart, bars for per-period performance, and underwater chart for drawdown of the 3 funds. + +<>= +data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") +dates <- data$X +values <- data[,-1] # convert percentage to return +COM <- as.xts(values, order.by=as.Date(dates)) +COM.09<-COM[,9:11] +charts.PerformanceSummary(COM.09[1:108,],colorset = rich6equal, lwd = 2, ylog = TRUE) +@ + +The above figure shows the behaviour of the respective fund performance, which is \textbf{upward} trending for all the funds till the period of \textbf{"January-2008"}.For comparitive purpose, one can observe the distinct \textbf{drawdown} of \textbf{Newedge CTI} since the latter period. 
+ +\section{Statistical and Drawdown Analysis} + +A summary of Fund Return series characteristics show that \textbf{DJUBS.Commodity} performs worse relatively to it's peers.The most distinct charactersitic being highest : \textbf{Variance, Stdev, SE Mean} and well as negative \textbf{Skewness} + +<>= +table.Stats(COM.09, ci = 0.95, digits = 4) +@ + + +The results are consistent with Drawdown Analysis in which \textbf{DJUBS.Commodity} performs worse relatively to it's peers. + +<>= +table.DownsideRisk(COM.09, ci = 0.95, digits = 4) +@ +\section{Non-i.i.d GSoC Usage} +\subsection{Auctocorrelation Adjusted Standard Devitation} +Given a sample of historical returns \((R_1,R_2, . . .,R_T)\),the method assumes the fund manager smooths returns in the following manner, when 't' is the unit time interval, with $\rho$\ as the respective term autocorrelation coefficient + +%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let +%$Z = \sin(X)$. $\sqrt{X}$. + +%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ +%e^{2 \mu} = 1 +%\begin{equation} +%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ +%\end{equation} +\begin{equation} + \sigma_{T} = \sqrt{ \sum_k^n(\sigma_{t}^2 + 2*\rho_i) } \\ +\end{equation} + + +<>= +ACFVol = ACStdDev.annualized(COM.09) +Vol = StdDev.annualized(COM.09) +barplot(rbind(ACFVol,Vol), main="ACF and Orignal Volatility", + xlab="Fund Type",ylab="Volatilty (in %)", col=rich6equal[2:3], beside=TRUE) + legend("topright", c("1","2"), cex=0.6, + bty="2", fill=rich6equal[2:3]); +@ + +From the above figure, we can observe that all the funds, exhibit \textbf{serial auto correlation}, which results in significantly \emph{inflated} standard deveation. 
+\subsection{Andrew Lo Statistics of Sharpe Ratio} + +The building blocks of the \textbf{Sharpe Ratio} : expected returns and volatilities are unknown quantities that must be estimated statistically and are, +therefore, subject to \emph{estimation error} .To address this question, Andrew Lo derives explicit expressions for the statistical distribution of the Sharpe ratio using +standard asymptotic theory. +Given a predefined benchmark ,\eqn{\hat{SR}} can be expressed in terms of autocorrelated coefficients as + + \deqn{ \hat{SR} (q) - SR(q)= Normal Distribution(0,V_{GMM}(q)) } + +In given commodity funds, we find results, similar reported in paper, that the annual Sharpe ratio for a hedge fund can be overstated by as much as \textbf{65} \% because of the presence of \textbf{serial correlation}.We can observe that the fund "\textbf{DJUBS.Commodity}", which has the largest drawdown and serial autocorrelation, has it's Andrew Lo Sharpe ratio , \emph{decrease} most significantly as comapared to other funds. + +<>= +Lo.Sharpe = LoSharpe(COM.09) +Theoretical.Sharpe= SharpeRatio.annualized(COM.09) +barplot(rbind(Theoretical.Sharpe,Lo.Sharpe), main="Theoretical and Andrew Lo Sharpe Ratio Observed", + xlab="Fund Type",ylab="Value", col=rich6equal[2:3], beside=TRUE) + legend("topright", c("1","2"), cex=0.6, + bty="2", fill=rich6equal[2:3]); +@ +\subsection{Conditional Drawdown} +A new one-parameter family of risk measures called Conditional Drawdown (CDD) has +been proposed. These measures of risk are functionals of the portfolio drawdown (underwater) curve considered in active portfolio management. For some value of $\hat{\alpha}$ the tolerance parameter, in the case of a single sample path, drawdown functional is defined as the mean of the worst (1 \(-\) $\hat{\alpha}$)100\% drawdowns. The CDD measure generalizes the notion of the drawdown functional to a multi-scenario case and can be considered as a generalization of deviation measure to a dynamic case. 
The CDD measure includes the Maximal Drawdown and Average Drawdown as its limiting cases.Similar to other cases, \textbf{DJUBS.Commodity}, is the worst performing fund with worst case conditional drawdown greater than \textbf{50\%} and \textbf{Newedge.CTI} performing significantly well among the peer commodity indices with less than \textbf{15\%}. + +<>= +c.draw=CDrawdown(COM.09) +barplot(as.matrix(-c.draw), main="Conditional Drawdown (in %)", + xlab="Fund Type",ylab="Value",colorset = rich6equal[5], beside=TRUE) +@ +\subsection{Calmar and Sterling Ratio} +Both the Calmar and the Sterling ratio are the ratio of annualized returnmover the absolute value of the maximum drawdown of an investment. +{equation} +\begin{equation} + Calmar Ratio = \frac{Return [0,T]}{max Drawdown [0,T]} \\ +\end{equation} + +\begin{equation} + Sterling Ratio = \frac{Return [0,T]}{max Drawdown [0,T] - 10\%} \\ +\end{equation} +<>= +CalmarRatio.Norm(COM.09,1) +SterlingRatio.Norm(COM.09,1) +@ +For a 1 year \emph{horizon} return, we can see that Newedge.CTI is the clear performer in this metric as well.However, a \textbf{suprising} observed result, is negative \emph{Sterling} and \emph{Calmar} ratio for Morningstar.CLS . +\subsection{GLM Smooth Index} +A useful summary statistic for measuringthe concentration of weights is : +\begin{equation} +\xi = \sum_{j=0}^{k} \theta _j^2 \\ +\end{equation} + +This measure is well known in the industrial organization literature as the Herfindahl index, a measure of the concentration of firms in a given industry where $\theta$\(_j\) represents the market share of firm j. Becaus $\xi_t$\ is confined to the unit interval, and is minimized when all the $\theta$\(_j\) 's are identical, which implies a value of 1/k+1 for $\xi_i$\ ; and is maximized when one coefficient is 1 and the rest are 0. 
In the context of smoothed returns, a lower value of $\xi$ implies more smoothing, and the upper bound of 1 implies no smoothing, hence we shall refer to $\xi$ as a \textbf{smoothing index}.
+ +<>= +source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R") +chart.Autocorrelation(COM.09) +@ + +Finally, from the autocorrelation lag plot, one can observe, significant \textbf{positive} autocorrelation for \textbf{Newedge.CTI}, which is a \emph{warning} signal in case drawdown occurs, in an otherwise excellent performing fund. +\section{Conclusion} + +Analyzing all the function results, one can clearly differentiate \textbf{Newedge.CTI}, as a far superior fund as compared to it's peer.\textbf{MorningStar.CLS}, exbhibits highest autocorrelation as well as lowest Calmar/Sterling ratio, but compared on other front, it distincly outperforms \textbf{DJUBS.Commodity}, which has performed poorfly on all the tests. + +The above figure shows the characteristic of the respective fund performance, which is after the period of analysis till \textbf{"July-2013"}.At this moment, we would like the readers, to use the functions developed in the R \textbf{"PerformanceAnalytics"} package, to study ,use it for analysis as well as for forming their own opinion. 
+ +<>= +charts.PerformanceSummary(COM.09[109:151],colorset = rich6equal, lwd = 2, ylog = TRUE) +@ + + +\end{document} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.pdf =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.pdf ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.pdf =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Wed Sep 4 14:57:34 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 14:57:34 +0200 (CEST) Subject: [Returnanalytics-commits] r2986 - in pkg/PerformanceAnalytics/sandbox/Shubhankit: man noniid.sm noniid.sm/R noniid.sm/man Message-ID: <20130904125734.71869185B9E@r-forge.r-project.org> Author: shubhanm Date: 2013-09-04 14:57:34 +0200 (Wed, 04 Sep 2013) New Revision: 2986 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/man/GLMSmoothIndex.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/man/Return.Okunev.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/AcarSim.Rd Log: Documentation update and parameter change in AcarSim Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/man/GLMSmoothIndex.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/man/GLMSmoothIndex.Rd 2013-09-04 
10:49:08 UTC (rev 2985) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/man/GLMSmoothIndex.Rd 2013-09-04 12:57:34 UTC (rev 2986) @@ -1,48 +1,50 @@ -\name{GLMSmoothIndex} -\alias{GLMSmoothIndex} -\alias{Return.Geltner} -\title{GLM Index} -\usage{ - GLMSmoothIndex(R = NULL, ...) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of asset returns} -} -\description{ - Getmansky Lo Markov Smoothing Index is a useful summary - statistic for measuring the concentration of weights is a - sum of square of Moving Average lag coefficient. This - measure is well known in the industrial organization - literature as the \bold{ Herfindahl index}, a measure of - the concentration of firms in a given industry. The index - is maximized when one coefficient is 1 and the rest are - 0. In the context of smoothed returns, a lower value - implies more smoothing, and the upper bound of 1 implies - no smoothing, hence \eqn{\xi} is reffered as a - '\bold{smoothingindex}'. \deqn{ \xi = \sum\theta(j)^2} - Where j belongs to 0 to k,which is the number of lag - factors input. -} -\examples{ -data(edhec) -head(GLMSmoothIndex(edhec)) -} -\author{ - Peter Carl, Brian Peterson, Shubhankit Mohan -} -\references{ - \emph{Getmansky, Mila, Lo, Andrew W. and Makarov, Igor} - An Econometric Model of Serial Correlation and - Illiquidity in Hedge Fund Returns (March 1, 2003). MIT - Sloan Working Paper No. 4288-03; MIT Laboratory for - Financial Engineering Working Paper No. LFE-1041A-03; - EFMA 2003 Helsinki Meetings. Available at SSRN: - \url{http://ssrn.com/abstract=384700} -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{non-iid} -\keyword{ts} - +\name{GLMSmoothIndex} +\alias{GLMSmoothIndex} +\alias{Return.Geltner} +\title{GLM Index} +\usage{ + GLMSmoothIndex(R = NULL, ...) 
+} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} +} +\description{ + Getmansky Lo Markov Smoothing Index is a useful summary + statistic for measuring the concentration of weights is a + sum of square of Moving Average lag coefficient. This + measure is well known in the industrial organization + literature as the \bold{ Herfindahl index}, a measure of + the concentration of firms in a given industry. The index + is maximized when one coefficient is 1 and the rest are + 0. In the context of smoothed returns, a lower value + implies more smoothing, and the upper bound of 1 implies + no smoothing, hence \eqn{\xi} is reffered as a + '\bold{smoothingindex}'. \deqn{ \xi = \sum\theta(j)^2} + Where j belongs to 0 to k,which is the number of lag + factors input. +} +\examples{ +require(PerformanceAnalytics) + library(PerformanceAnalytics) + data(edhec) +GLMSmoothIndex(edhec) +} +\author{ + Peter Carl, Brian Peterson, Shubhankit Mohan +} +\references{ + \emph{Getmansky, Mila, Lo, Andrew W. and Makarov, Igor} + An Econometric Model of Serial Correlation and + Illiquidity in Hedge Fund Returns (March 1, 2003). MIT + Sloan Working Paper No. 4288-03; MIT Laboratory for + Financial Engineering Working Paper No. LFE-1041A-03; + EFMA 2003 Helsinki Meetings. 
Available at SSRN: + \url{http://ssrn.com/abstract=384700} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{non-iid} +\keyword{ts} + Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/man/Return.Okunev.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/man/Return.Okunev.Rd 2013-09-04 10:49:08 UTC (rev 2985) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/man/Return.Okunev.Rd 2013-09-04 12:57:34 UTC (rev 2986) @@ -1,71 +1,78 @@ -\name{Return.Okunev} -\alias{Return.Okunev} -\title{OW Return Model} -\usage{ - Return.Okunev(R, q = 3) -} -\description{ - The objective is to determine the true underlying return - by removing the autocorrelation structure in the original - return series without making any assumptions regarding - the actual time series properties of the underlying - process. We are implicitly assuming by this approach that - the autocorrelations that arise in reported returns are - entirely due to the smoothing behavior funds engage in - when reporting results. In fact, the method may be - adopted to produce any desired level of autocorrelation - at any lag and is not limited to simply eliminating all - autocorrelations.It can be be said as the general form of - Geltner Return Model -} -\details{ - Given a sample of historical returns \eqn{R(1),R(2), . . - .,R(T)},the method assumes the fund manager smooths - returns in the following manner: \deqn{ r(0,t) = \sum - \beta (i) r(0,t-i) + (1- \alpha)r(m,t) } Where :\deqn{ - \sum \beta (i) = (1- \alpha) } \bold{r(0,t)} : is the - observed (reported) return at time t (with 0 adjustments - to reported returns), \bold{r(m,t)} : is the true - underlying (unreported) return at time t (determined by - making m adjustments to reported returns). 
- - To remove the \bold{first m orders} of autocorrelation - from a given return series we would proceed in a manner - very similar to that detailed in \bold{ - \code{\link{Return.Geltner}} \cr}. We would initially - remove the first order autocorrelation, then proceed to - eliminate the second order autocorrelation through the - iteration process. In general, to remove any order, m, - autocorrelations from a given return series we would make - the following transformation to returns: autocorrelation - structure in the original return series without making - any assumptions regarding the actual time series - properties of the underlying process. We are implicitly - assuming by this approach that the autocorrelations that - arise in reported returns are entirely due to the - smoothing behavior funds engage in when reporting - results. In fact, the method may be adopted to produce - any desired level of autocorrelation at any lag and is - not limited to simply eliminating all autocorrelations. -} -\examples{ -data(managers) -head(Return.Okunev(managers[,1:3]),n=3) -} -\author{ - Peter Carl, Brian Peterson, Shubhankit Mohan -} -\references{ - Okunev, John and White, Derek R., \emph{ Hedge Fund Risk - Factors and Value at Risk of Credit Trading Strategies} - (October 2003). 
Available at SSRN: - \url{http://ssrn.com/abstract=460641} -} -\seealso{ - \code{\link{Return.Geltner}} \cr -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{ts} - +\name{Return.Okunev} +\alias{Return.Okunev} +\title{OW Return Model} +\usage{ + Return.Okunev(R, q = 3) +} +\arguments{ + \item{R}{: an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{q}{: order of autocorrelation coefficient lag + factors} +} +\description{ + The objective is to determine the true underlying return + by removing the autocorrelation structure in the original + return series without making any assumptions regarding + the actual time series properties of the underlying + process. We are implicitly assuming by this approach that + the autocorrelations that arise in reported returns are + entirely due to the smoothing behavior funds engage in + when reporting results. In fact, the method may be + adopted to produce any desired level of autocorrelation + at any lag and is not limited to simply eliminating all + autocorrelations.It can be be said as the general form of + Geltner Return Model +} +\details{ + Given a sample of historical returns \eqn{R(1),R(2), . . + .,R(T)},the method assumes the fund manager smooths + returns in the following manner: \deqn{ r(0,t) = \sum + \beta (i) r(0,t-i) + (1- \alpha)r(m,t) } Where :\deqn{ + \sum \beta (i) = (1- \alpha) } \bold{r(0,t)} : is the + observed (reported) return at time t (with 0 adjustments + to reported returns), \bold{r(m,t)} : is the true + underlying (unreported) return at time t (determined by + making m adjustments to reported returns). + + To remove the \bold{first m orders} of autocorrelation + from a given return series we would proceed in a manner + very similar to that detailed in \bold{ + \code{\link{Return.Geltner}} \cr}. 
We would initially + remove the first order autocorrelation, then proceed to + eliminate the second order autocorrelation through the + iteration process. In general, to remove any order, m, + autocorrelations from a given return series we would make + the following transformation to returns: autocorrelation + structure in the original return series without making + any assumptions regarding the actual time series + properties of the underlying process. We are implicitly + assuming by this approach that the autocorrelations that + arise in reported returns are entirely due to the + smoothing behavior funds engage in when reporting + results. In fact, the method may be adopted to produce + any desired level of autocorrelation at any lag and is + not limited to simply eliminating all autocorrelations. +} +\examples{ +data(managers) +head(Return.Okunev(managers[,1:3]),n=3) +} +\author{ + Peter Carl, Brian Peterson, Shubhankit Mohan +} +\references{ + Okunev, John and White, Derek R., \emph{ Hedge Fund Risk + Factors and Value at Risk of Credit Trading Strategies} + (October 2003). Available at SSRN: + \url{http://ssrn.com/abstract=460641} +} +\seealso{ + \code{\link{Return.Geltner}} \cr +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{ts} + Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-04 10:49:08 UTC (rev 2985) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-04 12:57:34 UTC (rev 2986) @@ -1,38 +1,38 @@ -Package: noniid.sm -Type: Package -Title: Non-i.i.d. GSoC 2013 Shubhankit -Version: 0.1 -Date: $Date: 2013-05-13 14:30:22 -0500 (Mon, 13 May 2013) $ -Author: Shubhankit Mohan -Contributors: Peter Carl, Brian G. Peterson -Depends: - xts, - PerformanceAnalytics, - tseries, - stats -Maintainer: Brian G. 
Peterson -Description: GSoC 2013 project to replicate literature on drawdowns and - non-i.i.d assumptions in finance. -License: GPL-3 -ByteCompile: TRUE -Collate: - 'AcarSim.R' - 'ACStdDev.annualized.R' - 'CalmarRatio.Norm.R' - 'CDrawdown.R' - 'chart.AcarSim.R' - 'chart.Autocorrelation.R' - 'EmaxDDGBM.R' - 'GLMSmoothIndex.R' - 'LoSharpe.R' - 'na.skip.R' - 'noniid.sm-internal.R' - 'QP.Norm.R' - 'Return.GLM.R' - 'Return.Okunev.R' - 'se.LoSharpe.R' - 'SterlingRatio.Norm.R' - 'table.ComparitiveReturn.GLM.R' - 'table.EMaxDDGBM.R' - 'table.UnsmoothReturn.R' - 'UnsmoothReturn.R' +Package: noniid.sm +Type: Package +Title: Non-i.i.d. GSoC 2013 Shubhankit +Version: 0.1 +Date: $Date: 2013-05-13 14:30:22 -0500 (Mon, 13 May 2013) $ +Author: Shubhankit Mohan +Contributors: Peter Carl, Brian G. Peterson +Depends: + xts, + PerformanceAnalytics, + tseries, + stats +Maintainer: Brian G. Peterson +Description: GSoC 2013 project to replicate literature on drawdowns and + non-i.i.d assumptions in finance. +License: GPL-3 +ByteCompile: TRUE +Collate: + 'AcarSim.R' + 'ACStdDev.annualized.R' + 'CalmarRatio.Norm.R' + 'CDrawdown.R' + 'chart.AcarSim.R' + 'chart.Autocorrelation.R' + 'EmaxDDGBM.R' + 'GLMSmoothIndex.R' + 'LoSharpe.R' + 'na.skip.R' + 'noniid.sm-internal.R' + 'QP.Norm.R' + 'Return.GLM.R' + 'Return.Okunev.R' + 'se.LoSharpe.R' + 'SterlingRatio.Norm.R' + 'table.ComparitiveReturn.GLM.R' + 'table.EMaxDDGBM.R' + 'table.UnsmoothReturn.R' + 'UnsmoothReturn.R' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R 2013-09-04 10:49:08 UTC (rev 2985) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R 2013-09-04 12:57:34 UTC (rev 2986) @@ -12,6 +12,7 @@ #' Where j varies from 1 to n ,which is the number of drawdown's in simulation #' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of #' asset 
returns +#' @param nsim number of simulations input #' @author Shubhankit Mohan #' @references Maximum Loss and Maximum Drawdown in Financial Markets,\emph{International Conference Sponsored by BNP and Imperial College on: #' Forecasting Financial Markets, London, United Kingdom, May 1997} \url{http://www.intelligenthedgefundinvesting.com/pubs/easj.pdf} @@ -22,7 +23,7 @@ #' @rdname AcarSim #' @export AcarSim <- - function(R) + function(R,nsim=1) { library(PerformanceAnalytics) @@ -40,7 +41,6 @@ T= 36 j=1 dt=1/T -nsim=30; thres=4; r=matrix(0,nsim,T+1) monthly = 0 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/AcarSim.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/AcarSim.Rd 2013-09-04 10:49:08 UTC (rev 2985) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/AcarSim.Rd 2013-09-04 12:57:34 UTC (rev 2986) @@ -2,11 +2,13 @@ \alias{AcarSim} \title{Acar-Shane Maximum Loss Plot} \usage{ - AcarSim(R) + AcarSim(R, nsim = 1) } \arguments{ \item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns} + + \item{nsim}{number of simulations input} } \description{ To get some insight on the relationships between maximum From noreply at r-forge.r-project.org Wed Sep 4 21:09:14 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 21:09:14 +0200 (CEST) Subject: [Returnanalytics-commits] r2987 - in pkg/FactorAnalytics: R man Message-ID: <20130904190914.1C7851850F7@r-forge.r-project.org> Author: chenyian Date: 2013-09-04 21:09:13 +0200 (Wed, 04 Sep 2013) New Revision: 2987 Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r pkg/FactorAnalytics/R/fitStatisticalFactorModel.R pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd Log: add benchmark and active returns performance attribution in factorModelPerformanceAttribution.r Modified: 
pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-04 12:57:34 UTC (rev 2986) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-04 19:09:13 UTC (rev 2987) @@ -1,25 +1,25 @@ -# performance attribution -# Yi-An Chen -# July 30, 2012 - - - -#' Compute BARRA-type performance attribution +#' Compute performance attribution #' #' Decompose total returns or active returns into returns attributed to factors #' and specific returns. Class of FM.attribution is generated and generic #' function \code{plot()} and \code{summary()},\code{print()} can be used. #' #' total returns can be decomposed into returns attributed to factors and -#' specific returns. \eqn{R_t = \sum_j b_{jt} * f_{jt} + -#' u_t},t=1..T,\eqn{b_{jt}} is exposure to factor j and \eqn{f_{jt}} is factor -#' j. The returns attributed to factor j is \eqn{b_{jt} * f_{jt}} and portfolio -#' specific returns is \eqn{u_t} +#' specific returns. \eqn{R_t = \sum_j b_{j} * f_{jt} + +#' u_t},t=1..T,\eqn{b_{j}} is exposure to factor j and \eqn{f_{jt}} is factor +#' j. The returns attributed to factor j is \eqn{b_{j} * f_{jt}} and specific +#' returns is \eqn{u_t}. #' +#' If benchmark is provided. active returns = total returns - benchmark returns = +#' active returns attributed to factors + specific returns. Specifically, +#' \eqn{R_t = \sum_j b_{j}^A * f_{jt} + u_t},t=1..T, \eqn{b_{j}^A} is \emph{active beta} to factor j +#' and \eqn{f_{jt}} is factor j. The active returns attributed to factor j is +#' \eqn{b_{j}^A * f_{jt}} specific returns is \eqn{u_t}, and \eqn{b_{j}^A = b_{j}-1} +#' #' @param fit Class of "TimeSeriesFactorModel", "FundamentalFactorModel" or #' "statFactorModel". #' @param benchmark a xts, vector or data.frame provides benchmark time series -#' returns. +#' returns. If benchmark is provided, active returns decomposition will be calculated. 
#' @param ... Other controled variables for fit methods. #' @return an object of class \code{FM.attribution} containing #' \itemize{ @@ -47,20 +47,9 @@ #' factorModelPerformanceAttribution <- function(fit,benchmark=NULL,...) { - - # input - # fit : Class of MacroFactorModel, FundamentalFactorModel and statFactorModel - # benchmark: benchmark returns, default is NULL. If benchmark is provided, active returns - # is used. - # ... : controlled variables for fitMacroeconomicsFactorModel and fitStatisticalFactorModel - # output - # class of "FMattribution" - # - # plot.FMattribution - # summary.FMattribution - # print.FMattribution - require(xts) - + + require(PerformanceAnalytics) + if (class(fit) !="TimeSeriesFactorModel" & class(fit) !="FundamentalFactorModel" & class(fit) != "StatFactorModel") { @@ -68,194 +57,203 @@ 'StatFactorModel'.") } - # TimeSeriesFactorModel chunk + # TimeSeriesFactorModel chunk - if (class(fit) == "TimeSeriesFactorModel") { + if (class(fit) == "TimeSeriesFactorModel") { + + # return attributed to factors + cum.attr.ret <- fit$beta + cum.spec.ret <- fit$alpha + factorName = colnames(fit$beta) + fundName = rownames(fit$beta) + + attr.list <- list() + + for (k in fundName) { + fit.lm = fit$asset.fit[[k]] + + ## extract information from lm object + data <- checkData(fit$data) + date <- index(na.omit(data[,k])) + actual.xts = xts(fit.lm$model[1], as.Date(date)) + if (!is.null(benchmark)) { + benchmark.xts <- checkData(benchmark)[as.Date(date)] + } + # attributed returns + # active portfolio management p.512 17A.9 + # top-down method + + cum.ret <- Return.cumulative(actual.xts) + # setup initial value + attr.ret.xts.all <- xts(, as.Date(date)) + + for ( i in factorName ) { + + if (is.na(fit$beta[k,i])) { + cum.attr.ret[k,i] <- NA + attr.ret.xts.all <- merge(attr.ret.xts.all,xts(rep(NA,length(date)),as.Date(date))) + } else { + if (!is.null(benchmark)) { + attr.ret.xts <- actual.xts - xts(as.matrix(benchmark.xts)%*%as.matrix(fit.lm$coef[i]-1), + 
as.Date(date)) + } else { + attr.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[i])%*%as.matrix(fit.lm$coef[i]), + as.Date(date)) + } + cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) + attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) + } + } + + + # specific returns + spec.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[,-1])%*%as.matrix(fit.lm$coef[-1]), + as.Date(date)) + cum.spec.ret[k] <- cum.ret - Return.cumulative(actual.xts-spec.ret.xts) + attr.list[[k]] <- merge(attr.ret.xts.all,spec.ret.xts) + colnames(attr.list[[k]]) <- c(factorName,"specific.returns") + } + + + } - # if benchmark is provided - -# if (!is.null(benchmark)) { -# ret.assets = fit$data[] - benchmark -# fit = fitTimeSeriesFactorModel(ret.assets=ret.assets,...) -# } - - # return attributed to factors - cum.attr.ret <- fit$beta - cum.spec.ret <- fit$alpha - factorName = colnames(fit$beta) - fundName = rownames(fit$beta) - - attr.list <- list() - - for (k in fundName) { - fit.lm = fit$asset.fit[[k]] + if (class(fit) =="FundamentalFactorModel" ) { + # if benchmark is provided - ## extract information from lm object - data <- checkData(fit$data) - date <- index(na.omit(data[,k])) - actual.xts = xts(fit.lm$model[1], as.Date(date)) + if (!is.null(benchmark)) { + stop("use fitFundamentalFactorModel instead") + } + # return attributed to factors + factor.returns <- fit$factor.returns[,-1] + factor.names <- colnames(fit$beta) + date <- index(factor.returns) + ticker <- fit$asset.names - # attributed returns - # active portfolio management p.512 17A.9 - cum.ret <- Return.cumulative(actual.xts) - # setup initial value - attr.ret.xts.all <- xts(, as.Date(date)) - for ( i in factorName ) { + #cumulative return attributed to factors + if (factor.names[1] == "(Intercept)") { + cum.attr.ret <- matrix(,nrow=length(ticker),ncol=length(factor.names), + dimnames=list(ticker,factor.names))[,-1] # discard intercept + } else { + cum.attr.ret <- 
matrix(,nrow=length(ticker),ncol=length(factor.names), + dimnames=list(ticker,factor.names)) + } + cum.spec.ret <- rep(0,length(ticker)) + names(cum.spec.ret) <- ticker + + # make list of every asstes and every list contains return attributed to factors + # and specific returns + + attr.list <- list() + for (k in ticker) { + idx <- which(fit$data[,fit$assetvar]== k) + returns <- fit$data[idx,fit$returnsvar] + num.f.names <- intersect(fit$exposure.names,factor.names) + # check if there is industry factors + if (length(setdiff(fit$exposure.names,factor.names))>0 ){ + ind.f <- matrix(rep(fit$beta[k,][-(1:length(num.f.names))],length(idx)),nrow=length(idx),byrow=TRUE) + colnames(ind.f) <- colnames(fit$beta)[-(1:length(num.f.names))] + exposure <- cbind(fit$data[idx,num.f.names],ind.f) + } else {exposure <- fit$data[idx,num.f.names] } - if (is.na(fit$beta[k,i])) { - cum.attr.ret[k,i] <- NA - attr.ret.xts.all <- merge(attr.ret.xts.all,xts(rep(NA,length(date)),as.Date(date))) - } else { - attr.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[i])%*%as.matrix(fit.lm$coef[i]), - as.Date(date)) - cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) - attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) - } + attr.factor <- exposure * coredata(factor.returns) + specific.returns <- returns - apply(attr.factor,1,sum) + attr <- cbind(attr.factor,specific.returns) + attr.list[[k]] <- xts(attr,as.Date(date)) + cum.attr.ret[k,] <- apply(attr.factor,2,Return.cumulative) + cum.spec.ret[k] <- Return.cumulative(specific.returns) } - - # specific returns - spec.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[,-1])%*%as.matrix(fit.lm$coef[-1]), - as.Date(date)) - cum.spec.ret[k] <- cum.ret - Return.cumulative(actual.xts-spec.ret.xts) - attr.list[[k]] <- merge(attr.ret.xts.all,spec.ret.xts) - colnames(attr.list[[k]]) <- c(factorName,"specific.returns") + + } - - } - -if (class(fit) =="FundamentalFactorModel" ) { - # if benchmark is provided - - if 
(!is.null(benchmark)) { - stop("use fitFundamentalFactorModel instead") - } - # return attributed to factors - factor.returns <- fit$factor.returns[,-1] - factor.names <- colnames(fit$beta) - dates <- index(factor.returns) - ticker <- fit$asset.names - - - - #cumulative return attributed to factors - if (factor.names[1] == "(Intercept)") { - cum.attr.ret <- matrix(,nrow=length(ticker),ncol=length(factor.names), - dimnames=list(ticker,factor.names))[,-1] # discard intercept - } else { - cum.attr.ret <- matrix(,nrow=length(ticker),ncol=length(factor.names), - dimnames=list(ticker,factor.names)) - } - cum.spec.ret <- rep(0,length(ticker)) - names(cum.spec.ret) <- ticker - - # make list of every asstes and every list contains return attributed to factors - # and specific returns - - attr.list <- list() - for (k in ticker) { - idx <- which(fit$data[,fit$assetvar]== k) - returns <- fit$data[idx,fit$returnsvar] - num.f.names <- intersect(fit$exposure.names,factor.names) - # check if there is industry factors - if (length(setdiff(fit$exposure.names,factor.names))>0 ){ - ind.f <- matrix(rep(fit$beta[k,][-(1:length(num.f.names))],length(idx)),nrow=length(idx),byrow=TRUE) - colnames(ind.f) <- colnames(fit$beta)[-(1:length(num.f.names))] - exposure <- cbind(fit$data[idx,num.f.names],ind.f) - } else {exposure <- fit$data[idx,num.f.names] } - - attr.factor <- exposure * coredata(factor.returns) - specific.returns <- returns - apply(attr.factor,1,sum) - attr <- cbind(attr.factor,specific.returns) - attr.list[[k]] <- xts(attr,as.Date(dates)) - cum.attr.ret[k,] <- apply(attr.factor,2,Return.cumulative) - cum.spec.ret[k] <- Return.cumulative(specific.returns) - } - - - -} - if (class(fit) == "StatFactorModel") { - # if benchmark is provided - - if (!is.null(benchmark)) { - x = fit$asset.ret - benchmark - fit = fitStatisticalFactorModel(data=x,...) 
- } # return attributed to factors cum.attr.ret <- t(fit$loadings) cum.spec.ret <- fit$r2 factorName = rownames(fit$loadings) fundName = colnames(fit$loadings) - + data <- checkData(fit$data) # create list for attribution attr.list <- list() # pca method - + if ( dim(fit$asset.ret)[1] > dim(fit$asset.ret)[2] ) { - - for (k in fundName) { - fit.lm = fit$asset.fit[[k]] - ## extract information from lm object - date <- index(fit$data[,k]) - # probably needs more general Date setting - actual.xts = xts(fit.lm$model[1], as.Date(date)) - - - # attributed returns - # active portfolio management p.512 17A.9 - - cum.ret <- Return.cumulative(actual.xts) - # setup initial value - attr.ret.xts.all <- xts(, as.Date(date)) - for ( i in factorName ) { - + for (k in fundName) { + fit.lm = fit$asset.fit[[k]] + + ## extract information from lm object + date <- index(fit$data[,k]) + # probably needs more general Date setting + actual.xts = xts(fit.lm$model[1], as.Date(date)) + if (!is.null(benchmark)) { + benchmark.xts <- checkData(benchmark)[as.Date(date)] + } + + # attributed returns + # active portfolio management p.512 17A.9 + + cum.ret <- Return.cumulative(actual.xts) + # setup initial value + attr.ret.xts.all <- xts(, as.Date(date)) + for ( i in factorName ) { + if (!is.null(benchmark)) { + attr.ret.xts <- actual.xts - xts(as.matrix(benchmark.xts)%*%as.matrix(fit.lm$coef[i]-1), + as.Date(date)) + } else { attr.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[i])%*%as.matrix(fit.lm$coef[i]), - as.Date(date)) + as.Date(date)) + } cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) - + + + } + # specific returns + spec.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[,-1])%*%as.matrix(fit.lm$coef[-1]), + as.Date(date)) + cum.spec.ret[k] <- cum.ret - Return.cumulative(actual.xts-spec.ret.xts) + attr.list[[k]] <- merge(attr.ret.xts.all,spec.ret.xts) + colnames(attr.list[[k]]) <- 
c(factorName,"specific.returns") } - - # specific returns - spec.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[,-1])%*%as.matrix(fit.lm$coef[-1]), - as.Date(date)) - cum.spec.ret[k] <- cum.ret - Return.cumulative(actual.xts-spec.ret.xts) - attr.list[[k]] <- merge(attr.ret.xts.all,spec.ret.xts) - colnames(attr.list[[k]]) <- c(factorName,"specific.returns") - } } else { - # apca method -# fit$loadings # f X K -# fit$factors # T X f + # apca method + # fit$loadings # f X K + # fit$factors # T X f - dates <- index(fit$factors) + date <- index(fit$factors) for ( k in fundName) { - attr.ret.xts.all <- xts(, as.Date(dates)) - actual.xts <- xts(fit$asset.ret[,k],as.Date(dates)) + attr.ret.xts.all <- xts(, as.Date(date)) + actual.xts <- xts(fit$asset.ret[,k],as.Date(date)) cum.ret <- Return.cumulative(actual.xts) + if (!is.null(benchmark)) { + benchmark.xts <- checkData(benchmark)[as.Date(date)] + } for (i in factorName) { - attr.ret.xts <- xts(fit$factors[,i] * fit$loadings[i,k], as.Date(dates) ) - attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) - cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) - } - spec.ret.xts <- actual.xts - xts(fit$factors%*%fit$loadings[,k],as.Date(dates)) + if (!is.null(benchmark)) { + attr.ret.xts <- actual.xts - xts(coredata(benchmark.xts)*(fit$loadings[i,k]-1), + as.Date(date)) + } else { + attr.ret.xts <- xts(fit$factors[,i] * fit$loadings[i,k], as.Date(date) ) + } + attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) + cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) + } + spec.ret.xts <- actual.xts - xts(fit$factors%*%fit$loadings[,k],as.Date(date)) cum.spec.ret[k] <- cum.ret - Return.cumulative(actual.xts-spec.ret.xts) - attr.list[[k]] <- merge(attr.ret.xts.all,spec.ret.xts) + attr.list[[k]] <- merge(attr.ret.xts.all,spec.ret.xts) colnames(attr.list[[k]]) <- c(factorName,"specific.returns") } - - } - + + } + } @@ -263,6 +261,6 @@ ans = list(cum.ret.attr.f=cum.attr.ret, 
cum.spec.ret=cum.spec.ret, attr.list=attr.list) -class(ans) = "FM.attribution" -return(ans) - } + class(ans) = "FM.attribution" + return(ans) + } Modified: pkg/FactorAnalytics/R/fitStatisticalFactorModel.R =================================================================== --- pkg/FactorAnalytics/R/fitStatisticalFactorModel.R 2013-09-04 12:57:34 UTC (rev 2986) +++ pkg/FactorAnalytics/R/fitStatisticalFactorModel.R 2013-09-04 19:09:13 UTC (rev 2987) @@ -342,37 +342,37 @@ # check data data.xts <- checkData(data,method="xts") -data <- coredata(data.xts) + call <- match.call() - pos <- rownames(data) - data <- as.matrix(data) - if(any(is.na(data))) { + pos <- rownames(coredata(data.xts)) + data.m <- as.matrix(coredata(data.xts)) + if(any(is.na(data.m))) { if(na.rm) { - data <- na.omit(data) + data.m <- na.omit(data.m) } else { stop("Missing values are not allowed if na.rm=F.") } } # use PCA if T > N - if(ncol(data) < nrow(data)) { + if(ncol(data.m) < nrow(data.m)) { if(is.character(k)) { stop("k must be the number of factors for PCA.") } - if(k >= ncol(data)) { + if(k >= ncol(data.m)) { stop("Number of factors must be smaller than number of variables." ) } - ans <- mfactor.pca(data, k, check = check) + ans <- mfactor.pca(data.m, k, check = check) } else if(is.character(k)) { - ans <- mfactor.test(data, k, refine = refine, check = + ans <- mfactor.test(data.m, k, refine = refine, check = check, max.k = max.k, sig = sig) } else { # use aPCA if T <= N - if(k >= ncol(data)) { + if(k >= ncol(data.m)) { stop("Number of factors must be smaller than number of variables." 
) } - ans <- mfactor.apca(data, k, refine = refine, check = + ans <- mfactor.apca(data.m, k, refine = refine, check = check) } @@ -383,20 +383,20 @@ f <- as.matrix(f) } - if(nrow(data) < ncol(data)) { - mimic <- ginv(data) %*% f + if(nrow(data.m) < ncol(data.m)) { + mimic <- ginv(data.m) %*% f } else { - mimic <- qr.solve(data, f) + mimic <- qr.solve(data.m, f) } mimic <- t(t(mimic)/colSums(mimic)) - dimnames(mimic)[[1]] <- dimnames(data)[[2]] + dimnames(mimic)[[1]] <- dimnames(data.m)[[2]] ans$mimic <- mimic ans$resid.variance <- apply(ans$residuals,2,var) ans$call <- call ans$data <- data - ans$assets.names <- colnames(data) + ans$assets.names <- colnames(data.m) class(ans) <- "StatFactorModel" return(ans) } Modified: pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd 2013-09-04 12:57:34 UTC (rev 2986) +++ pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd 2013-09-04 19:09:13 UTC (rev 2987) @@ -1,6 +1,6 @@ \name{factorModelPerformanceAttribution} \alias{factorModelPerformanceAttribution} -\title{Compute BARRA-type performance attribution} +\title{Compute performance attribution} \usage{ factorModelPerformanceAttribution(fit, benchmark = NULL, ...) @@ -10,7 +10,8 @@ "FundamentalFactorModel" or "statFactorModel".} \item{benchmark}{a xts, vector or data.frame provides - benchmark time series returns.} + benchmark time series returns. If benchmark is provided, + active returns decomposition will be calculated.} \item{...}{Other controled variables for fit methods.} } @@ -31,11 +32,20 @@ } \details{ total returns can be decomposed into returns attributed - to factors and specific returns. \eqn{R_t = \sum_j b_{jt} - * f_{jt} + u_t},t=1..T,\eqn{b_{jt}} is exposure to factor + to factors and specific returns. 
\eqn{R_t = \sum_j b_{j} + * f_{jt} + u_t},t=1..T,\eqn{b_{j}} is exposure to factor j and \eqn{f_{jt}} is factor j. The returns attributed to - factor j is \eqn{b_{jt} * f_{jt}} and portfolio specific - returns is \eqn{u_t} + factor j is \eqn{b_{j} * f_{jt}} and specific returns is + \eqn{u_t}. + + If benchmark is provided. active returns = total returns + - benchmark returns = active returns attributed to + factors + specific returns. Specifically, \eqn{R_t = + \sum_j b_{j}^A * f_{jt} + u_t},t=1..T, \eqn{b_{j}^A} is + \emph{active beta} to factor j and \eqn{f_{jt}} is factor + j. The active returns attributed to factor j is + \eqn{b_{j}^A * f_{jt}} specific returns is \eqn{u_t}, and + \eqn{b_{j}^A = b_{j}-1} } \examples{ data(managers.df) From noreply at r-forge.r-project.org Wed Sep 4 22:47:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 22:47:13 +0200 (CEST) Subject: [Returnanalytics-commits] r2988 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code: . 
Covariance Matrix Integrated Regression Function Data R Tests Message-ID: <20130904204714.0E8AA185C15@r-forge.r-project.org> Author: shubhanm Date: 2013-09-04 22:47:13 +0200 (Wed, 04 Sep 2013) New Revision: 2988 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/glmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/nlsi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/Investment.csv pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/PublicSchools.csv pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/RealInt.csv pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/ps.csv pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Cross Sectional Data.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/HAC Data.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Tests.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Time Series Data.R Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/glmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/nlsi.R Log: HAC Error Test Matlab code /R code / Data Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/glmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix 
Integrated Regression Function/glmi.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/glmi.R 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,92 @@ +glmi <- function (formula, family = gaussian, data,vcov = NULL, weights, subset, + na.action, start = NULL, etastart, mustart, offset, control = list(...), + model = TRUE, method = "glm.fit", x = FALSE, y = TRUE, contrasts = NULL, + ...) +{ + call <- match.call() + if (is.character(family)) + family <- get(family, mode = "function", envir = parent.frame()) + if (is.function(family)) + family <- family() + if (is.null(family$family)) { + print(family) + stop("'family' not recognized") + } + if (missing(data)) + data <- environment(formula) + mf <- match.call(expand.dots = FALSE) + m <- match(c("formula", "data", "subset", "weights", "na.action", + "etastart", "mustart", "offset"), names(mf), 0L) + mf <- mf[c(1L, m)] + mf$drop.unused.levels <- TRUE + mf[[1L]] <- as.name("model.frame") + mf <- eval(mf, parent.frame()) + if (identical(method, "model.frame")) + return(mf) + if (!is.character(method) && !is.function(method)) + stop("invalid 'method' argument") + if (identical(method, "glm.fit")) + control <- do.call("glm.control", control) + mt <- attr(mf, "terms") + Y <- model.response(mf, "any") + if (length(dim(Y)) == 1L) { + nm <- rownames(Y) + dim(Y) <- NULL + if (!is.null(nm)) + names(Y) <- nm + } + X <- if (!is.empty.model(mt)) + model.matrix(mt, mf, contrasts) + else matrix(, NROW(Y), 0L) + weights <- as.vector(model.weights(mf)) + if (!is.null(weights) && !is.numeric(weights)) + stop("'weights' must be a numeric vector") + if (!is.null(weights) && any(weights < 0)) + stop("negative weights not allowed") + offset <- as.vector(model.offset(mf)) + if (!is.null(offset)) { + if (length(offset) != NROW(Y)) + stop(gettextf("number of offsets is %d should equal %d (number of observations)", + length(offset), NROW(Y)), domain = NA) + } + mustart <- 
model.extract(mf, "mustart") + etastart <- model.extract(mf, "etastart") + fit <- eval(call(if (is.function(method)) "method" else method, + x = X, y = Y, weights = weights, start = start, etastart = etastart, + mustart = mustart, offset = offset, family = family, + control = control, intercept = attr(mt, "intercept") > + 0L)) + if (length(offset) && attr(mt, "intercept") > 0L) { + fit2 <- eval(call(if (is.function(method)) "method" else method, + x = X[, "(Intercept)", drop = FALSE], y = Y, weights = weights, + offset = offset, family = family, control = control, + intercept = TRUE)) + if (!fit2$converged) + warning("fitting to calculate the null deviance did not converge -- increase 'maxit'?") + fit$null.deviance <- fit2$deviance + } + if (model) + fit$model <- mf + fit$na.action <- attr(mf, "na.action") + if (x) + fit$x <- X + if (!y) + fit$y <- NULL + fit <- c(fit, list(call = call, formula = formula, terms = mt, + data = data, offset = offset, control = control, method = method, + contrasts = attr(X, "contrasts"), xlevels = .getXlevels(mt, + mf))) + class(fit) <- c(fit$class, c("glm", "lm")) + fit + if(is.null(vcov)) { + se <- vcov(fit) + } else { + if (is.function(vcov)) + se <- vcov(fit) + else + se <- vcov + } + fit = list(fit,vHaC = se) + fit + +} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/lmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/lmi.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/lmi.R 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,76 @@ +lmi <- function (formula, data,vcov = NULL, subset, weights, na.action, method = "qr", + model = TRUE, x = FALSE, y = FALSE, qr = TRUE, singular.ok = TRUE, + contrasts = NULL, offset, ...) 
+{ + ret.x <- x + ret.y <- y + cl <- match.call() + mf <- match.call(expand.dots = FALSE) + m <- match(c("formula", "data", "subset", "weights", "na.action", + "offset"), names(mf), 0L) + mf <- mf[c(1L, m)] + mf$drop.unused.levels <- TRUE + mf[[1L]] <- as.name("model.frame") + mf <- eval(mf, parent.frame()) + if (method == "model.frame") + return(mf) + else if (method != "qr") + warning(gettextf("method = '%s' is not supported. Using 'qr'", + method), domain = NA) + mt <- attr(mf, "terms") + y <- model.response(mf, "numeric") + w <- as.vector(model.weights(mf)) + if (!is.null(w) && !is.numeric(w)) + stop("'weights' must be a numeric vector") + offset <- as.vector(model.offset(mf)) + if (!is.null(offset)) { + if (length(offset) != NROW(y)) + stop(gettextf("number of offsets is %d, should equal %d (number of observations)", + length(offset), NROW(y)), domain = NA) + } + if (is.empty.model(mt)) { + x <- NULL + z <- list(coefficients = if (is.matrix(y)) matrix(, 0, + 3) else numeric(), residuals = y, fitted.values = 0 * + y, weights = w, rank = 0L, df.residual = if (!is.null(w)) sum(w != + 0) else if (is.matrix(y)) nrow(y) else length(y)) + if (!is.null(offset)) { + z$fitted.values <- offset + z$residuals <- y - offset + } + } + else { + x <- model.matrix(mt, mf, contrasts) + z <- if (is.null(w)) + lm.fit(x, y, offset = offset, singular.ok = singular.ok, + ...) + else lm.wfit(x, y, w, offset = offset, singular.ok = singular.ok, + ...) 
+ } + class(z) <- c(if (is.matrix(y)) "mlm", "lm") + z$na.action <- attr(mf, "na.action") + z$offset <- offset + z$contrasts <- attr(x, "contrasts") + z$xlevels <- .getXlevels(mt, mf) + z$call <- cl + z$terms <- mt + if (model) + z$model <- mf + if (ret.x) + z$x <- x + if (ret.y) + z$y <- y + if (!qr) + z$qr <- NULL + #z + if(is.null(vcov)) { + se <- vcov(z) + } else { + if (is.function(vcov)) + se <- vcov(z) + else + se <- vcov + } + z = list(z,vHaC = se) + z +} Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/nlsi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/nlsi.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Covariance Matrix Integrated Regression Function/nlsi.R 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,178 @@ +nlsi <- function (formula, data = parent.frame(),vcov = NULL, start, control = nls.control(), + algorithm = c("default", "plinear", "port"), trace = FALSE, + subset, weights, na.action, model = FALSE, lower = -Inf, + upper = Inf, ...) 
+{ + formula <- as.formula(formula) + algorithm <- match.arg(algorithm) + if (!is.list(data) && !is.environment(data)) + stop("'data' must be a list or an environment") + mf <- match.call() + varNames <- all.vars(formula) + if (length(formula) == 2L) { + formula[[3L]] <- formula[[2L]] + formula[[2L]] <- 0 + } + form2 <- formula + form2[[2L]] <- 0 + varNamesRHS <- all.vars(form2) + mWeights <- missing(weights) + pnames <- if (missing(start)) { + if (!is.null(attr(data, "parameters"))) { + names(attr(data, "parameters")) + } + else { + cll <- formula[[length(formula)]] + func <- get(as.character(cll[[1L]])) + if (!is.null(pn <- attr(func, "pnames"))) + as.character(as.list(match.call(func, call = cll))[-1L][pn]) + } + } + else names(start) + env <- environment(formula) + if (is.null(env)) + env <- parent.frame() + if (length(pnames)) + varNames <- varNames[is.na(match(varNames, pnames))] + lenVar <- function(var) tryCatch(length(eval(as.name(var), + data, env)), error = function(e) -1) + if (length(varNames)) { + n <- sapply(varNames, lenVar) + if (any(not.there <- n == -1)) { + nnn <- names(n[not.there]) + if (missing(start)) { + if (algorithm == "plinear") + stop("no starting values specified") + warning("No starting values specified for some parameters.\n", + "Initializing ", paste(sQuote(nnn), collapse = ", "), + " to '1.'.\n", "Consider specifying 'start' or using a selfStart model", + domain = NA) + start <- setNames(as.list(rep(1, length(nnn))), + nnn) + varNames <- varNames[i <- is.na(match(varNames, + nnn))] + n <- n[i] + } + else stop(gettextf("parameters without starting value in 'data': %s", + paste(nnn, collapse = ", ")), domain = NA) + } + } + else { + if (length(pnames) && any((np <- sapply(pnames, lenVar)) == + -1)) { + message(sprintf(ngettext(sum(np == -1), "fitting parameter %s without any variables", + "fitting parameters %s without any variables"), + paste(sQuote(pnames[np == -1]), collapse = ", ")), + domain = NA) + n <- integer() + } + else 
stop("no parameters to fit") + } + respLength <- length(eval(formula[[2L]], data, env)) + if (length(n) > 0L) { + varIndex <- n%%respLength == 0 + if (is.list(data) && diff(range(n[names(n) %in% names(data)])) > + 0) { + mf <- data + if (!missing(subset)) + warning("argument 'subset' will be ignored") + if (!missing(na.action)) + warning("argument 'na.action' will be ignored") + if (missing(start)) + start <- getInitial(formula, mf) + startEnv <- new.env(hash = FALSE, parent = environment(formula)) + for (i in names(start)) assign(i, start[[i]], envir = startEnv) + rhs <- eval(formula[[3L]], data, startEnv) + n <- NROW(rhs) + wts <- if (mWeights) + rep(1, n) + else eval(substitute(weights), data, environment(formula)) + } + else { + mf$formula <- as.formula(paste("~", paste(varNames[varIndex], + collapse = "+")), env = environment(formula)) + mf$start <- mf$control <- mf$algorithm <- mf$trace <- mf$model <- NULL + mf$lower <- mf$upper <- NULL + mf[[1L]] <- as.name("model.frame") + mf <- eval.parent(mf) + n <- nrow(mf) + mf <- as.list(mf) + wts <- if (!mWeights) + model.weights(mf) + else rep(1, n) + } + if (any(wts < 0 | is.na(wts))) + stop("missing or negative weights not allowed") + } + else { + varIndex <- logical() + mf <- list(0) + wts <- numeric() + } + if (missing(start)) + start <- getInitial(formula, mf) + for (var in varNames[!varIndex]) mf[[var]] <- eval(as.name(var), + data, env) + varNamesRHS <- varNamesRHS[varNamesRHS %in% varNames[varIndex]] + m <- switch(algorithm, plinear = nlsModel.plinear(formula, + mf, start, wts), port = nlsModel(formula, mf, start, + wts, upper), nlsModel(formula, mf, start, wts)) + ctrl <- nls.control() + if (!missing(control)) { + control <- as.list(control) + ctrl[names(control)] <- control + } + if (algorithm != "port") { + if (!missing(lower) || !missing(upper)) + warning("upper and lower bounds ignored unless algorithm = \"port\"") + convInfo <- .Call(C_nls_iter, m, ctrl, trace) + nls.out <- list(m = m, convInfo = 
convInfo, data = substitute(data), + call = match.call()) + } + else { + pfit <- nls_port_fit(m, start, lower, upper, control, + trace, give.v = TRUE) + iv <- pfit[["iv"]] + msg.nls <- port_msg(iv[1L]) + conv <- (iv[1L] %in% 3:6) + if (!conv) { + msg <- paste("Convergence failure:", msg.nls) + if (ctrl$warnOnly) + warning(msg) + else stop(msg) + } + v. <- port_get_named_v(pfit[["v"]]) + cInfo <- list(isConv = conv, finIter = iv[31L], finTol = v.[["NREDUC"]], + nEval = c(`function` = iv[6L], gradient = iv[30L]), + stopCode = iv[1L], stopMessage = msg.nls) + cl <- match.call() + cl$lower <- lower + cl$upper <- upper + nls.out <- list(m = m, data = substitute(data), call = cl, + convInfo = cInfo, convergence = as.integer(!conv), + message = msg.nls) + } + nls.out$call$algorithm <- algorithm + nls.out$call$control <- ctrl + nls.out$call$trace <- trace + nls.out$na.action <- attr(mf, "na.action") + nls.out$dataClasses <- attr(attr(mf, "terms"), "dataClasses")[varNamesRHS] + if (model) + nls.out$model <- mf + if (!mWeights) + nls.out$weights <- wts + nls.out$control <- control + class(nls.out) <- "nls" + nls.out + if(is.null(vcov)) { + se <- vcov(nls.out) + } else { + if (is.function(vcov)) + se <- vcov(nls.out) + else + se <- vcov + } + nls.out = list(nls.out,vHaC = se) + nls.out + +} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/Investment.csv =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/Investment.csv (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/Investment.csv 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,21 @@ +"","GNP","Investment","Price","Interest","RealGNP","RealInv","RealInt" +"1",596.7,90.9,0.7167,3.23,832.565927166178,126.83131017162,NA +"2",637.7,97.4,0.7277,3.55,876.322660436993,133.84636526041,2.01518766568997 +"3",691.1,113.5,0.7436,4.04,929.397525551372,152.635825712749,1.85503366772021 
+"4",756,125.7,0.7676,4.5,984.887962480459,163.757165190203,1.27245831091986 +"5",799.6,122.8,0.7906,4.19,1011.38375917025,155.325069567417,1.19364773319437 +"6",873.4,133.3,0.8254,5.16,1058.15362248607,161.497455779016,0.758279787503155 +"7",944,149.3,0.8679,5.87,1087.68291277797,172.024426777279,0.720981342379455 +"8",992.7,144.2,0.9145,5.95,1085.51120831055,157.681793329688,0.580717824634178 +"9",1077.6,166.4,0.9601,4.88,1122.3830850953,173.315279658369,-0.106331328594858 +"10",1185.9,195,1,4.5,1185.9,195,0.344182897614827 +"11",1326.4,229.8,1.0575,6.44,1254.27895981087,217.304964539007,0.68999999999999 +"12",1434.2,228.7,1.1508,7.83,1246.26346889121,198.731317344456,-0.992695035460986 +"13",1549.2,206.1,1.2579,6.25,1231.57643691867,163.844502742666,-3.05656934306569 +"14",1718,257.9,1.3234,5.5,1298.17137675684,194.87683240139,0.292908816281112 +"15",1918.3,324.1,1.4005,5.46,1369.72509817922,231.417350946091,-0.365902977180004 +"16",2163.9,386.6,1.5042,7.46,1438.57199840447,257.013694987369,0.0555016065690905 +"17",2417.8,423,1.6342,10.28,1479.50067311223,258.842246970995,1.63753224305278 +"18",2631.7,401.9,1.7842,11.77,1475.00280237642,225.255016253783,2.59119691592217 +"19",2954.1,474.9,1.9514,13.42,1513.83622014964,243.363738854156,4.0488532675709 +"20",3073,414.5,2.0688,11.02,1485.40216550657,200.357695282289,5.00380649789895 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/PublicSchools.csv =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/PublicSchools.csv (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/PublicSchools.csv 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,52 @@ +"","Expenditure","Income" +"Alabama",275,6247 +"Alaska",821,10851 +"Arizona",339,7374 +"Arkansas",275,6183 +"California",387,8850 +"Colorado",452,8001 +"Connecticut",531,8914 +"Delaware",424,8604 +"Florida",316,7505 +"Georgia",265,6700 +"Hawaii",403,8380 
+"Idaho",304,6813 +"Illinois",437,8745 +"Indiana",345,7696 +"Iowa",431,7873 +"Kansas",355,8001 +"Kentucky",260,6615 +"Louisiana",316,6640 +"Maine",327,6333 +"Maryland",427,8306 +"Massachusetts",427,8063 +"Michigan",466,8442 +"Minnesota",477,7847 +"Mississippi",259,5736 +"Missouri",274,7342 +"Montana",433,7051 +"Nebraska",294,7391 +"Nevada",359,9032 +"New Hampshire",279,7277 +"New Jersey",423,8818 +"New Mexico",388,6505 +"New York",447,8267 +"North Carolina",335,6607 +"North Dakota",311,7478 +"Ohio",322,7812 +"Oklahoma",320,6951 +"Oregon",397,7839 +"Pennsylvania",412,7733 +"Rhode Island",342,7526 +"South Carolina",315,6242 +"South Dakota",321,6841 +"Tennessee",268,6489 +"Texas",315,7697 +"Utah",417,6622 +"Vermont",353,6541 +"Virginia",356,7624 +"Washington",415,8450 +"Washington DC",428,10022 +"West Virginia",320,6456 +"Wisconsin",NA,7597 +"Wyoming",500,9096 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/RealInt.csv =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/RealInt.csv (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/RealInt.csv 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,104 @@ +"","V1" +"1",1.99132 +"2",0.00403 +"3",2.27 +"4",0.84833 +"5",1.89112 +"6",-0.28 +"7",3.68431 +"8",1.62478 +"9",1.21599 +"10",1.28373 +"11",1.70141 +"12",3.16687 +"13",2.32779 +"14",2.26202 +"15",1.91218 +"16",3.53196 +"17",-0.31777 +"18",3.44694 +"19",1.52422 +"20",0.74268 +"21",1.31541 +"22",0.36646 +"23",3.59562 +"24",3.6574 +"25",0.97494 +"26",-0.5328 +"27",1.12681 +"28",0.31123 +"29",0.57834 +"30",1.08163 +"31",0.24977 +"32",0.23792 +"33",-0.32653 +"34",1.14733 +"35",1.19323 +"36",2.58962 +"37",0.01195 +"38",2.63842 +"39",0.42092 +"40",2.58823 +"41",-2.19809 +"42",2.89548 +"43",1.8213 +"44",0.86332 +"45",0.67496 +"46",0.34435 +"47",1.22762 +"48",-2.83991 +"49",-1.8163 +"50",-2.22965 +"51",-1.58457 +"52",-6.31184 +"53",-2.46258 +"54",-5.61479 
+"55",-3.53887 +"56",1.0178 +"57",-1.58875 +"58",-1.85396 +"59",-0.2567 +"60",2.42226 +"61",-1.29502 +"62",-0.48977 +"63",1.21167 +"64",-4.85498 +"65",-3.599 +"66",0.17094 +"67",1.51603 +"68",-1.85304 +"69",-5.60478 +"70",-1.25767 +"71",0.96658 +"72",-3.09451 +"73",-5.26773 +"74",-4.03154 +"75",-1.76618 +"76",-5.73978 +"77",3.83047 +"78",0.52007 +"79",-0.18033 +"80",3.82808 +"81",3.01171 +"82",2.75289 +"83",11.74184 +"84",9.91701 +"85",3.03444 +"86",10.15143 +"87",9.29177 +"88",6.87498 +"89",2.43675 +"90",4.37202 +"91",6.77774 +"92",4.16692 +"93",5.65037 +"94",5.17734 +"95",9.41206 +"96",3.77006 +"97",4.24568 +"98",4.53154 +"99",3.39706 +"100",8.93951 +"101",4.20825 +"102",3.43461 +"103",4.30529 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/ps.csv =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/ps.csv (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/ps.csv 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,51 @@ +"","Expenditure","Income" +"Alabama",275,0.6247 +"Alaska",821,1.0851 +"Arizona",339,0.7374 +"Arkansas",275,0.6183 +"California",387,0.885 +"Colorado",452,0.8001 +"Connecticut",531,0.8914 +"Delaware",424,0.8604 +"Florida",316,0.7505 +"Georgia",265,0.67 +"Hawaii",403,0.838 +"Idaho",304,0.6813 +"Illinois",437,0.8745 +"Indiana",345,0.7696 +"Iowa",431,0.7873 +"Kansas",355,0.8001 +"Kentucky",260,0.6615 +"Louisiana",316,0.664 +"Maine",327,0.6333 +"Maryland",427,0.8306 +"Massachusetts",427,0.8063 +"Michigan",466,0.8442 +"Minnesota",477,0.7847 +"Mississippi",259,0.5736 +"Missouri",274,0.7342 +"Montana",433,0.7051 +"Nebraska",294,0.7391 +"Nevada",359,0.9032 +"New Hampshire",279,0.7277 +"New Jersey",423,0.8818 +"New Mexico",388,0.6505 +"New York",447,0.8267 +"North Carolina",335,0.6607 +"North Dakota",311,0.7478 +"Ohio",322,0.7812 +"Oklahoma",320,0.6951 +"Oregon",397,0.7839 +"Pennsylvania",412,0.7733 +"Rhode Island",342,0.7526 +"South 
Carolina",315,0.6242 +"South Dakota",321,0.6841 +"Tennessee",268,0.6489 +"Texas",315,0.7697 +"Utah",417,0.6622 +"Vermont",353,0.6541 +"Virginia",356,0.7624 +"Washington",415,0.845 +"Washington DC",428,1.0022 +"West Virginia",320,0.6456 +"Wyoming",500,0.9096 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Cross Sectional Data.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Cross Sectional Data.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Cross Sectional Data.R 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,79 @@ +library("sandwich") +library("lmtest") +library("strucchange") +data("PublicSchools") +ps <- na.omit(PublicSchools) +ps$Income <- ps$Income * 0.0001 +fm.ps <- lm(Expenditure ~ Income + I(Income^3), data = ps) +sqrt(diag(vcov(fm.ps))) +sqrt(diag(vcovHC(fm.ps, type = "const"))) +sqrt(diag(vcovHC(fm.ps, type = "HC0"))) +sqrt(diag(vcovHC(fm.ps, type = "HC3"))) +sqrt(diag(vcovHC(fm.ps, type = "HC4"))) +coeftest(fm.ps, df = Inf, vcov = vcovHC(fm.ps, type = "HC0")) +coeftest(fm.ps, df = Inf, vcov = vcovHC(fm.ps, type = "HC4")) +plot(Expenditure ~ Income, data = ps, + xlab = "per capita income", + ylab = "per capita spending on public schools") +inc <- seq(0.5, 1.2, by = 0.001) +lines(inc, predict(fm.ps, data.frame(Income = inc)), col = 4, lty = 2) +fm.ps2 <- lm(Expenditure ~ Income, data = ps) +abline(fm.ps2, col = 4) +text(ps[2,2], ps[2,1], rownames(ps)[2], pos = 2) +## Willam H. Greene, Econometric Analysis, 2nd Ed. +## Chapter 14 +## load data set, p. 385, Table 14.1 +data(PublicSchools) + +## omit NA in Wisconsin and scale income +ps <- na.omit(PublicSchools) +ps$Income <- ps$Income * 0.0001 + +## fit quadratic regression, p. 385, Table 14.2 +fmq <- lm(Expenditure ~ Income + I(Income^2), data = ps) +summary(fmq) + +## compare standard and HC0 standard errors +## p. 
391, Table 14.3 +library(sandwich) +coef(fmq) +sqrt(diag(vcovHC(fmq, type = "const"))) +sqrt(diag(vcovHC(fmq, type = "HC0"))) + +if(require(lmtest)) { + ## compare t ratio + coeftest(fmq, vcov = vcovHC(fmq, type = "HC0")) + + ## White test, p. 393, Example 14.5 + wt <- lm(residuals(fmq)^2 ~ poly(Income, 4), data = ps) + wt.stat <- summary(wt)$r.squared * nrow(ps) + c(wt.stat, pchisq(wt.stat, df = 3, lower = FALSE)) + + ## Bresch-Pagan test, p. 395, Example 14.7 + bptest(fmq, studentize = FALSE) + bptest(fmq) + + ## Francisco Cribari-Neto, Asymptotic Inference, CSDA 45 + ## quasi z-tests, p. 229, Table 8 + ## with Alaska + coeftest(fmq, df = Inf)[3,4] + coeftest(fmq, df = Inf, vcov = vcovHC(fmq, type = "HC0"))[3,4] + coeftest(fmq, df = Inf, vcov = vcovHC(fmq, type = "HC3"))[3,4] + coeftest(fmq, df = Inf, vcov = vcovHC(fmq, type = "HC4"))[3,4] + ## without Alaska (observation 2) + fmq1 <- lm(Expenditure ~ Income + I(Income^2), data = ps[-2,]) + coeftest(fmq1, df = Inf)[3,4] + coeftest(fmq1, df = Inf, vcov = vcovHC(fmq1, type = "HC0"))[3,4] + coeftest(fmq1, df = Inf, vcov = vcovHC(fmq1, type = "HC3"))[3,4] + coeftest(fmq1, df = Inf, vcov = vcovHC(fmq1, type = "HC4"))[3,4] +} + +## visualization, p. 
230, Figure 1 +plot(Expenditure ~ Income, data = ps, + xlab = "per capita income", + ylab = "per capita spending on public schools") +inc <- seq(0.5, 1.2, by = 0.001) +lines(inc, predict(fmq, data.frame(Income = inc)), col = 4) +fml <- lm(Expenditure ~ Income, data = ps) +abline(fml) +text(ps[2,2], ps[2,1], rownames(ps)[2], pos = 2) \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/HAC Data.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/HAC Data.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/HAC Data.R 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,17 @@ +data("RealInt") +#OLS-based CUSUM test with quadratic spectral kernel HAC estimate: + ocus <- gefp(RealInt ~ 1, fit = lm, vcov = kernHAC) +plot(ocus, aggregate = FALSE) +sctest(ocus) +#supF test with quadratic spectral kernel HAC estimate: + fs <- Fstats(RealInt ~ 1, vcov = kernHAC) +plot(fs) +sctest(fs) +#Breakpoint estimation and con?dence intervals with quadratic spectral kernel HAC estimate: + bp <- breakpoints(RealInt ~ 1) +confint(bp, vcov = kernHAC) +plot(bp) +#Visualization: + plot(RealInt, ylab = "Real interest rate") +lines(ts(fitted(bp), start = start(RealInt), freq = 4), col = 4) +lines(confint(bp, vcov = kernHAC)) \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Tests.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Tests.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Tests.R 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,21 @@ +fpe <- read.table("http://data.princeton.edu/wws509/datasets/effort.dat") +attach(fpe) +lmfit = lm( change ~ setting + effort ) +sandwich(lmfit) +Fr <- c(68,42,42,30, 37,52,24,43, + 66,50,33,23, 47,55,23,47, + 63,53,29,27, 57,49,19,29) + 
+Temp <- gl(2, 2, 24, labels = c("Low", "High")) +Soft <- gl(3, 8, 24, labels = c("Hard","Medium","Soft")) +M.user <- gl(2, 4, 24, labels = c("N", "Y")) +Brand <- gl(2, 1, 24, labels = c("X", "M")) + +detg <- data.frame(Fr,Temp, Soft,M.user, Brand) +detg.m0 <- glm(Fr ~ M.user*Temp*Soft + Brand, family = poisson, data = detg) +summary(detg.m0) + +detg.mod <- glm(terms(Fr ~ M.user*Temp*Soft + Brand*M.user*Temp, + keep.order = TRUE), + family = poisson, data = detg) +sandwich(detg.mod) \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Time Series Data.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Time Series Data.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/Time Series Data.R 2013-09-04 20:47:13 UTC (rev 2988) @@ -0,0 +1,78 @@ +## Willam H. Greene, Econometric Analysis, 2nd Ed. +## Chapter 15 +## load data set, p. 411, Table 15.1 +data(Investment) + +## fit linear model, p. 412, Table 15.2 +fm <- lm(RealInv ~ RealGNP + RealInt, data = Investment) +summary(fm) + +## visualize residuals, p. 412, Figure 15.1 +plot(ts(residuals(fm), start = 1964), + type = "b", pch = 19, ylim = c(-35, 35), ylab = "Residuals") +sigma <- sqrt(sum(residuals(fm)^2)/fm$df.residual) ## maybe used df = 26 instead of 16 ?? +abline(h = c(-2, 0, 2) * sigma, lty = 2) + +if(require(lmtest)) { + ## Newey-West covariances, Example 15.3 + coeftest(fm, vcov = NeweyWest(fm, lag = 4)) + ## Note, that the following is equivalent: + coeftest(fm, vcov = kernHAC(fm, kernel = "Bartlett", bw = 5, prewhite = FALSE, adjust = FALSE)) + + ## Durbin-Watson test, p. 424, Example 15.4 + dwtest(fm) + + ## Breusch-Godfrey test, p. 
427, Example 15.6 + bgtest(fm, order = 4) +} + +## visualize fitted series +plot(Investment[, "RealInv"], type = "b", pch = 19, ylab = "Real investment") +lines(ts(fitted(fm), start = 1964), col = 4) + +## 3-d visualization of fitted model +if(require(scatterplot3d)) { + s3d <- scatterplot3d(Investment[,c(5,7,6)], + type = "b", angle = 65, scale.y = 1, pch = 16) + s3d$plane3d(fm, lty.box = "solid", col = 4) +} +## fit investment equation +data(Investment) +fm <- lm(RealInv ~ RealGNP + RealInt, data = Investment) + +## Newey & West (1994) compute this type of estimator +NeweyWest(fm) + +## The Newey & West (1987) estimator requires specification +## of the lag and suppression of prewhitening +NeweyWest(fm, lag = 4, prewhite = FALSE) + +## bwNeweyWest() can also be passed to kernHAC(), e.g. +## for the quadratic spectral kernel +kernHAC(fm, bw = bwNeweyWest) + +curve(kweights(x, kernel = "Quadratic", normalize = TRUE), + from = 0, to = 3.2, xlab = "x", ylab = "k(x)") +curve(kweights(x, kernel = "Bartlett", normalize = TRUE), + from = 0, to = 3.2, col = 2, add = TRUE) +curve(kweights(x, kernel = "Parzen", normalize = TRUE), + from = 0, to = 3.2, col = 3, add = TRUE) +curve(kweights(x, kernel = "Tukey", normalize = TRUE), + from = 0, to = 3.2, col = 4, add = TRUE) +curve(kweights(x, kernel = "Truncated", normalize = TRUE), + from = 0, to = 3.2, col = 5, add = TRUE) + +## fit investment equation +data(Investment) +fm <- lm(RealInv ~ RealGNP + RealInt, data = Investment) + +## compute quadratic spectral kernel HAC estimator +kernHAC(fm) +kernHAC(fm, verbose = TRUE) + +## use Parzen kernel instead, VAR(2) prewhitening, no finite sample +## adjustment and Newey & West (1994) bandwidth selection +kernHAC(fm, kernel = "Parzen", prewhite = 2, adjust = FALSE, + bw = bwNeweyWest, verbose = TRUE) +## compare with estimate under assumption of spheric errors +vcov(fm) \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/glmi.R 
=================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/glmi.R 2013-09-04 19:09:13 UTC (rev 2987) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/glmi.R 2013-09-04 20:47:13 UTC (rev 2988) @@ -1,92 +0,0 @@ -glmi <- function (formula, family = gaussian, data,vcov = NULL, weights, subset, - na.action, start = NULL, etastart, mustart, offset, control = list(...), - model = TRUE, method = "glm.fit", x = FALSE, y = TRUE, contrasts = NULL, - ...) -{ - call <- match.call() - if (is.character(family)) - family <- get(family, mode = "function", envir = parent.frame()) - if (is.function(family)) - family <- family() - if (is.null(family$family)) { - print(family) - stop("'family' not recognized") - } - if (missing(data)) - data <- environment(formula) - mf <- match.call(expand.dots = FALSE) - m <- match(c("formula", "data", "subset", "weights", "na.action", - "etastart", "mustart", "offset"), names(mf), 0L) - mf <- mf[c(1L, m)] - mf$drop.unused.levels <- TRUE - mf[[1L]] <- as.name("model.frame") - mf <- eval(mf, parent.frame()) - if (identical(method, "model.frame")) - return(mf) - if (!is.character(method) && !is.function(method)) - stop("invalid 'method' argument") - if (identical(method, "glm.fit")) - control <- do.call("glm.control", control) - mt <- attr(mf, "terms") - Y <- model.response(mf, "any") - if (length(dim(Y)) == 1L) { - nm <- rownames(Y) - dim(Y) <- NULL - if (!is.null(nm)) - names(Y) <- nm - } - X <- if (!is.empty.model(mt)) - model.matrix(mt, mf, contrasts) - else matrix(, NROW(Y), 0L) - weights <- as.vector(model.weights(mf)) - if (!is.null(weights) && !is.numeric(weights)) - stop("'weights' must be a numeric vector") - if (!is.null(weights) && any(weights < 0)) - stop("negative weights not allowed") - offset <- as.vector(model.offset(mf)) - if (!is.null(offset)) { - if (length(offset) != NROW(Y)) - stop(gettextf("number of offsets is %d should equal %d (number of 
observations)", - length(offset), NROW(Y)), domain = NA) - } - mustart <- model.extract(mf, "mustart") - etastart <- model.extract(mf, "etastart") - fit <- eval(call(if (is.function(method)) "method" else method, - x = X, y = Y, weights = weights, start = start, etastart = etastart, - mustart = mustart, offset = offset, family = family, - control = control, intercept = attr(mt, "intercept") > - 0L)) - if (length(offset) && attr(mt, "intercept") > 0L) { - fit2 <- eval(call(if (is.function(method)) "method" else method, - x = X[, "(Intercept)", drop = FALSE], y = Y, weights = weights, - offset = offset, family = family, control = control, - intercept = TRUE)) - if (!fit2$converged) - warning("fitting to calculate the null deviance did not converge -- increase 'maxit'?") - fit$null.deviance <- fit2$deviance - } - if (model) - fit$model <- mf - fit$na.action <- attr(mf, "na.action") - if (x) - fit$x <- X - if (!y) - fit$y <- NULL - fit <- c(fit, list(call = call, formula = formula, terms = mt, [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 2988 From noreply at r-forge.r-project.org Wed Sep 4 23:05:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 23:05:13 +0200 (CEST) Subject: [Returnanalytics-commits] r2989 - in pkg/FactorAnalytics: R man Message-ID: <20130904210513.81600185DF2@r-forge.r-project.org> Author: chenyian Date: 2013-09-04 23:05:13 +0200 (Wed, 04 Sep 2013) New Revision: 2989 Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd Log: cancel benchmark function in factorModelPerformanceAttribution.r. I think it works better to use fit function then apply performance attribution analysis. 
Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-04 20:47:13 UTC (rev 2988) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-04 21:05:13 UTC (rev 2989) @@ -10,16 +10,8 @@ #' j. The returns attributed to factor j is \eqn{b_{j} * f_{jt}} and specific #' returns is \eqn{u_t}. #' -#' If benchmark is provided. active returns = total returns - benchmark returns = -#' active returns attributed to factors + specific returns. Specifically, -#' \eqn{R_t = \sum_j b_{j}^A * f_{jt} + u_t},t=1..T, \eqn{b_{j}^A} is \emph{active beta} to factor j -#' and \eqn{f_{jt}} is factor j. The active returns attributed to factor j is -#' \eqn{b_{j}^A * f_{jt}} specific returns is \eqn{u_t}, and \eqn{b_{j}^A = b_{j}-1} -#' #' @param fit Class of "TimeSeriesFactorModel", "FundamentalFactorModel" or #' "statFactorModel". -#' @param benchmark a xts, vector or data.frame provides benchmark time series -#' returns. If benchmark is provided, active returns decomposition will be calculated. #' @param ... Other controled variables for fit methods. #' @return an object of class \code{FM.attribution} containing #' \itemize{ @@ -46,7 +38,7 @@ #' #' factorModelPerformanceAttribution <- - function(fit,benchmark=NULL,...) { + function(fit,...) 
{ require(PerformanceAnalytics) @@ -76,9 +68,6 @@ data <- checkData(fit$data) date <- index(na.omit(data[,k])) actual.xts = xts(fit.lm$model[1], as.Date(date)) - if (!is.null(benchmark)) { - benchmark.xts <- checkData(benchmark)[as.Date(date)] - } # attributed returns # active portfolio management p.512 17A.9 # top-down method @@ -93,13 +82,8 @@ cum.attr.ret[k,i] <- NA attr.ret.xts.all <- merge(attr.ret.xts.all,xts(rep(NA,length(date)),as.Date(date))) } else { - if (!is.null(benchmark)) { - attr.ret.xts <- actual.xts - xts(as.matrix(benchmark.xts)%*%as.matrix(fit.lm$coef[i]-1), - as.Date(date)) - } else { - attr.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[i])%*%as.matrix(fit.lm$coef[i]), + attr.ret.xts <- actual.xts - xts(as.matrix(fit.lm$model[i])%*%as.matrix(fit.lm$coef[i]), as.Date(date)) - } cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) } @@ -119,10 +103,10 @@ if (class(fit) =="FundamentalFactorModel" ) { # if benchmark is provided - - if (!is.null(benchmark)) { - stop("use fitFundamentalFactorModel instead") - } +# +# if (!is.null(benchmark)) { +# stop("use fitFundamentalFactorModel instead") +# } # return attributed to factors factor.returns <- fit$factor.returns[,-1] factor.names <- colnames(fit$beta) @@ -188,13 +172,9 @@ fit.lm = fit$asset.fit[[k]] ## extract information from lm object - date <- index(fit$data[,k]) + date <- index(data[,k]) # probably needs more general Date setting actual.xts = xts(fit.lm$model[1], as.Date(date)) - if (!is.null(benchmark)) { - benchmark.xts <- checkData(benchmark)[as.Date(date)] - } - # attributed returns # active portfolio management p.512 17A.9 @@ -202,13 +182,8 @@ # setup initial value attr.ret.xts.all <- xts(, as.Date(date)) for ( i in factorName ) { - if (!is.null(benchmark)) { - attr.ret.xts <- actual.xts - xts(as.matrix(benchmark.xts)%*%as.matrix(fit.lm$coef[i]-1), - as.Date(date)) - } else { attr.ret.xts <- actual.xts - 
xts(as.matrix(fit.lm$model[i])%*%as.matrix(fit.lm$coef[i]), as.Date(date)) - } cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) @@ -232,16 +207,8 @@ attr.ret.xts.all <- xts(, as.Date(date)) actual.xts <- xts(fit$asset.ret[,k],as.Date(date)) cum.ret <- Return.cumulative(actual.xts) - if (!is.null(benchmark)) { - benchmark.xts <- checkData(benchmark)[as.Date(date)] - } for (i in factorName) { - if (!is.null(benchmark)) { - attr.ret.xts <- actual.xts - xts(coredata(benchmark.xts)*(fit$loadings[i,k]-1), - as.Date(date)) - } else { attr.ret.xts <- xts(fit$factors[,i] * fit$loadings[i,k], as.Date(date) ) - } attr.ret.xts.all <- merge(attr.ret.xts.all,attr.ret.xts) cum.attr.ret[k,i] <- cum.ret - Return.cumulative(actual.xts-attr.ret.xts) } @@ -264,3 +231,10 @@ class(ans) = "FM.attribution" return(ans) } + + +# If benchmark is provided, active return attribution will be calculated. +# active returns = total returns - benchmark returns. Specifically, +# \eqn{R_t^A = \sum_j b_{j}^A * f_{jt} + u_t^A},t=1..T, \eqn{b_{j}^A} is \emph{active exposure} to factor j +# and \eqn{f_{jt}} is factor j. The active returns attributed to factor j is +# \eqn{b_{j}^A * f_{jt}} specific returns is \eqn{u_t^A} Modified: pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd 2013-09-04 20:47:13 UTC (rev 2988) +++ pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd 2013-09-04 21:05:13 UTC (rev 2989) @@ -2,17 +2,12 @@ \alias{factorModelPerformanceAttribution} \title{Compute performance attribution} \usage{ - factorModelPerformanceAttribution(fit, benchmark = NULL, - ...) + factorModelPerformanceAttribution(fit, ...) 
} \arguments{ \item{fit}{Class of "TimeSeriesFactorModel", "FundamentalFactorModel" or "statFactorModel".} - \item{benchmark}{a xts, vector or data.frame provides - benchmark time series returns. If benchmark is provided, - active returns decomposition will be calculated.} - \item{...}{Other controled variables for fit methods.} } \value{ @@ -37,15 +32,6 @@ j and \eqn{f_{jt}} is factor j. The returns attributed to factor j is \eqn{b_{j} * f_{jt}} and specific returns is \eqn{u_t}. - - If benchmark is provided. active returns = total returns - - benchmark returns = active returns attributed to - factors + specific returns. Specifically, \eqn{R_t = - \sum_j b_{j}^A * f_{jt} + u_t},t=1..T, \eqn{b_{j}^A} is - \emph{active beta} to factor j and \eqn{f_{jt}} is factor - j. The active returns attributed to factor j is - \eqn{b_{j}^A * f_{jt}} specific returns is \eqn{u_t}, and - \eqn{b_{j}^A = b_{j}-1} } \examples{ data(managers.df) From noreply at r-forge.r-project.org Wed Sep 4 23:32:10 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 4 Sep 2013 23:32:10 +0200 (CEST) Subject: [Returnanalytics-commits] r2990 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
R man Message-ID: <20130904213210.6956A1806E8@r-forge.r-project.org> Author: shubhanm Date: 2013-09-04 23:32:10 +0200 (Wed, 04 Sep 2013) New Revision: 2990 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/inst/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CalmarRatio.Norm.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/CalmarRatio.Norm.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/Cdrawdown.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/GLMSmoothIndex.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/chart.Autocorrelation.Rd Log: Documentation addition Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R 2013-09-04 21:32:10 UTC (rev 2990) @@ -11,9 +11,8 @@ #' 12, quarterly scale = 4) #' @param \dots any other passthru parameters #' @author Peter Carl,Brian Peterson, Shubhankit Mohan -#' \url{http://en.wikipedia.org/wiki/Volatility_(finance)} #' @references Burghardt, G., and L. 
Liu, \emph{ It's the Autocorrelation, Stupid (November 2012) Newedge -#' working paper.} +#' working paper.} Paper Available at : #' \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} #' @keywords ts multivariate distribution models #' @examples Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R 2013-09-04 21:32:10 UTC (rev 2990) @@ -62,16 +62,16 @@ r[j,2:37]=monthly+(sig*dz*sqrt(3*dt)) - ddown[j,i,1]= ES((r[j,]),.99) + ddown[j,i,1]= ES((r[j,]),.99, method="modified") ddown[j,i,1][is.na(ddown[j,i,1])] <- 0 fddown[i,1]=fddown[i,1]+ddown[j,i,1] - ddown[j,i,2]= ES((r[j,]),.95) + ddown[j,i,2]= ES((r[j,]),.95, method="modified") ddown[j,i,2][is.na(ddown[j,i,2])] <- 0 fddown[i,2]=fddown[i,2]+ddown[j,i,2] - ddown[j,i,3]= ES((r[j,]),.90) + ddown[j,i,3]= ES((r[j,]),.90, method="modified") ddown[j,i,3][is.na(ddown[j,i,3])] <- 0 fddown[i,3]=fddown[i,3]+ddown[j,i,3] - ddown[j,i,4]= ES((r[j,]),.85) + ddown[j,i,4]= ES((r[j,]),.85, method="modified") ddown[j,i,4][is.na(ddown[j,i,4])] <- 0 fddown[i,4]=fddown[i,4]+ddown[j,i,4] assign("last.warning", NULL, envir = baseenv()) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R 2013-09-04 21:32:10 UTC (rev 2990) @@ -21,7 +21,7 @@ #' @param p confidence interval #' @param ... adiitional parameters #' @author Peter Carl, Brian Peterson, Shubhankit Mohan -#' @references Chekhlov, Alexei, Uryasev, Stanislav P. 
and Zabarankin, Michael, \emph{Drawdown Measure in Portfolio Optimization} (June 25, 2003). Available at SSRN: \url{http://ssrn.com/abstract=544742} or \url{http://dx.doi.org/10.2139/ssrn.544742} +#' @references Chekhlov, Alexei, Uryasev, Stanislav P. and Zabarankin, Michael, \emph{Drawdown Measure in Portfolio Optimization} (June 25, 2003). Paper available at SSRN: \url{http://ssrn.com/abstract=544742} or \url{http://dx.doi.org/10.2139/ssrn.544742} #' @keywords Conditional Drawdown models #' @examples #' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CalmarRatio.Norm.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CalmarRatio.Norm.R 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CalmarRatio.Norm.R 2013-09-04 21:32:10 UTC (rev 2990) @@ -27,7 +27,7 @@ #' traditionally and default .1 (10\%) #' @author Brian G. Peterson , Peter Carl , Shubhankit Mohan #' @references Bacon, Carl, Magdon-Ismail, M. and Amir Atiya,\emph{ Maximum drawdown. Risk Magazine,} 01 Oct 2004. -#' \url{http://www.cs.rpi.edu/~magdon/talks/mdd_NYU04.pdf} +#' Paper Available at : \url{http://www.cs.rpi.edu/~magdon/talks/mdd_NYU04.pdf} #' @keywords ts multivariate distribution models #' @examples #' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R 2013-09-04 21:32:10 UTC (rev 2990) @@ -14,7 +14,7 @@ #' @param ... Additional Parameters #' @author Peter Carl, Brian Peterson, Shubhankit Mohan #' @aliases Return.Geltner -#' @references \emph{Getmansky, Mila, Lo, Andrew W. 
and Makarov, Igor} An Econometric Model of Serial Correlation and Illiquidity in Hedge Fund Returns (March 1, 2003). MIT Sloan Working Paper No. 4288-03; MIT Laboratory for Financial Engineering Working Paper No. LFE-1041A-03; EFMA 2003 Helsinki Meetings. Available at SSRN: \url{http://ssrn.com/abstract=384700} +#' @references \emph{Getmansky, Mila, Lo, Andrew W. and Makarov, Igor} An Econometric Model of Serial Correlation and Illiquidity in Hedge Fund Returns (March 1, 2003). MIT Sloan Working Paper No. 4288-03; MIT Laboratory for Financial Engineering Working Paper No. LFE-1041A-03; EFMA 2003 Helsinki Meetings. Paper available at SSRN: \url{http://ssrn.com/abstract=384700} #' #' @keywords ts multivariate distribution models non-iid #' @examples Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R 2013-09-04 21:32:10 UTC (rev 2990) @@ -13,7 +13,7 @@ #' @seealso \code{\link[graphics]{boxplot}} #' @references Burghardt, G., and L. 
Liu, \emph{ It's the Autocorrelation, Stupid (November 2012) Newedge #' working paper.} -#' \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} +#' Paper Available at : \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} #' @keywords Autocorrelation lag factors #' @examples #' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd 2013-09-04 21:32:10 UTC (rev 2990) @@ -1,53 +1,52 @@ -\name{ACStdDev.annualized} -\alias{ACStdDev.annualized} -\alias{sd.annualized} -\alias{sd.multiperiod} -\alias{StdDev.annualized} -\title{Autocorrleation adjusted Standard Deviation} -\usage{ - ACStdDev.annualized(R, lag = 6, scale = NA, ...) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of asset returns} - - \item{lag}{: number of autocorrelated lag factors - inputted by user} - - \item{scale}{number of periods in a year (daily scale = - 252, monthly scale = 12, quarterly scale = 4)} - - \item{\dots}{any other passthru parameters} -} -\description{ - Incorporating the component of lagged autocorrelation - factor into adjusted time scale standard deviation - translation -} -\details{ - Given a sample of historical returns R(1),R(2), . . - .,R(T),the method assumes the fund manager smooths - returns in the following manner, when 't' is the unit - time interval: The square root time translation can be - defined as : \deqn{ \sigma(T) = T \sqrt\sigma(t)} -} -\examples{ -library(PerformanceAnalytics) -data(edhec) -ACStdDev.annualized(edhec,3) -} -\author{ - Peter Carl,Brian Peterson, Shubhankit Mohan - \url{http://en.wikipedia.org/wiki/Volatility_(finance)} -} -\references{ - Burghardt, G., and L. 
Liu, \emph{ It's the - Autocorrelation, Stupid (November 2012) Newedge working - paper.} - \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{ts} - +\name{ACStdDev.annualized} +\alias{ACStdDev.annualized} +\alias{sd.annualized} +\alias{sd.multiperiod} +\alias{StdDev.annualized} +\title{Autocorrleation adjusted Standard Deviation} +\usage{ + ACStdDev.annualized(R, lag = 6, scale = NA, ...) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{lag}{: number of autocorrelated lag factors + inputted by user} + + \item{scale}{number of periods in a year (daily scale = + 252, monthly scale = 12, quarterly scale = 4)} + + \item{\dots}{any other passthru parameters} +} +\description{ + Incorporating the component of lagged autocorrelation + factor into adjusted time scale standard deviation + translation +} +\details{ + Given a sample of historical returns R(1),R(2), . . + .,R(T),the method assumes the fund manager smooths + returns in the following manner, when 't' is the unit + time interval: The square root time translation can be + defined as : \deqn{ \sigma(T) = T \sqrt\sigma(t)} +} +\examples{ +library(PerformanceAnalytics) +data(edhec) +ACStdDev.annualized(edhec,3) +} +\author{ + Peter Carl,Brian Peterson, Shubhankit Mohan +} +\references{ + Burghardt, G., and L. 
Liu, \emph{ It's the + Autocorrelation, Stupid (November 2012) Newedge working + paper.} Paper Available at : + \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{ts} + Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/CalmarRatio.Norm.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/CalmarRatio.Norm.Rd 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/CalmarRatio.Norm.Rd 2013-09-04 21:32:10 UTC (rev 2990) @@ -46,7 +46,8 @@ } \references{ Bacon, Carl, Magdon-Ismail, M. and Amir Atiya,\emph{ - Maximum drawdown. Risk Magazine,} 01 Oct 2004. + Maximum drawdown. Risk Magazine,} 01 Oct 2004. Paper + Available at : \url{http://www.cs.rpi.edu/~magdon/talks/mdd_NYU04.pdf} } \keyword{distribution} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/Cdrawdown.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/Cdrawdown.Rd 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/Cdrawdown.Rd 2013-09-04 21:32:10 UTC (rev 2990) @@ -50,7 +50,7 @@ \references{ Chekhlov, Alexei, Uryasev, Stanislav P. and Zabarankin, Michael, \emph{Drawdown Measure in Portfolio - Optimization} (June 25, 2003). Available at SSRN: + Optimization} (June 25, 2003). 
Paper available at SSRN: \url{http://ssrn.com/abstract=544742} or \url{http://dx.doi.org/10.2139/ssrn.544742} } Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/GLMSmoothIndex.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/GLMSmoothIndex.Rd 2013-09-04 21:05:13 UTC (rev 2989) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/GLMSmoothIndex.Rd 2013-09-04 21:32:10 UTC (rev 2990) @@ -41,7 +41,7 @@ Illiquidity in Hedge Fund Returns (March 1, 2003). MIT Sloan Working Paper No. 4288-03; MIT Laboratory for Financial Engineering Working Paper No. LFE-1041A-03; - EFMA 2003 Helsinki Meetings. Available at SSRN: + EFMA 2003 Helsinki Meetings. Paper available at SSRN: \url{http://ssrn.com/abstract=384700} } \keyword{distribution} Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd 2013-09-04 21:32:10 UTC (rev 2990) @@ -0,0 +1,22 @@ +\name{QP.Norm} +\alias{QP.Norm} +\title{QP function for calculation of Sharpe Ratio} +\usage{ + QP.Norm(R, tau, scale = NA) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{tau}{Time Scale Translations Factor} + + \item{scale}{number of periods in a year (daily scale = + 252, monthly scale =} +} +\description{ + QP function for calculation of Sharpe Ratio +} +\seealso{ + \code{\link{CalmarRatio.Norm}}, \cr +} + Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/chart.Autocorrelation.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/chart.Autocorrelation.Rd 2013-09-04 21:05:13 UTC (rev 2989) +++ 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/chart.Autocorrelation.Rd 2013-09-04 21:32:10 UTC (rev 2990) @@ -1,44 +1,44 @@ -\name{chart.Autocorrelation} -\alias{chart.Autocorrelation} -\title{Stacked Bar Autocorrelation Plot} -\usage{ - chart.Autocorrelation(R) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of an asset return} -} -\value{ - Stack Bar plot of lagged return coefficients -} -\description{ - A wrapper to create box and whiskers plot of lagged - autocorrelation analysis -} -\details{ - We have also provided controls for all the symbols and - lines in the chart. One default, set by - \code{as.Tufte=TRUE}, will strip chartjunk and draw a - Boxplot per recommendations by Burghardt, Duncan and - Liu(2013) -} -\examples{ -data(edhec) -chart.Autocorrelation(edhec[,1]) -} -\author{ - Peter Carl, Brian Peterson, Shubhankit Mohan -} -\references{ - Burghardt, G., and L. Liu, \emph{ It's the - Autocorrelation, Stupid (November 2012) Newedge working - paper.} - \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} -} -\seealso{ - \code{\link[graphics]{boxplot}} -} -\keyword{Autocorrelation} -\keyword{factors} -\keyword{lag} - +\name{chart.Autocorrelation} +\alias{chart.Autocorrelation} +\title{Stacked Bar Autocorrelation Plot} +\usage{ + chart.Autocorrelation(R) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of an asset return} +} +\value{ + Stack Bar plot of lagged return coefficients +} +\description{ + A wrapper to create box and whiskers plot of lagged + autocorrelation analysis +} +\details{ + We have also provided controls for all the symbols and + lines in the chart. 
One default, set by + \code{as.Tufte=TRUE}, will strip chartjunk and draw a + Boxplot per recommendations by Burghardt, Duncan and + Liu(2013) +} +\examples{ +data(edhec) +chart.Autocorrelation(edhec[,1]) +} +\author{ + Peter Carl, Brian Peterson, Shubhankit Mohan +} +\references{ + Burghardt, G., and L. Liu, \emph{ It's the + Autocorrelation, Stupid (November 2012) Newedge working + paper.} Paper Available at : + \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} +} +\seealso{ + \code{\link[graphics]{boxplot}} +} +\keyword{Autocorrelation} +\keyword{factors} +\keyword{lag} + Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-04 21:32:10 UTC (rev 2990) @@ -0,0 +1,57 @@ +\name{table.EMaxDDGBM} +\alias{table.EMaxDDGBM} +\title{Expected Drawdown using Brownian Motion Assumptions} +\usage{ + table.EMaxDDGBM(R, digits = 4) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{digits}{significant number} +} +\description{ + Works on the model specified by Maddon-Ismail which + investigates the behavior of this statistic for a + Brownian motion with drift. +} +\details{ + If X(t) is a random process on [0, T ], the maximum + drawdown at time T , D(T), is defined by where \deqn{D(T) + = sup [X(s) - X(t)]} where s belongs to [0,t] and s + belongs to [0,T] Informally, this is the largest drop + from a peak to a bottom. In this paper, we investigate + the behavior of this statistic for a Brownian motion with + drift. In particular, we give an infinite series + representation of its distribution, and consider its + expected value. 
When the drift is zero, we give an + analytic expression for the expected value, and for + non-zero drift, we give an infinite series + representation. For all cases, we compute the limiting + \bold{(\eqn{T tends to \infty})} behavior, which can be + logarithmic (\eqn{\mu} > 0), square root (\eqn{\mu} = 0), + or linear (\eqn{\mu} < 0). +} +\examples{ +library(PerformanceAnalytics) +data(edhec) +table.EMaxDDGBM(edhec) +} +\author{ + Shubhankit Mohan +} +\references{ + Magdon-Ismail, M., Atiya, A., Pratap, A., and Yaser S. + Abu-Mostafa: On the Maximum Drawdown of a Browninan + Motion, Journal of Applied Probability 41, pp. 147-161, + 2004 + \url{http://alumnus.caltech.edu/~amir/drawdown-jrnl.pdf} +} +\keyword{Assumptions} +\keyword{Brownian} +\keyword{Drawdown} +\keyword{Expected} +\keyword{models} +\keyword{Motion} +\keyword{Using} + From noreply at r-forge.r-project.org Thu Sep 5 01:09:18 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 01:09:18 +0200 (CEST) Subject: [Returnanalytics-commits] r2991 - in pkg/FactorAnalytics: R man vignettes Message-ID: <20130904230918.5077318538D@r-forge.r-project.org> Author: chenyian Date: 2013-09-05 01:09:17 +0200 (Thu, 05 Sep 2013) New Revision: 2991 Modified: pkg/FactorAnalytics/R/fitFundamentalFactorModel.R pkg/FactorAnalytics/man/fitFundamentalFactorModel.Rd pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw Log: edit vignette. Modified: pkg/FactorAnalytics/R/fitFundamentalFactorModel.R =================================================================== --- pkg/FactorAnalytics/R/fitFundamentalFactorModel.R 2013-09-04 21:32:10 UTC (rev 2990) +++ pkg/FactorAnalytics/R/fitFundamentalFactorModel.R 2013-09-04 23:09:17 UTC (rev 2991) @@ -17,9 +17,10 @@ #' and Yindeng Jiang. 
Guy Yullen re-implemented the function in R and requires #' the following additional R libraries: zoo time series library, robust #' Insightful robust library ported to R and robustbase Basic robust statistics -#' package for R +#' package for R. Yi-An Chen from UW economics deparment re-organize the codes and finalize this +#' function. #' -#' +#' #' @param data data.frame, data must have \emph{assetvar}, \emph{returnvar}, \emph{datevar} #' , and exposure.names. Generally, data is panel data setup, so it needs firm variabales #' and time variables. Data has to be a balanced panel. Modified: pkg/FactorAnalytics/man/fitFundamentalFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/fitFundamentalFactorModel.Rd 2013-09-04 21:32:10 UTC (rev 2990) +++ pkg/FactorAnalytics/man/fitFundamentalFactorModel.Rd 2013-09-04 23:09:17 UTC (rev 2991) @@ -100,7 +100,9 @@ Jiang. Guy Yullen re-implemented the function in R and requires the following additional R libraries: zoo time series library, robust Insightful robust library ported - to R and robustbase Basic robust statistics package for R + to R and robustbase Basic robust statistics package for + R. Yi-An Chen from UW economics deparment re-organize the + codes and finalize this function. } \examples{ # BARRA type factor model Modified: pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw =================================================================== --- pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw 2013-09-04 21:32:10 UTC (rev 2990) +++ pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw 2013-09-04 23:09:17 UTC (rev 2991) @@ -8,21 +8,26 @@ \begin{document} \SweaveOpts{concordance=TRUE} -\title{factorAnalytics: Factor Model Fitting} +\title{factorAnalytics: A Concise User Guide} \author{Yi-An Chen} \maketitle \section{Introduction} -This vignette aims to help users to learn how to use fundamental factor model in \verb at factorAnalytics@ package. 
We will walk through users a few examples from scratch. +This vignette aims to help users to learn how to use fit factor model with \verb at factorAnalytics@ package. We will walk through users a few examples from data input to risk analysis and performance attribution. -\subsection{Fundamental Factor Model} +\section{Factor Model} A factor model is defined as \\ \begin{equation} r_t = bf_t + \epsilon_t\;,t=1 \cdots T \label{fm} \end{equation} -Where $r_t$ is N x 1, b is N x K and f is K x 1. N is number of variables and K is number of factors. b is usually called factor exposures or factor loadings and f is factor returns. $\epsilon_t$ is serial uncorrelated but may be cross-correlated. The model is useful to fit for examples asset returns. The famous CAPM (Capital Assets Pricing Model) is a one factor model with f equal to market returns. +Where $r_t$ is N x 1 exress returns, b is N x K and f is K x 1. N is number of variables and K is number of factors. b is usually called factor exposures or factor loadings, and b can be time-varying $b_t$ in fundamental factor model setting. f is factor returns. $\epsilon_t$ is serial uncorrelated but may be cross-correlated. The model is useful to fit asset pricing model. The famous CAPM (Capital Assets Pricing Model) is a one factor model with f equal to market returns. -In the case of fundamental factor model, we assume we know b, factor exposures which are assets characteristics, like market capitalization or book-to-market ratio. f is unknown and we can use OLS or WLS regression skills to estimate for each period. In specific, +\verb at factorAnalytics@ package provides 3 different kinds of factor models. That is fundamental factor model, statistical factor model and time series factor model. We will walk through them one by one. 
+ + +\subsection{Fundamental Factor Model} + +In the case of fundamental factor model, we assume we know factor exposures b which are assets characteristics, like market capitalization or book-to-market ratio. Therefore, $b_t$ is known and $f_t$ is unknown. We run cross-section OLS or WLS regression to estimate $f_t$ for each time period. In specific, \begin{equation}\label{ffm} r_t = f_M + b\hat{f_t} + \hat{\epsilon_t}\;,t=1 \cdots T \end{equation} @@ -30,17 +35,17 @@ This approach is also called BARRA type approach since it is initially developed by BARRA and later on been merged by MSCI. The famous Barra global equity model (GEM3) contains more than 50 factors. -\section{Example 1} -We will walk through the first examples in this section. We will use style factors like size. -\subsection{Loading Data} +\subsection{Example 1} +We will walk through the first examples in this section where use style factors like size are used. +\subsubsection{Loading Data} Let's look at the arguments of \verb at fitFundamentalFactorModel()@ which will deal with fundamental factor model in \verb at factorAnalytics@. <>= library(factorAnalytics) args(fitFundamentalFactorModel) @ -\verb at data@ is in class of \verb at data.frame@ and is required to have \emph{assetvar},\emph{returnvar} and \emph{datevar}. One can image data is like panel data setup and need firm variable and time variable. So data has dimension (N x T) and at least 3 consumes to specify information needed. +\verb at data@ is in class of \verb at data.frame@ and is required to have \emph{assetvar},\emph{returnvar} and \emph{datevar}. One can image \emph{data} is like panel data setup and need firm variable and time variable. Data has dimension (N x T) and at least 3 consumes to specify information needed. -We download data from CRSP/Compustat quarterly fundamental and name \verb at equity@ which contains 67 stocks from January 2000 to December 2013. 
+We download data from CRSP/Compustat quarterly fundamental and name it \verb at equity@. It contains 67 stocks and 106 time period from January 2000 to December 2013. <>= #equity <- data(equity) @@ -49,50 +54,51 @@ length(unique(equity$datadate)) # number of period t length(unique(equity$tic)) # number of assets @ -We want asset returns. +We need asset returns to run our model. We can utilize \verb at Delt()@ to calculate price percentage change which is exactly asset returns in \verb at quantmod@ package. <>= library(quantmod) # for Delt. See Delt for detail equity <- cbind(equity,do.call(rbind,lapply(split(equity,equity$tic), function(x) Delt(x$PRCCQ)))) names(equity)[22] <- "RET" @ -We want market value and book-to-market ratio too. market vale can be achieved by common stocks outstanding x price and book value we use common/ordinary equity value. We also take log on market value. +We want market value and book-to-market ratio to be our style factors. Market vale can be achieved by common stocks outstanding multiply price and book value is common/ordinary equity value. We take log for market value. <>== equity$MV <- log(equity$PRCCQ*equity$CSHOQ) equity$BM <- equity$CEQQ/equity$MV @ -now we use model \ref{ffm} where K=2, b = [ MV , BM ]. +now we will fit Equation \ref{ffm} with b = [MV BM]. -We will get an error message if \verb at datevar@ is not \verb at as.Date@ format compatible. In our example, our date variable is \emph{DATACQTR} and looks like "2000Q1". We have to convert it to \verb at as.Date@ compatible. We can utilize \verb at as.yearqtr@ to do it. Also, we will use character string for asset variable instead of factor. +We will get an error message if \verb at datevar@ is not \verb at as.Date@ format compatible. In our example, our date variable is \emph{DATACQTR} and looks like "2000Q1". We have to convert it to \verb at as.Date@ compatible. We can utilize \verb at as.yearqtr@ in \verb at xts@ package to do it. 
Also, we will use character string for asset variable instead of factor.\footnote{The best data input is to convert all your data into xts class since we use xts to compute everything in this package, although it is not restricted to it.} <>= a <- unlist( lapply(strsplit(as.character(equity$DATACQTR),"Q"), function(x) paste(x[[1]],"-",x[[2]],sep="") ) ) equity$yearqtr <- as.yearqtr(a,format="%Y-%q") equity$tic <- as.character(equity$tic) -equity <- subset(equity,yearqtr != "2000 Q1") -# delete the first element of each assets +equity <- subset(equity,yearqtr != "2000 Q1") # delete the first element of each assets @ -\subsection{Fit the Model} +\subsubsection{Fit the Model} fit the function: <>= fit.fund <- fitFundamentalFactorModel(exposure.names=c("BM","MV"),datevar="yearqtr", - returnsvar ="RET",assetvar="tic",wls=TRUE,data=equity) + returnsvar ="RET",assetvar="tic",wls=TRUE, + data=equity) names(fit.fund) @ -A few notice for fitting fundamental factor model. So far this function can only deal with balanced panel because we want to extract return covariance and residuals and so on. Second, \verb at datevar@ has to be \verb at as.Date@ compatible, otherwise the function can not read the time index. It is somehow inconvenient but make sure we will not mess up with time index. +A few notice for fitting fundamental factor model. So far this function can only deal with balanced panel because we want to extract return covariance and residuals and so on. Second, \verb at datevar@ has to be \verb at as.Date@ compatible, otherwise the function can not read time index. It is somehow inconvenient but make sure we will not mess up with any time issue. Default fit method for \verb at fitFundamentalFactorModel()@ is classic OLS and covariance matrix is also classic covariance matrix defined by \verb at covClassic()@ in \verb at robust@ package. One can change to robust estimation and robust covariance matrix estimation. 
\verb at returns.cov@ contains information about returns covariance. return covariance is -\[ \Sigma_x = B \Sigma_f B' + D \]. If \verb at full.resid.cov@ is \emph{FALSE}, D is diagonal matrix with variance of residuals in diagonal terms. If \emph{TRUE}, D is covariance matrix of residuals. +\[ \Sigma_x = B \Sigma_f B' + D \] +If \verb at full.resid.cov@ is \emph{FALSE}, D is diagonal matrix with variance of residuals in diagonal terms. If \emph{TRUE}, D is covariance matrix of residuals. <>= names(fit.fund$returns.cov) @ -Once can check out \verb at fit.fund$factor.cov@, \verb at fit.fund$resids.cov@ and \verb at fit.fund$resid.variance@ for detail. +Please check out \verb at fit.fund$factor.cov@, \verb at fit.fund$resids.cov@ and \verb at fit.fund$resid.variance@ yourself for detail. factor returns, residuals,t-stats are xts class. @@ -102,7 +108,7 @@ fit.fund$tstats @ -There are a few generic function \verb at predict@, \verb at summary@, \verb at print@ and \verb at plot@ one can utilize. +Output of \verb at fitFundamentalFactorModel()@ is of class \emph{FundamentalFactorModel}. There are generic function \verb at predict@, \verb at summary@, \verb at print@ and \verb at plot@ can be applied. <>= summary(fit.fund) predict(fit.fund) @@ -130,7 +136,7 @@ Enter an item from the menu, or 0 to exit \end{verbatim} -For example, choose 1 will give factor returns and it looks like +For example, choose 1 will give factor returns and it looks like in Figure \ref{fig1} <>= plot(fit.fund,which.plot=1,max.show=3) @ @@ -145,24 +151,24 @@ \label{fig1} \end{figure} -\section{Example 2: Barra type industry/country model} -In a global equity model or specific country equity model, modelers usually want to use industry/country dummies. In our example, we have 63 stocks in different industry. In specific, +\subsection{Example 2: Barra type industry/country model} +In a global equity model or specific country equity model, modelers can use industry/country dummies. 
In our example, we have 63 stocks in different industry. In specific, \begin{equation} -x_{it} = a_{i,t} + \Sigma_{j=1}^{J}b_{i,j}f_{i,t} + \epsilon_{i,t},\;for\,each\,i\,,t +x_{it} = \Sigma_{j=1}^{J}b_{i,j}f_{i,t} + \epsilon_{i,t},\;for\,each\,i\,,t \end{equation} where $b_{i,j} = 1$ if stock i in industry j and $b_{i,j}=0$ otherwise. In matrix form:\[ x_t = Bf_t + \epsilon_t \] and B is the N X J matrix of industry dummies. -\emph{SPCINDCD} in our data are $S\&P$ industry code, what we only have to do to fit industry model is to add this variable name into \verb at exposure.names@. Be sure this variable is \emph{character} not \emph{numeric}. Otherwise the function will not create dummies. +\emph{SPCINDCD} in our \verb at equity@ contains $S\&P$ industry codes, we add this variable name into \verb at exposure.names@ and we can fit Barra type industry model. Be sure this variable is of class\emph{character} not \emph{numeric}. Otherwise the function will not create dummies. <>= equity$SPCINDCD <- as.character(equity$SPCINDCD) fit.ind <- fitFundamentalFactorModel(exposure.names=c("SPCINDCD"),datevar="yearqtr", - returnsvar ="RET",assetvar="tic",wls=FALSE,data=equity) + returnsvar ="RET",assetvar="tic",wls=FALSE, + data=equity) @ -One can also use generic function to do plot, summary... -\verb at fitFundamentalFactorModel()@ support industry/country dummy factor exposures and style factor exposures together. Try +\verb at fitFundamentalFactorModel()@ supports mixed model like fit industry/country dummy factor exposures and style factor exposures together. For example, <>= fit.mix <- fitFundamentalFactorModel(exposure.names=c("BM","MV","SPCINDCD"), datevar="yearqtr",returnsvar ="RET", @@ -170,15 +176,15 @@ @ -\section{Standardizing Factor Exposure} +\subsubsection{Standardizing Factor Exposure} It is common to standardize factor exposure to have weight mean 0 and standard deviation equal to 1. 
The weight are often taken as proportional to square root of market capitalization, although other weighting schemes are possible. We will try example 1 but with standardized factor exposure with square root of market capitalization. First we create a weighting variable. <>= -equity$weight <- sqrt(exp(equity$MV)) # we take log for MV before. +equity$weight <- sqrt(exp(equity$MV)) # we took log for MV before. @ -We can choose \verb at standardized.factor.exposure@ to be \verb at TRUE@ and \verb at weight.var@ equal to weighting variable. +We can choose \verb at standardized.factor.exposure@ to be \verb at TRUE@ and \verb at weight.var@ equals to weighting variable. <>= fit.fund2 <- fitFundamentalFactorModel(exposure.names=c("BM","MV"), datevar="yearqtr",returnsvar ="RET", @@ -187,21 +193,21 @@ weight.var = "weight" ) @ -\section{Statistical Factor Model} +The advantage of weight facotr exposures is better interpretation of factor returns. $f_t$ can be interpreted as long-short zero investment portfolio. In our case, $f_{MVt}$ will long big size stocks and short small size stocks. -In statistical factor model, neither factor exposure b (normally called factor loadings in statistical factor model) nor factor returns $f_t$ are observed in equation \ref{fm2}: +\subsection{Statistical Factor Model} + +In statistical factor model, neither factor exposure b (normally called factor loadings in statistical factor model) nor factor returns $f_t$ are observed in equation \ref{fm}. So we can rewrite the model as: \begin{equation} r_t = bf_t + \epsilon_t\;,t=1 \cdots T \label{fm2} \end{equation} Factor returns $f_t$ can be calculated as principle components of covariance matrix of assets returns if number of asset N is less than the number of time period T, and factor loadings can be calculated using conventional least square technique. - - By default, the first principle component or factor will explain the most variation of returns covariance matrix and so on. 
In some cases, when number of assets N is larger than number of time period T. Connor and Korajczyk (1986) develop an alternative method called asymptotic principal components, building on the approximate factor model theory of Chamberlain and Rothschild (1983). Connor and Korajczyk analyze the eigenvector of the T X T cross product of matrix returns rather then N X N covariance matrix of returns. They show the first k eigenvectors of this cross product matrix provide consistent estimates of the k X T matrix of factor returns. -We can use function \verb at fitStatisticalFactorModel@ to fit statistical factor model. First, we need asset returns in time series or xts format. We choose xts to work with because time index is easy to handle but this is not restricted to the model fit. +We can use function \verb at fitStatisticalFactorModel@ to fit statistical factor model. First, we need asset returns in time series or xts class. We choose xts to work with because time index is easy to handle but this is not restricted to the function. <>= library(xts) @@ -216,7 +222,7 @@ ret <- ret[,-1] dim(ret) @ -We then fit the model. The data \verb at ret@ has 63 assets and 52 time period. So we will exploit asymptotic principal components analysis to find factor returns. There are two ways to find numbers of factors, Connor and Korajczyk(1995) and Bai and Ng (2002) are provided in the function. We will use Bai and Ng (2002) to choose the numbers of factors. +The data \verb at ret@ contians 63 assets and 52 time periods. We will exploit asymptotic principal components analysis to fit statistical model. There are two ways to find numbers of factors, Connor and Korajczyk(1995) and Bai and Ng (2002). Both are provided in our function. We will use Bai and Ng (2002) to choose the numbers of factors. <>= fit.stat <- fitStatisticalFactorModel(data=ret, @@ -224,7 +230,7 @@ names(fit.stat) @ -5 factors is chosen by Bai and Ng (2002). Factor returns can be found using \verb@$factors at . 
+5 factors are chosen by Bai and Ng (2002). Factor returns can be found in \verb at fit.stat$factors at . <>= fit.stat$k @ @@ -260,8 +266,8 @@ Similar to \verb at fitFundamentalFactorModel@, generic functions like \verb at summary@, \verb at print@, \verb at plot@ and \verb at predict@ can be used for statistical factor model. -\section{Time Series Factor Model} -In Time Series factor model, factor returns $f_t$ is observed and taken as macroeconomic time series like GDP or other time series like market returns or credit spread. In our package, we have provided some common used times series in data set \verb at CommonFactors@. \verb at factors@ is monthly time series and \verb at factors.Q@ is quarterly time series. +\subsection{Time Series Factor Model} +In Time Series factor model, factor returns $f_t$ is observed and taken as macroeconomic time series like GDP or other time series like market returns or credit spread. In our package, we provide some commonly used time series in data set \verb at CommonFactors@. \verb at factors@ is monthly time series and \verb at factors.Q@ is quarterly time series. <>= data(CommonFactors) @@ -275,10 +281,12 @@ ts.data <- na.omit(merge(ret,ts.factors)) @ -We will use SP500, 10 years and 3 months term spread and difference of VIX as our factors. +We will use SP500, 10 years and 3 months term spread and difference of VIX as our common factors. <>= -fit.time <- fitTimeSeriesFactorModel(assets.names=tic,factors.names=c("SP500","Term.Spread","dVIX"),data=ts.data,fit.method="OLS") +fit.time <- fitTimeSeriesFactorModel(assets.names=tic, + factors.names=c("SP500","Term.Spread","dVIX"), + data=ts.data,fit.method="OLS") @ \verb at asset.fit@ can show model fit for each asset, for example for asset \verb at AA@. <>= @ -\verb at fitTimeSeriesFactorModel@ also have various variable selection algorithm to choose. One can include all the factor and let the function to decide which one is the best model.
For example, we include all common factors and use method \verb at stepwise@ which utilizes \verb at step@ function in \verb at stat@ package +\verb at fitTimeSeriesFactorModel@ also has various variable selection algorithms to choose. One can include every factor and let the function decide which one is the best model. For example, we include every common factor and use method \verb at stepwise@ which utilizes the \verb at step@ function in the \verb at stats@ package <>= -fit.time2 <- fitTimeSeriesFactorModel(assets.names=tic,factors.names=names(ts.factors),data=ts.data,fit.method="OLS",variable.selection = "stepwise") +fit.time2 <- fitTimeSeriesFactorModel(assets.names=tic, + factors.names=names(ts.factors), + data=ts.data,fit.method="OLS", + variable.selection = "stepwise") @ There are 5 factors chosen for asset AA for example. <>= @@ -302,7 +313,7 @@ \section{Risk Analysis} \subsection{Factor Model Risk Budgeting} -One can do risk analysis easily with factor model. According to Meucci (2007), factor model can be represented as +One can do risk analysis with factor model. According to Meucci (2007), factor model can be represented as \begin{align} r_{it} &= \alpha_i + \beta_{i1}f_{1t} + \beta_{i2}f_{2t} + \cdots + \beta_{ik}f_{kt} + \sigma_{i}z_{it},\;i=1 \cdots N,\;t=1 \cdots T \\ @@ -321,11 +332,12 @@ $\beta_{ki}\frac{\partial RM_i}{\partial \beta_{ki}}$ is called component contribution of factor k to $RM_i$ \\ $\beta_{ki}\frac{\partial RM_i}{\partial \beta_{ki}}/RM_i$ is called percentage contribution of factor k to $RM_i$ -\verb at factorAnalytics@ package provide 3 different risk metrics decomposition, Standard deviation (Std), Value-at-Risk (VaR) and Expected Shortfall (ES) with historical distribution, Normal distribution and Cornish-Fisher distribution. +\verb at factorAnalytics@ package provides 3 different risk metrics decomposition, Standard deviation (Std), Value-at-Risk (VaR) and Expected Shortfall (ES).
Each one comes with a different distribution such as historical distribution, Normal distribution and Cornish-Fisher distribution. -For example, factor model VaR decomposition with Normal distribution of asset AA for a statistical factor model. +This example shows factor model VaR decomposition with Normal distribution of asset AA for a statistical factor model. <>= -data.rd <- cbind(ret[,"AA"],fit.stat$factors,fit.stat$residuals[,"AA"]/sqrt(fit.stat$resid.variance["AA"])) +data.rd <- cbind(ret[,"AA"],fit.stat$factors, + fit.stat$residuals[,"AA"]/sqrt(fit.stat$resid.variance["AA"])) var.decp <- factorModelVaRDecomposition(data.rd,fit.stat$loadings[,"AA"], fit.stat$resid.variance["AA"],tail.prob=0.05, VaR.method="gaussian") @@ -339,7 +351,7 @@ @ It looks like the second factor contributes the largest risk to asset AA. -One can use \verb at plot@ method to see barplot of risk budgeting. Default is to show 6 assets. Figure \ref{fig4} shows componenet contribution to VaR for several different assets. +One can use \verb at plot()@ method to see barplot of risk budgeting. Default is to show 6 assets. Figure \ref{fig4} shows component contribution to VaR for several different assets. <>= plot(fit.stat,which.plot=8,legend.text=TRUE, args.legend=list(x="topright"), @@ -384,12 +396,17 @@ \end{equation} can break down asset returns into two pieces. The first term is \emph{returns attributed to factors} $Bf_t$ and the second term is called \emph{specific returns} which is simply $\alpha + e_t$. -For example, let's breakdown time series factor model we usded previously. Function \verb at factorModelPerformanceAttribution()@ can help us to calculate performance attribution. +For example, we can break down time series factor model.\\ +Function \verb at factorModelPerformanceAttribution()@ can help us to calculate performance attribution. <>= ts.attr <- factorModelPerformanceAttribution(fit.time) names(ts.attr) @ There are 3 items generated by the function.
\verb at cum.ret.attr.f@ will return a N x K matrix with cumulative returns attributed to factors. \verb at cum.spec.ret@ will return a N x 1 matrix with cumulative specific returns. \verb at attr.list@ will return a list which contains returns attribution to each factor and specific returns asset by asset. In addition, a \emph{FM.attribution} class will be generated and generic functions \verb at print()@, \verb at summary()@ and \verb at plot()@ can be applied to it. +\subsection{Benchmark and Active Returns} +Portfolio performance is usually compared to a similar type of benchmark. A US equity portfolio will compare its performance with the S$\&$P 500 index for example. Therefore, \emph{active returns} under active management are of interest. We define active returns = assets returns - benchmark. + +We can also calculate active return attribution by simply fitting active returns with a fundamental factor model, statistical factor model or time series factor model and calculating with \verb at factorModelPerformanceAttribution()@.
\end{document} \ No newline at end of file From noreply at r-forge.r-project.org Thu Sep 5 02:09:22 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 02:09:22 +0200 (CEST) Subject: [Returnanalytics-commits] r2992 - pkg/PerformanceAnalytics/sandbox/pulkit/R Message-ID: <20130905000922.A6310185E06@r-forge.r-project.org> Author: pulkit Date: 2013-09-05 02:09:22 +0200 (Thu, 05 Sep 2013) New Revision: 2992 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R Log: Improved error handling in Probabilistic Sharpe Ratio Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R 2013-09-04 23:09:17 UTC (rev 2991) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R 2013-09-05 00:09:22 UTC (rev 2992) @@ -97,12 +97,23 @@ Rf = checkData(Rf) } #If the Reference Sharpe Ratio is greater than the Observred Sharpe Ratio an error is displayed - if(length(which(refSR>sr))!=0){ - stop("The Reference Sharpe Ratio should be less than the Observed Sharpe Ratio") + index = which(refSR>sr) + if(length(index)!=0){ + if(length(index)==columns){ + stop("The reference Sharpe Ratio greater than the Observed Sharpe ratio for all the cases") + } + sr = sr[-index] + refSR = refSR[-index] + sk = sk[-index] + kr = kr[-index] + warning(paste("The Reference Sharpe Ratio greater than the Observed Sharpe Ratio for case",columnnames[index],"\n")) + } result = pnorm(((sr - refSR)*(n-1)^(0.5))/(1-sr*sk+sr^2*(kr-1)/4)^(0.5)) + columnnames = columnnames[-index] if(!is.null(dim(result))){ colnames(result) = paste(columnnames,"(SR >",refSR,")") + rownames(result) = paste("Probabilistic Sharpe Ratio(p=",round(p*100,1),"%):") } return(result) From noreply at r-forge.r-project.org Thu Sep 5 02:11:34 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 
2013 02:11:34 +0200 (CEST) Subject: [Returnanalytics-commits] r2993 - pkg/PerformanceAnalytics/sandbox/pulkit/R Message-ID: <20130905001134.6E10A185E06@r-forge.r-project.org> Author: pulkit Date: 2013-09-05 02:11:33 +0200 (Thu, 05 Sep 2013) New Revision: 2993 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R Log: Improved error handling in Minimum Track Record Length Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R 2013-09-05 00:09:22 UTC (rev 2992) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R 2013-09-05 00:11:33 UTC (rev 2993) @@ -98,12 +98,19 @@ if(!is.null(dim(Rf))){ Rf = checkData(Rf) } - #If the refSR is greater than SR an error is displayed - if(length(which(refSR>sr))!=0){ - stop("The Reference Sharpe Ratio should be less than the Observed Sharpe Ratio") + index = which(refSR>sr) + if(length(index)!=0){ + if(length(index)==columns){ + stop("The reference Sharpe Ratio greater than the Observed Sharpe ratio for all the cases") + } + sr = sr[-index] + refSR = refSR[-index] + sk = sk[-index] + kr = kr[-index] + warning(paste("The Reference Sharpe Ratio greater than the Observed Sharpe Ratio for case",columnnames[index],"\n")) } - result = 1 + (1 - sk*sr + ((kr-1)/4)*sr^2)*(qnorm(p)/(sr-refSR))^2 + columnnames = columnnames[-index] if(!is.null(dim(result))){ colnames(result) = paste(columnnames,"(SR >",refSR,")") rownames(result) = paste("Probabilistic Sharpe Ratio(p=",round(p*100,1),"%):") From noreply at r-forge.r-project.org Thu Sep 5 03:40:11 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 03:40:11 +0200 (CEST) Subject: [Returnanalytics-commits] r2994 - in pkg/PortfolioAnalytics: . 
R demo man Message-ID: <20130905014011.3B9ED185D21@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-05 03:40:09 +0200 (Thu, 05 Sep 2013) New Revision: 2994 Added: pkg/PortfolioAnalytics/man/barplotGroupWeights.Rd pkg/PortfolioAnalytics/man/constraint_v2.Rd Removed: pkg/PortfolioAnalytics/man/HHI.Rd pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd pkg/PortfolioAnalytics/man/chart.RiskReward.Rd pkg/PortfolioAnalytics/man/chart.Scatter.DE.Rd pkg/PortfolioAnalytics/man/chart.Scatter.GenSA.Rd pkg/PortfolioAnalytics/man/chart.Scatter.ROI.Rd pkg/PortfolioAnalytics/man/chart.Scatter.RP.Rd pkg/PortfolioAnalytics/man/chart.Scatter.pso.Rd pkg/PortfolioAnalytics/man/chart.Weights.DE.Rd pkg/PortfolioAnalytics/man/chart.Weights.GenSA.Rd pkg/PortfolioAnalytics/man/chart.Weights.ROI.Rd pkg/PortfolioAnalytics/man/chart.Weights.RP.Rd pkg/PortfolioAnalytics/man/chart.Weights.Rd pkg/PortfolioAnalytics/man/chart.Weights.pso.Rd pkg/PortfolioAnalytics/man/charts.DE.Rd pkg/PortfolioAnalytics/man/charts.GenSA.Rd pkg/PortfolioAnalytics/man/charts.ROI.Rd pkg/PortfolioAnalytics/man/charts.RP.Rd pkg/PortfolioAnalytics/man/charts.pso.Rd pkg/PortfolioAnalytics/man/extractStats.optimize.portfolio.DEoptim.Rd pkg/PortfolioAnalytics/man/extractStats.optimize.portfolio.GenSA.Rd pkg/PortfolioAnalytics/man/extractStats.optimize.portfolio.ROI.Rd pkg/PortfolioAnalytics/man/extractStats.optimize.portfolio.parallel.Rd pkg/PortfolioAnalytics/man/extractStats.optimize.portfolio.pso.Rd pkg/PortfolioAnalytics/man/extractStats.optimize.portfolio.random.Rd pkg/PortfolioAnalytics/man/extractWeights.optimize.portfolio.Rd pkg/PortfolioAnalytics/man/extractWeights.optimize.portfolio.rebalancing.Rd Modified: pkg/PortfolioAnalytics/DESCRIPTION pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/applyFUN.R pkg/PortfolioAnalytics/R/chart.RiskReward.R pkg/PortfolioAnalytics/R/chart.Weights.R pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/R/charts.GenSA.R 
pkg/PortfolioAnalytics/R/charts.PSO.R pkg/PortfolioAnalytics/R/charts.ROI.R pkg/PortfolioAnalytics/R/charts.RP.R pkg/PortfolioAnalytics/R/charts.efficient.frontier.R pkg/PortfolioAnalytics/R/charts.groups.R pkg/PortfolioAnalytics/R/constrained_objective.R pkg/PortfolioAnalytics/R/constraint_fn_map.R pkg/PortfolioAnalytics/R/constraints.R pkg/PortfolioAnalytics/R/extract.efficient.frontier.R pkg/PortfolioAnalytics/R/extractstats.R pkg/PortfolioAnalytics/R/generics.R pkg/PortfolioAnalytics/R/objective.R pkg/PortfolioAnalytics/R/objectiveFUN.R pkg/PortfolioAnalytics/R/optimize.portfolio.R pkg/PortfolioAnalytics/R/random_portfolios.R pkg/PortfolioAnalytics/demo/00Index pkg/PortfolioAnalytics/man/add.objective.Rd pkg/PortfolioAnalytics/man/applyFUN.Rd pkg/PortfolioAnalytics/man/chart.EfficientFrontierOverlay.Rd pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd pkg/PortfolioAnalytics/man/constrained_objective.Rd pkg/PortfolioAnalytics/man/constraint.Rd pkg/PortfolioAnalytics/man/create.EfficientFrontier.Rd pkg/PortfolioAnalytics/man/extract.efficient.frontier.Rd pkg/PortfolioAnalytics/man/extractStats.Rd pkg/PortfolioAnalytics/man/extractWeights.Rd pkg/PortfolioAnalytics/man/fn_map.Rd pkg/PortfolioAnalytics/man/is.objective.Rd pkg/PortfolioAnalytics/man/objective.Rd pkg/PortfolioAnalytics/man/optimize.portfolio.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.DEoptim.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.GenSA.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.ROI.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.pso.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.random.Rd pkg/PortfolioAnalytics/man/position_limit_constraint.Rd pkg/PortfolioAnalytics/man/print.constraint.Rd pkg/PortfolioAnalytics/man/print.efficient.frontier.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.DEoptim.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.GenSA.Rd 
pkg/PortfolioAnalytics/man/print.optimize.portfolio.ROI.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.pso.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.random.Rd pkg/PortfolioAnalytics/man/print.portfolio.Rd pkg/PortfolioAnalytics/man/randomize_portfolio.Rd pkg/PortfolioAnalytics/man/scatterFUN.Rd pkg/PortfolioAnalytics/man/summary.efficient.frontier.Rd pkg/PortfolioAnalytics/man/summary.optimize.portfolio.Rd pkg/PortfolioAnalytics/man/summary.optimize.portfolio.rebalancing.Rd pkg/PortfolioAnalytics/man/summary.portfolio.Rd pkg/PortfolioAnalytics/man/txfrm_position_limit_constraint.Rd pkg/PortfolioAnalytics/man/update.constraint.Rd pkg/PortfolioAnalytics/man/weight_sum_constraint.Rd Log: Cleaning up documentation to attempt to pass R CMD check. Modified: pkg/PortfolioAnalytics/DESCRIPTION =================================================================== --- pkg/PortfolioAnalytics/DESCRIPTION 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/DESCRIPTION 2013-09-05 01:40:09 UTC (rev 2994) @@ -15,8 +15,9 @@ PerformanceAnalytics (>= 1.0.0) Suggests: quantmod, - DEoptim(>= 2.3.1), + DEoptim(>= 2.2.1), foreach, + iterators, fGarch, Rglpk, quadprog, Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-05 01:40:09 UTC (rev 2994) @@ -3,48 +3,17 @@ export(applyFUN) export(box_constraint) export(CCCgarch.MM) -export(chart.EfficientFrontier.efficient.frontier) -export(chart.EfficientFrontier.optimize.portfolio.ROI) -export(chart.EfficientFrontier.optimize.portfolio) export(chart.EfficientFrontier) export(chart.EfficientFrontierOverlay) export(chart.GroupWeights) export(chart.RiskBudget) -export(chart.RiskReward.optimize.portfolio.DEoptim) -export(chart.RiskReward.optimize.portfolio.GenSA) -export(chart.RiskReward.optimize.portfolio.pso) 
-export(chart.RiskReward.optimize.portfolio.random) -export(chart.RiskReward.optimize.portfolio.ROI) export(chart.RiskReward) -export(chart.Scatter.DE) -export(chart.Scatter.GenSA) -export(chart.Scatter.pso) -export(chart.Scatter.ROI) -export(chart.Scatter.RP) -export(chart.Weights.DE) -export(chart.Weights.EF.efficient.frontier) -export(chart.Weights.EF.optimize.portfolio) export(chart.Weights.EF) -export(chart.Weights.GenSA) -export(chart.Weights.optimize.portfolio.DEoptim) -export(chart.Weights.optimize.portfolio.GenSA) -export(chart.Weights.optimize.portfolio.pso) -export(chart.Weights.optimize.portfolio.random) -export(chart.Weights.optimize.portfolio.ROI) -export(chart.Weights.pso) -export(chart.Weights.ROI) -export(chart.Weights.RP) export(chart.Weights) -export(charts.DE) -export(charts.GenSA) -export(charts.pso) -export(charts.ROI) -export(charts.RP) export(constrained_group_tmp) export(constrained_objective_v2) export(constrained_objective) export(constraint_ROI) -export(constraint_v2) export(constraint) export(create.EfficientFrontier) export(diversification_constraint) @@ -53,15 +22,7 @@ export(extractEfficientFrontier) export(extractGroups) export(extractObjectiveMeasures) -export(extractStats.optimize.portfolio.DEoptim) -export(extractStats.optimize.portfolio.GenSA) -export(extractStats.optimize.portfolio.parallel) -export(extractStats.optimize.portfolio.pso) -export(extractStats.optimize.portfolio.random) -export(extractStats.optimize.portfolio.ROI) export(extractStats) -export(extractWeights.optimize.portfolio.rebalancing) -export(extractWeights.optimize.portfolio) export(extractWeights) export(factor_exposure_constraint) export(fn_map) @@ -69,6 +30,7 @@ export(get_constraints) export(group_constraint) export(group_fail) +export(HHI) export(insert_constraints) export(insert_objectives) export(is.constraint) @@ -82,24 +44,10 @@ export(optimize.portfolio.parallel) export(optimize.portfolio.rebalancing) export(optimize.portfolio) 
-export(plot.optimize.portfolio.DEoptim) -export(plot.optimize.portfolio.GenSA) -export(plot.optimize.portfolio.pso) -export(plot.optimize.portfolio.random) -export(plot.optimize.portfolio.ROI) -export(plot.optimize.portfolio) export(portfolio_risk_objective) export(portfolio.spec) export(pos_limit_fail) export(position_limit_constraint) -export(print.constraint) -export(print.efficient.frontier) -export(print.optimize.portfolio.DEoptim) -export(print.optimize.portfolio.GenSA) -export(print.optimize.portfolio.pso) -export(print.optimize.portfolio.random) -export(print.optimize.portfolio.ROI) -export(print.portfolio) export(quadratic_utility_objective) export(random_portfolios_v1) export(random_portfolios_v2) @@ -116,10 +64,6 @@ export(set.portfolio.moments_v1) export(set.portfolio.moments_v2) export(set.portfolio.moments) -export(summary.efficient.frontier) -export(summary.optimize.portfolio.rebalancing) -export(summary.optimize.portfolio) -export(summary.portfolio) export(trailingFUN) export(turnover_constraint) export(turnover_objective) @@ -129,7 +73,48 @@ export(txfrm_position_limit_constraint) export(txfrm_weight_sum_constraint) export(update_constraint_v1tov2) -export(update.constraint) export(var.portfolio) export(weight_concentration_objective) export(weight_sum_constraint) +S3method(chart.EfficientFrontier,efficient.frontier) +S3method(chart.EfficientFrontier,optimize.portfolio.ROI) +S3method(chart.EfficientFrontier,optimize.portfolio) +S3method(chart.RiskReward,optimize.portfolio.DEoptim) +S3method(chart.RiskReward,optimize.portfolio.GenSA) +S3method(chart.RiskReward,optimize.portfolio.pso) +S3method(chart.RiskReward,optimize.portfolio.random) +S3method(chart.RiskReward,optimize.portfolio.ROI) +S3method(chart.Weights,optimize.portfolio.DEoptim) +S3method(chart.Weights,optimize.portfolio.GenSA) +S3method(chart.Weights,optimize.portfolio.pso) +S3method(chart.Weights,optimize.portfolio.random) +S3method(chart.Weights,optimize.portfolio.ROI) 
+S3method(chart.Weights.EF,efficient.frontier) +S3method(chart.Weights.EF,optimize.portfolio) +S3method(extractStats,optimize.portfolio.DEoptim) +S3method(extractStats,optimize.portfolio.GenSA) +S3method(extractStats,optimize.portfolio.parallel) +S3method(extractStats,optimize.portfolio.pso) +S3method(extractStats,optimize.portfolio.random) +S3method(extractStats,optimize.portfolio.ROI) +S3method(extractWeights,optimize.portfolio.rebalancing) +S3method(extractWeights,optimize.portfolio) +S3method(plot,optimize.portfolio.DEoptim) +S3method(plot,optimize.portfolio.GenSA) +S3method(plot,optimize.portfolio.pso) +S3method(plot,optimize.portfolio.random) +S3method(plot,optimize.portfolio.ROI) +S3method(plot,optimize.portfolio) +S3method(print,constraint) +S3method(print,efficient.frontier) +S3method(print,optimize.portfolio.DEoptim) +S3method(print,optimize.portfolio.GenSA) +S3method(print,optimize.portfolio.pso) +S3method(print,optimize.portfolio.random) +S3method(print,optimize.portfolio.ROI) +S3method(print,portfolio) +S3method(summary,efficient.frontier) +S3method(summary,optimize.portfolio.rebalancing) +S3method(summary,optimize.portfolio) +S3method(summary,portfolio) +S3method(update,constraint) Modified: pkg/PortfolioAnalytics/R/applyFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/applyFUN.R 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/R/applyFUN.R 2013-09-05 01:40:09 UTC (rev 2994) @@ -3,9 +3,9 @@ #' This function is used to calculate risk or return metrics given a matrix of #' weights and is primarily used as a convenience function used in chart.Scatter functions #' -#' @param R +#' @param R xts object of asset returns #' @param weights a matrix of weights generated from random_portfolios or \code{optimize.portfolio} -#' @param FUN +#' @param FUN name of a function #' @param ... 
any passthrough arguments to FUN #' @author Ross Bennett #' @export @@ -90,8 +90,8 @@ #' This function is used to calculate risk or return metrics given a matrix of #' asset returns and will be used for a risk-reward scatter plot of the assets #' -#' @param R -#' @param FUN +#' @param R xts object of asset returns +#' @param FUN name of function #' @param ... any passthrough arguments to FUN #' @author Ross Bennett #' @export Modified: pkg/PortfolioAnalytics/R/chart.RiskReward.R =================================================================== --- pkg/PortfolioAnalytics/R/chart.RiskReward.R 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/R/chart.RiskReward.R 2013-09-05 01:40:09 UTC (rev 2994) @@ -2,21 +2,32 @@ #' classic risk reward scatter #' +#' \code{neighbors} may be specified in three ways. +#' The first is as a single number of neighbors. This will extract the \code{neighbors} closest +#' portfolios in terms of the \code{out} numerical statistic. +#' The second method consists of a numeric vector for \code{neighbors}. +#' This will extract the \code{neighbors} with portfolio index numbers that correspond to the vector contents. +#' The third method for specifying \code{neighbors} is to pass in a matrix. +#' This matrix should look like the output of \code{\link{extractStats}}, and should contain +#' \code{risk.col},\code{return.col}, and weights columns all properly named. +#' #' @param object optimal portfolio created by \code{\link{optimize.portfolio}} -#' @param neighbors set of 'neighbor' portfolios to overplot, see Details in \code{\link{charts.DE}} -#' @param ... any other passthru parameters -#' @param rp TRUE/FALSE. 
If TRUE, random portfolios are generated by \code{\link{random_portfolios}} to view the feasible space +#' @param neighbors set of 'neighbor' portfolios to overplot, see Details +#' @param \dots any other passthru parameters #' @param return.col string matching the objective of a 'return' objective, on vertical axis #' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis #' @param chart.assets TRUE/FALSE. Includes a risk reward scatter of the assets in the chart +#' @param element.color color for the default plot scatter points #' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param element.color color for the default plot scatter points #' @param xlim set the x-axis limit, same as in \code{\link{plot}} #' @param ylim set the y-axis limit, same as in \code{\link{plot}} #' @seealso \code{\link{optimize.portfolio}} +#' @rdname chart.RiskReward +#' @aliases chart.RiskReward.optimize.portfolio.DEoptim chart.RiskReward.optimize.portfolio.RP +#' chart.RiskReward.optimize.portfolio.ROI chart.RiskReward.optimize.portfolio.pso +#' chart.RiskReward.optimize.portfolio.GenSA #' @export -chart.RiskReward <- function(object, neighbors, ..., rp=FALSE, return.col="mean", risk.col="ES", element.color = "darkgray", cex.axis=0.8, ylim=NULL, xlim=NULL){ +chart.RiskReward <- function(object, neighbors, ..., return.col, risk.col, chart.assets, element.color, cex.axis, xlim, ylim){ UseMethod("chart.RiskReward") } - Modified: pkg/PortfolioAnalytics/R/chart.Weights.R =================================================================== --- pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-05 01:40:09 UTC (rev 2994) @@ -5,6 +5,8 @@ #' #' @param object optimal portfolio object created by \code{\link{optimize.portfolio}} #' @param neighbors set of 'neighbor' portfolios to overplot +#' @param \dots any other 
passthru parameters +#' @param main an overall title for the plot: see \code{\link{title}} #' @param las numeric in \{0,1,2,3\}; the style of axis labels #' \describe{ #' \item{0:}{always parallel to the axis [\emph{default}],} @@ -14,11 +16,12 @@ #' } #' @param xlab a title for the x axis: see \code{\link{title}} #' @param cex.lab The magnification to be used for x and y labels relative to the current setting of \code{cex} +#' @param element.color color for the default plot lines #' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param element.color color for the default plot lines -#' @param ... any other passthru parameters -#' @param main an overall title for the plot: see \code{\link{title}} #' @seealso \code{\link{optimize.portfolio}} +#' @rdname chart.Weights +#' @name chart.Weights +#' @aliases chart.Weights.optimize.portfolio.ROI chart.Weights.optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.pso chart.Weights.optimize.portfolio.RP chart.Weights.optimize.portfolio.GenSA #' @export chart.Weights <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ UseMethod("chart.Weights") Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-05 01:40:09 UTC (rev 2994) @@ -10,8 +10,7 @@ # ############################################################################### -#' @rdname chart.Weights -#' @export + chart.Weights.DE <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ # Specific to the output of optimize.portfolio with optimize_method="DEoptim" if(!inherits(object, "optimize.portfolio.DEoptim")) stop("object must be of class 
'optimize.portfolio.DEoptim'") @@ -82,12 +81,12 @@ box(col = element.color) } -#' @rdname chart.Weights +#' @method chart.Weights optimize.portfolio.DEoptim +#' @S3method chart.Weights optimize.portfolio.DEoptim #' @export chart.Weights.optimize.portfolio.DEoptim <- chart.Weights.DE -#' @rdname chart.RiskReward -#' @export + chart.Scatter.DE <- function(object, neighbors = NULL, ..., return.col='mean', risk.col='ES', chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ # more or less specific to the output of the DEoptim portfolio code with constraints # will work to a point with other functions, such as optimize.porfolio.parallel @@ -234,7 +233,7 @@ rr[i,2] = x[rtc] #'FIXME } colors2 = colorRamp(c("blue","lightblue")) - colortrail = rgb(colors2((0:rows)/rows),max=255) + colortrail = rgb(colors2((0:rows)/rows),maxColorValue=255) for(i in 1:rows){ points(rr[i,1], rr[i,2], pch=1, col = colortrail[rows-i+1]) } @@ -285,36 +284,17 @@ box(col = element.color) } -#' @rdname chart.RiskReward +#' @method chart.RiskReward optimize.portfolio.DEoptim +#' @S3method chart.RiskReward optimize.portfolio.DEoptim #' @export chart.RiskReward.optimize.portfolio.DEoptim <- chart.Scatter.DE -#' scatter and weights chart for random portfolios -#' -#' \code{neighbors} may be specified in three ways. -#' The first is as a single number of neighbors. This will extract the \code{neighbors} closest -#' portfolios in terms of the \code{out} numerical statistic. -#' The second method consists of a numeric vector for \code{neighbors}. -#' This will extract the \code{neighbors} with portfolio index numbers that correspond to the vector contents. -#' The third method for specifying \code{neighbors} is to pass in a matrix. -#' This matrix should look like the output of \code{\link{extractStats}}, and should contain -#' \code{risk.col},\code{return.col}, and weights columns all properly named. 
-#' -#' @param DE set of random portfolios created by \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters -#' @param risk.col string name of column to use for risk (horizontal axis) -#' @param return.col string name of column to use for returns (vertical axis) -#' @param neighbors set of 'neighbor portfolios to overplot -#' @param main an overall title for the plot: see \code{\link{title}} -#' @seealso -#' \code{\link{optimize.portfolio}} -#' \code{\link{extractStats}} -#' @export + charts.DE <- function(DE, risk.col, return.col, chart.assets, neighbors=NULL, main="DEoptim.Portfolios", xlim=NULL, ylim=NULL, ...){ # Specific to the output of the random portfolio code with constraints # @TODO: check that DE is of the correct class op <- par(no.readonly=TRUE) - layout(matrix(c(1,2)),height=c(2,1.5),width=1) + layout(matrix(c(1,2)),heights=c(2,1.5),widths=1) par(mar=c(4,4,4,2)) chart.Scatter.DE(object=DE, risk.col=risk.col, return.col=return.col, chart.assets=chart.assets, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...) par(mar=c(2,4,0,2)) @@ -322,12 +302,14 @@ par(op) } -#TODO make chart.DE into a plot() method or methods #' plot method for optimize.portfolio.DEoptim output #' -#' scatter and weights chart for DEoptim portfolio optimizations run with trace=TRUE +#' scatter and weights chart for DEoptim portfolio optimizations run with trace=TRUE #' +#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights +#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights +#' #' \code{neighbors} may be specified in three ways. #' The first is as a single number of neighbors. This will extract the \code{neighbors} closest #' portfolios in terms of the \code{out} numerical statistic. 
@@ -345,6 +327,8 @@ #' @param main an overall title for the plot: see \code{\link{title}} #' @param xlim set the limit on coordinates for the x-axis #' @param ylim set the limit on coordinates for the y-axis +#' @method plot optimize.portfolio.DEoptim +#' @S3method plot optimize.portfolio.DEoptim #' @export plot.optimize.portfolio.DEoptim <- function(x, ..., return.col='mean', risk.col='ES', chart.assets=FALSE, neighbors=NULL, main='optimized portfolio plot', xlim=NULL, ylim=NULL) { charts.DE(DE=x, risk.col=risk.col, return.col=return.col, chart.assets=chart.assets, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...) Modified: pkg/PortfolioAnalytics/R/charts.GenSA.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-05 01:40:09 UTC (rev 2994) @@ -1,6 +1,4 @@ -#' @rdname chart.Weights -#' @export chart.Weights.GenSA <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ if(!inherits(object, "optimize.portfolio.GenSA")) stop("object must be of class 'optimize.portfolio.GenSA'") @@ -70,13 +68,12 @@ box(col = element.color) } -#' @rdname chart.Weights +#' @method chart.Weights optimize.portfolio.GenSA +#' @S3method chart.Weights optimize.portfolio.GenSA #' @export chart.Weights.optimize.portfolio.GenSA <- chart.Weights.GenSA -#' @rdname chart.RiskReward -#' @export -chart.Scatter.GenSA <- function(object, neighbors=NULL, ..., rp=FALSE, return.col="mean", risk.col="ES", chart.assets=FALSE, element.color="darkgray", cex.axis=0.8, ylim=NULL, xlim=NULL){ +chart.Scatter.GenSA <- function(object, neighbors=NULL, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, element.color="darkgray", cex.axis=0.8, ylim=NULL, xlim=NULL, rp=FALSE){ if(!inherits(object, "optimize.portfolio.GenSA")) stop("object must be of class 
'optimize.portfolio.GenSA'") @@ -133,31 +130,16 @@ box(col = element.color) } -#' @rdname chart.RiskReward +#' @method chart.RiskReward optimize.portfolio.GenSA +#' @S3method chart.RiskReward optimize.portfolio.GenSA #' @export chart.RiskReward.optimize.portfolio.GenSA <- chart.Scatter.GenSA -#' scatter and weights chart for portfolios -#' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights -#' -#' @param GenSA object created by \code{\link{optimize.portfolio}} -#' @param rp set of weights generated by \code{\link{random_portfolio}} -#' @param return.col string matching the objective of a 'return' objective, on vertical axis -#' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis -#' @param ... any other passthru parameters -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param element.color color for the default plot scatter points -#' @param neighbors set of 'neighbor' portfolios to overplot -#' @param main an overall title for the plot: see \code{\link{title}} -#' @seealso \code{\link{optimize.portfolio}} -#' @author Ross Bennett -#' @export + charts.GenSA <- function(GenSA, rp=FALSE, return.col="mean", risk.col="ES", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="GenSA.Portfolios", xlim=NULL, ylim=NULL, ...){ # Specific to the output of the optimize_method=GenSA op <- par(no.readonly=TRUE) - layout(matrix(c(1,2)),height=c(2,2),width=1) + layout(matrix(c(1,2)),heights=c(2,2),widths=1) par(mar=c(4,4,4,2)) chart.Scatter.GenSA(object=GenSA, rp=rp, return.col=return.col, risk.col=risk.col, chart.assets=chart.assets, element.color=element.color, cex.axis=cex.axis, main=main, xlim=xlim, ylim=ylim, ...=...) 
par(mar=c(2,4,0,2)) @@ -165,14 +147,16 @@ par(op) } -#' scatter and weights chart for portfolios +#' plot method for optimize.portfolio.DEoptim output #' +#' scatter and weights chart for GenSA portfolio optimizations run with trace=TRUE +#' #' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights #' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights #' #' @param x object created by \code{\link{optimize.portfolio}} #' @param ... any other passthru parameters -#' @param rp set of weights generated by \code{\link{random_portfolio}} +#' @param rp TRUE/FALSE to plot feasible portfolios generated by \code{\link{random_portfolios}} #' @param return.col string matching the objective of a 'return' objective, on vertical axis #' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis #' @param chart.assets TRUE/FALSE to include risk-return scatter of assets @@ -184,6 +168,8 @@ #' @param ylim set the limit on coordinates for the y-axis #' @seealso \code{\link{optimize.portfolio}} #' @author Ross Bennett +#' @method plot optimize.portfolio.GenSA +#' @S3method plot optimize.portfolio.GenSA #' @export plot.optimize.portfolio.GenSA <- function(x, ..., rp=FALSE, return.col="mean", risk.col="ES", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="GenSA.Portfolios", xlim=NULL, ylim=NULL){ charts.GenSA(GenSA=x, rp=rp, return.col=return.col, risk.col=risk.col, chart.assets=chart.assets, cex.axis=cex.axis, element.color=element.color, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...=...) 
Modified: pkg/PortfolioAnalytics/R/charts.PSO.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-05 01:40:09 UTC (rev 2994) @@ -1,6 +1,4 @@ -#' @rdname chart.Weights -#' @export chart.Weights.pso <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ if(!inherits(object, "optimize.portfolio.pso")) stop("object must be of class 'optimize.portfolio.pso'") @@ -70,12 +68,11 @@ box(col = element.color) } -#' @rdname chart.Weights +#' @method chart.Weights optimize.portfolio.pso +#' @S3method chart.Weights optimize.portfolio.pso #' @export chart.Weights.optimize.portfolio.pso <- chart.Weights.pso -#' @rdname chart.RiskReward -#' @export chart.Scatter.pso <- function(object, neighbors=NULL, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ if(!inherits(object, "optimize.portfolio.pso")) stop("object must be of class 'optimize.portfolio.pso'") @@ -191,30 +188,16 @@ box(col = element.color) } -#' @rdname chart.RiskReward +#' @method chart.RiskReward optimize.portfolio.pso +#' @S3method chart.RiskReward optimize.portfolio.pso #' @export chart.RiskReward.optimize.portfolio.pso <- chart.Scatter.pso -#' scatter and weights chart for portfolios -#' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights -#' -#' @param pso object created by \code{\link{optimize.portfolio}} -#' @param return.col string matching the objective of a 'return' objective, on vertical axis -#' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis -#' @param ... 
any other passthru parameters -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param element.color color for the default plot scatter points -#' @param neighbors set of 'neighbor' portfolios to overplot -#' @param main an overall title for the plot: see \code{\link{title}} -#' @seealso \code{\link{optimize.portfolio}} -#' @author Ross Bennett -#' @export + charts.pso <- function(pso, return.col="mean", risk.col="ES", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="PSO.Portfolios", xlim=NULL, ylim=NULL, ...){ # Specific to the output of the optimize_method=pso op <- par(no.readonly=TRUE) - layout(matrix(c(1,2)),height=c(2,2),width=1) + layout(matrix(c(1,2)),heights=c(2,2),widths=1) par(mar=c(4,4,4,2)) chart.Scatter.pso(object=pso, return.col=return.col, risk.col=risk.col, chart.assets=chart.assets, element.color=element.color, cex.axis=cex.axis, main=main, xlim=xlim, ylim=ylim, ...=...) par(mar=c(2,4,0,2)) @@ -222,12 +205,14 @@ par(op) } -#' scatter and weights chart for portfolios +#' plot method for optimize.portfolio.pso output #' +#' scatter and weights chart for pso portfolio optimizations run with trace=TRUE +#' #' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights #' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights #' -#' @param pso object created by \code{\link{optimize.portfolio}} +#' @param x object created by \code{\link{optimize.portfolio}} #' @param ... 
any other passthru parameters #' @param return.col string matching the objective of a 'return' objective, on vertical axis #' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis @@ -240,6 +225,8 @@ #' @param ylim set the limit on coordinates for the y-axis #' @seealso \code{\link{optimize.portfolio}} #' @author Ross Bennett +#' @method plot optimize.portfolio.pso +#' @S3method plot optimize.portfolio.pso #' @export plot.optimize.portfolio.pso <- function(x, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="PSO.Portfolios", xlim=NULL, ylim=NULL){ charts.pso(pso=x, return.col=return.col, risk.col=risk.col, chart.assets=FALSE, cex.axis=cex.axis, element.color=element.color, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...=...) Modified: pkg/PortfolioAnalytics/R/charts.ROI.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-05 01:40:09 UTC (rev 2994) @@ -1,6 +1,4 @@ -#' @rdname chart.Weights -#' @export chart.Weights.ROI <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ if(!inherits(object, "optimize.portfolio.ROI")) stop("object must be of class 'optimize.portfolio.ROI'") @@ -70,13 +68,13 @@ box(col = element.color) } -#' @rdname chart.Weights +#' @method chart.Weights optimize.portfolio.ROI +#' @S3method chart.Weights optimize.portfolio.ROI #' @export chart.Weights.optimize.portfolio.ROI <- chart.Weights.ROI -#' @rdname chart.RiskReward -#' @export -chart.Scatter.ROI <- function(object, neighbors=NULL, ..., rp=FALSE, return.col="mean", risk.col="ES", chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ + +chart.Scatter.ROI <- function(object, neighbors=NULL, ..., return.col="mean", risk.col="ES", 
chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL, rp=FALSE){ if(!inherits(object, "optimize.portfolio.ROI")) stop("object must be of class 'optimize.portfolio.ROI'") @@ -134,34 +132,16 @@ box(col = element.color) } -#' @rdname chart.RiskReward +#' @method chart.RiskReward optimize.portfolio.ROI +#' @S3method chart.RiskReward optimize.portfolio.ROI #' @export chart.RiskReward.optimize.portfolio.ROI <- chart.Scatter.ROI -#' scatter and weights chart for portfolios -#' -#' The ROI optimizers do not store the portfolio weights like DEoptim or random -#' portfolios so we will generate random portfolios for the scatter plot. -#' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights -#' -#' @param ROI object created by \code{\link{optimize.portfolio}} -#' @param rp set of weights generated by \code{\link{random_portfolio}} -#' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis -#' @param return.col string matching the objective of a 'return' objective, on vertical axis -#' @param ... 
any other passthru parameters -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param element.color color for the default plot scatter points -#' @param neighbors set of 'neighbor' portfolios to overplot -#' @param main an overall title for the plot: see \code{\link{title}} -#' @seealso \code{\link{optimize.portfolio}} -#' @author Ross Bennett -#' @export + charts.ROI <- function(ROI, rp=FALSE, risk.col="ES", return.col="mean", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="ROI.Portfolios", xlim=NULL, ylim=NULL, ...){ # Specific to the output of the optimize_method=ROI op <- par(no.readonly=TRUE) - layout(matrix(c(1,2)),height=c(2,1.5),width=1) + layout(matrix(c(1,2)),heights=c(2,1.5),widths=1) par(mar=c(4,4,4,2)) chart.Scatter.ROI(object=ROI, rp=rp, return.col=return.col, risk.col=risk.col, ..., chart.assets=chart.assets, element.color=element.color, cex.axis=cex.axis, main=main, xlim=xlim, ylim=ylim) par(mar=c(2,4,0,2)) @@ -169,21 +149,25 @@ par(op) } -#' scatter and weights chart for portfolios +#' plot method for optimize.portfolio.ROI output #' +#' scatter and weights chart for ROI portfolio optimizations run with trace=TRUE +#' +#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights +#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights +#' #' The ROI optimizers do not store the portfolio weights like DEoptim or random -#' portfolios so we will generate random portfolios for the scatter plot. +#' portfolios random portfolios can be generated for the scatter plot. 
#' #' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights #' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights #' #' @param x object created by \code{\link{optimize.portfolio}} #' @param ... any other passthru parameters -#' @param rp set of weights generated by \code{\link{random_portfolio}} +#' @param rp TRUE/FALSE to plot feasible portfolios generated by \code{\link{random_portfolios}} #' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis #' @param return.col string matching the objective of a 'return' objective, on vertical axis #' @param chart.assets TRUE/FALSE to include risk-return scatter of assets -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} #' @param element.color color for the default plot scatter points #' @param neighbors set of 'neighbor' portfolios to overplot #' @param main an overall title for the plot: see \code{\link{title}} @@ -191,6 +175,8 @@ #' @param ylim set the limit on coordinates for the y-axis #' @seealso \code{\link{optimize.portfolio}} #' @author Ross Bennett +#' @method plot optimize.portfolio.ROI +#' @S3method plot optimize.portfolio.ROI #' @export plot.optimize.portfolio.ROI <- function(x, ..., rp=FALSE, risk.col="ES", return.col="mean", chart.assets=FALSE, element.color="darkgray", neighbors=NULL, main="ROI.Portfolios", xlim=NULL, ylim=NULL){ charts.ROI(ROI=x, rp=rp, risk.col=risk.col, return.col=return.col, chart.assets=chart.assets, main=main, xlim=xlim, ylim=ylim, ...) 
Modified: pkg/PortfolioAnalytics/R/charts.RP.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-05 00:11:33 UTC (rev 2993) +++ pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-05 01:40:09 UTC (rev 2994) @@ -10,12 +10,8 @@ # ############################################################################### -#' @rdname chart.Weights -#' @export [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 2994 From noreply at r-forge.r-project.org Thu Sep 5 03:41:28 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 03:41:28 +0200 (CEST) Subject: [Returnanalytics-commits] r2995 - pkg/PortfolioAnalytics/man Message-ID: <20130905014128.E42AF185D21@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-05 03:41:28 +0200 (Thu, 05 Sep 2013) New Revision: 2995 Added: pkg/PortfolioAnalytics/man/HHI.Rd pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd pkg/PortfolioAnalytics/man/chart.RiskReward.Rd pkg/PortfolioAnalytics/man/chart.Weights.Rd Log: Adding documentation files missed in last commit. 
Added: pkg/PortfolioAnalytics/man/HHI.Rd =================================================================== --- pkg/PortfolioAnalytics/man/HHI.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/HHI.Rd 2013-09-05 01:41:28 UTC (rev 2995) @@ -0,0 +1,19 @@ +\name{HHI} +\alias{HHI} +\title{Concentration of weights} +\usage{ + HHI(weights, groups = NULL) +} +\arguments{ + \item{weights}{set of portfolio weights} + + \item{groups}{list of vectors of grouping} +} +\description{ + This function computes the concentration of weights using + the Herfindahl Hirschman Index +} +\author{ + Ross Bennett +} + Added: pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd 2013-09-05 01:41:28 UTC (rev 2995) @@ -0,0 +1,106 @@ +\name{chart.EfficientFrontier} +\alias{chart.EfficientFrontier} +\title{Chart the efficient frontier and risk-return scatter} +\usage{ + chart.EfficientFrontier(object, match.col, n.portfolios, + ...) +} +\arguments{ + \item{object}{object to chart} + + \item{match.col}{string name of column to use for risk + (horizontal axis). 
\code{match.col} must match the name + of an objective measure in the \code{objective_measures} + or \code{opt_values} slot in the object created by + \code{\link{optimize.portfolio}}.} + + \item{n.portfolios}{number of portfolios to use to plot + the efficient frontier} + + \item{\dots}{passthru parameters to \code{\link{plot}}} + + \item{xlim}{set the x-axis limit, same as in + \code{\link{plot}}} + + \item{ylim}{set the y-axis limit, same as in + \code{\link{plot}}} + + \item{cex.axis}{A numerical value giving the amount by + which the axis should be magnified relative to the + default.} + + \item{element.color}{provides the color for drawing + less-important chart elements, such as the box lines, + axis lines, etc.} + + \item{main}{a main title for the plot} + + \item{RAR.text}{Risk Adjusted Return ratio text to plot + in the legend} + + \item{rf}{risk free rate. If \code{rf} is not null, the + maximum Sharpe Ratio or modified Sharpe Ratio tangency + portfolio will be plotted} + + \item{tangent.line}{TRUE/FALSE to plot the tangent line} + + \item{cex.legend}{A numerical value giving the amount by + which the legend should be magnified relative to the + default.} + + \item{chart.assets}{TRUE/FALSE to include the assets} + + \item{labels.assets}{TRUE/FALSE to include the asset + names in the plot. 
\code{chart.assets} must be + \code{TRUE} to plot asset names} + + \item{pch.assets}{plotting character of the assets, same + as in \code{\link{plot}}} + + \item{cex.assets}{A numerical value giving the amount by + which the asset points and labels should be magnified + relative to the default.} +} +\description{ + Chart the efficient frontier and risk-return scatter of + the assets for optimize.portfolio and efficient.frontier + objects +} +\details{ + For objects created by optimize.portfolio with 'DEoptim', + 'random', or 'pso' specified as the optimize_method: + \itemize{ \item The efficient frontier plotted is based + on the the trace information (sets of portfolios tested + by the solver at each iteration) in objects created by + \code{optimize.portfolio}. } + + For objects created by optimize.portfolio with 'ROI' + specified as the optimize_method: \itemize{ \item The + mean-StdDev or mean-etl efficient frontier can be plotted + for optimal portfolio objects created by + \code{optimize.portfolio}. + + \item If \code{match.col="StdDev"}, the mean-StdDev + efficient frontier is plotted. + + \item If \code{match.col="ETL"} (also "ES" or "CVaR"), + the mean-etl efficient frontier is plotted. } + + Note that \code{trace=TRUE} must be specified in + \code{\link{optimize.portfolio}} + + GenSA does not return any useable trace information for + portfolios tested at each iteration, therfore we cannot + extract and chart an efficient frontier. + + By default, the tangency portfolio (maximum Sharpe Ratio + or modified Sharpe Ratio) will be plotted using a risk + free rate of 0. Set \code{rf=NULL} to omit this from the + plot. 
+} +\author{ + Ross Bennett + + Ross Bennett +} + Added: pkg/PortfolioAnalytics/man/chart.RiskReward.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskReward.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-05 01:41:28 UTC (rev 2995) @@ -0,0 +1,61 @@ +\name{chart.RiskReward} +\alias{chart.RiskReward} +\alias{chart.RiskReward.optimize.portfolio.DEoptim} +\alias{chart.RiskReward.optimize.portfolio.GenSA} +\alias{chart.RiskReward.optimize.portfolio.pso} +\alias{chart.RiskReward.optimize.portfolio.ROI} +\alias{chart.RiskReward.optimize.portfolio.RP} +\title{classic risk reward scatter} +\usage{ + chart.RiskReward(object, neighbors, ..., return.col, + risk.col, chart.assets, element.color, cex.axis, xlim, + ylim) +} +\arguments{ + \item{object}{optimal portfolio created by + \code{\link{optimize.portfolio}}} + + \item{neighbors}{set of 'neighbor' portfolios to + overplot, see Details} + + \item{\dots}{any other passthru parameters} + + \item{return.col}{string matching the objective of a + 'return' objective, on vertical axis} + + \item{risk.col}{string matching the objective of a 'risk' + objective, on horizontal axis} + + \item{chart.assets}{TRUE/FALSE. Includes a risk reward + scatter of the assets in the chart} + + \item{element.color}{color for the default plot scatter + points} + + \item{cex.axis}{The magnification to be used for axis + annotation relative to the current setting of \code{cex}} + + \item{xlim}{set the x-axis limit, same as in + \code{\link{plot}}} + + \item{ylim}{set the y-axis limit, same as in + \code{\link{plot}}} +} +\description{ + \code{neighbors} may be specified in three ways. The + first is as a single number of neighbors. This will + extract the \code{neighbors} closest portfolios in terms + of the \code{out} numerical statistic. The second method + consists of a numeric vector for \code{neighbors}. 
This + will extract the \code{neighbors} with portfolio index + numbers that correspond to the vector contents. The third + method for specifying \code{neighbors} is to pass in a + matrix. This matrix should look like the output of + \code{\link{extractStats}}, and should contain + \code{risk.col},\code{return.col}, and weights columns + all properly named. +} +\seealso{ + \code{\link{optimize.portfolio}} +} + Added: pkg/PortfolioAnalytics/man/chart.Weights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-05 01:41:28 UTC (rev 2995) @@ -0,0 +1,51 @@ +\name{chart.Weights} +\alias{chart.Weights} +\alias{chart.Weights.optimize.portfolio.DEoptim} +\alias{chart.Weights.optimize.portfolio.GenSA} +\alias{chart.Weights.optimize.portfolio.pso} +\alias{chart.Weights.optimize.portfolio.ROI} +\alias{chart.Weights.optimize.portfolio.RP} +\title{boxplot of the weights of the optimal portfolios} +\usage{ + chart.Weights(object, neighbors = NULL, ..., + main = "Weights", las = 3, xlab = NULL, cex.lab = 1, + element.color = "darkgray", cex.axis = 0.8) +} +\arguments{ + \item{object}{optimal portfolio object created by + \code{\link{optimize.portfolio}}} + + \item{neighbors}{set of 'neighbor' portfolios to + overplot} + + \item{\dots}{any other passthru parameters} + + \item{main}{an overall title for the plot: see + \code{\link{title}}} + + \item{las}{numeric in \{0,1,2,3\}; the style of axis + labels \describe{ \item{0:}{always parallel to the axis + [\emph{default}],} \item{1:}{always horizontal,} + \item{2:}{always perpendicular to the axis,} + \item{3:}{always vertical.} }} + + \item{xlab}{a title for the x axis: see + \code{\link{title}}} + + \item{cex.lab}{The magnification to be used for x and y + labels relative to the current setting of \code{cex}} + + \item{element.color}{color for the default plot lines} + + \item{cex.axis}{The 
magnification to be used for axis + annotation relative to the current setting of \code{cex}} +} +\description{ + Chart the optimal weights and upper and lower bounds on + weights of a portfolio run via + \code{\link{optimize.portfolio}} +} +\seealso{ + \code{\link{optimize.portfolio}} +} + From noreply at r-forge.r-project.org Thu Sep 5 05:29:46 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 05:29:46 +0200 (CEST) Subject: [Returnanalytics-commits] r2996 - in pkg/PortfolioAnalytics: R man Message-ID: <20130905032947.1EC1F185DFF@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-05 05:29:45 +0200 (Thu, 05 Sep 2013) New Revision: 2996 Added: pkg/PortfolioAnalytics/man/chart.Weights.EF.efficient.frontier.Rd pkg/PortfolioAnalytics/man/chart.Weights.EF.optimize.portfolio.Rd Modified: pkg/PortfolioAnalytics/R/charts.efficient.frontier.R pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd Log: Modifying generic method chart.Weights.EF and updating documentation Modified: pkg/PortfolioAnalytics/R/charts.efficient.frontier.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.efficient.frontier.R 2013-09-05 01:41:28 UTC (rev 2995) +++ pkg/PortfolioAnalytics/R/charts.efficient.frontier.R 2013-09-05 03:29:45 UTC (rev 2996) @@ -55,7 +55,7 @@ #' @param pch.assets plotting character of the assets, same as in \code{\link{plot}} #' @param cex.assets A numerical value giving the amount by which the asset points and labels should be magnified relative to the default. 
#' @author Ross Bennett -#' @author Ross Bennett +#' @aliases chart.EfficientFrontier.optimize.portfolio.ROI chart.EfficientFrontier.optimize.portfolio chart.EfficientFrontier.efficient.frontier #' @export chart.EfficientFrontier <- function(object, match.col, n.portfolios, ...){ UseMethod("chart.EfficientFrontier") @@ -269,35 +269,58 @@ box(col = element.color) } -#' chart weights along an efficient frontier +# ' chart weights along an efficient frontier +# ' +# ' This creates a stacked column chart of the weights of portfolios along an efficient frontier. +# ' +# ' @param object object to chart. +# ' @param \dots passthru parameters to \code{barplot}. +# ' @param colorset color palette to use. +# ' @param n.portfolios number of portfolios to extract along the efficient frontier. +# ' This is only used for objects of class \code{optimize.portfolio} +# ' @param by.groups TRUE/FALSE. If TRUE, the weights by group are charted. +# ' @param match.col match.col string name of column to use for risk (horizontal axis). +# ' Must match the name of an objective. +# ' @param main main title used in the plot. +# ' @param cex.lab The magnification to be used for x-axis and y-axis labels relative to the current setting of 'cex'. +# ' @param cex.axis The magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}}. +# ' @param cex.legend The magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}}. +# ' @param legend.labels character vector to use for the legend labels +# ' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. +# ' @param legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted. 
+# ' @author Ross Bennett +# ' @aliases chart.Weights.EF.efficient.frontier chart.Weights.EF.optimize.portfolio +# ' @export + +#' Chart weights along an efficient frontier #' -#' This creates a stacked column chart of the weights of portfolios along an efficient frontier. +#' This function is a generic method to chart weights along an efficient frontier #' -#' @param object object to chart. -#' @param \dots passthru parameters to \code{barplot}. -#' @param colorset color palette to use. -#' @param n.portfolios number of portfolios to extract along the efficient frontier. -#' This is only used for objects of class \code{optimize.portfolio} -#' @param by.groups TRUE/FALSE. If TRUE, the weights by group are charted. -#' @param match.col match.col string name of column to use for risk (horizontal axis). -#' Must match the name of an objective. -#' @param main main title used in the plot. -#' @param cex.lab The magnification to be used for x-axis and y-axis labels relative to the current setting of 'cex'. -#' @param cex.axis The magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}}. -#' @param cex.legend The magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}}. -#' @param legend.labels character vector to use for the legend labels -#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. -#' @param legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted. 
-#' @author Ross Bennett -#' @aliases chart.Weights.EF.efficient.frontier chart.Weights.EF.optimize.portfolio +#' @param object object to chart +#' @param \dots any other passthru parameters #' @export chart.Weights.EF <- function(object, ...){ UseMethod("chart.Weights.EF") } +#' Chart weights along an efficient frontier for an efficient.frontier object +#' +#' @param object object of class \code{efficient.frontier} +#' @param \dots passthru parameters to \code{barplot}. +#' @param colorset color palette to use +#' @param n.portfolios number of portfolios to extract along the efficient frontier +#' @param by.groups TRUE/FALSE. If TRUE, the group weights are charted +#' @param match.col string name of column to use for risk (horizontal axis). Must match the name of an objective. +#' @param main title used in the plot. +#' @param cex.lab The magnification to be used for x-axis and y-axis labels relative to the current setting of 'cex' +#' @param cex.axis The magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}} +#' @param cex.legend The magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}} +#' @param legend.labels character vector to use for the legend labels +#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. +#' @param legend.loc NULL, "topright", "right", or "bottomright". 
If legend.loc is NULL, the legend will not be plotted +#' @author Ross Bennett #' @method chart.Weights.EF efficient.frontier #' @S3method chart.Weights.EF efficient.frontier -#' @export chart.Weights.EF.efficient.frontier <- function(object, ..., colorset=NULL, n.portfolios=25, by.groups=FALSE, match.col="ES", main="", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray", legend.loc="topright"){ # using ideas from weightsPlot.R in fPortfolio package @@ -420,9 +443,24 @@ box(col=element.color) } +#' Chart weights along an efficient frontier for an efficient.frontier object +#' +#' @param object object of class \code{efficient.frontier} +#' @param \dots passthru parameters to \code{barplot}. +#' @param colorset color palette to use +#' @param n.portfolios number of portfolios to extract along the efficient frontier +#' @param by.groups TRUE/FALSE. If TRUE, the group weights are charted +#' @param match.col string name of column to use for risk (horizontal axis). Must match the name of an objective. +#' @param main title used in the plot. +#' @param cex.lab The magnification to be used for x-axis and y-axis labels relative to the current setting of 'cex' +#' @param cex.axis The magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}} +#' @param cex.legend The magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}} +#' @param legend.labels character vector to use for the legend labels +#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. +#' @param legend.loc NULL, "topright", "right", or "bottomright". 
If legend.loc is NULL, the legend will not be plotted +#' @author Ross Bennett #' @method chart.Weights.EF optimize.portfolio #' @S3method chart.Weights.EF optimize.portfolio -#' @export chart.Weights.EF.optimize.portfolio <- function(object, ..., colorset=NULL, n.portfolios=25, by.groups=FALSE, match.col="ES", main="", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray", legend.loc="topright"){ # chart the weights along the efficient frontier of an objected created by optimize.portfolio Modified: pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd 2013-09-05 01:41:28 UTC (rev 2995) +++ pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd 2013-09-05 03:29:45 UTC (rev 2996) @@ -1,5 +1,8 @@ \name{chart.EfficientFrontier} \alias{chart.EfficientFrontier} +\alias{chart.EfficientFrontier.efficient.frontier} +\alias{chart.EfficientFrontier.optimize.portfolio} +\alias{chart.EfficientFrontier.optimize.portfolio.ROI} \title{Chart the efficient frontier and risk-return scatter} \usage{ chart.EfficientFrontier(object, match.col, n.portfolios, @@ -100,7 +103,5 @@ } \author{ Ross Bennett - - Ross Bennett } Modified: pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd 2013-09-05 01:41:28 UTC (rev 2995) +++ pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd 2013-09-05 03:29:45 UTC (rev 2996) @@ -1,59 +1,16 @@ \name{chart.Weights.EF} \alias{chart.Weights.EF} -\alias{chart.Weights.EF.efficient.frontier} -\alias{chart.Weights.EF.optimize.portfolio} -\title{chart weights along an efficient frontier} +\title{Chart weights along an efficient frontier} \usage{ chart.Weights.EF(object, ...) 
} \arguments{ - \item{object}{object to chart.} + \item{object}{object to chart} - \item{\dots}{passthru parameters to \code{barplot}.} - - \item{colorset}{color palette to use.} - - \item{n.portfolios}{number of portfolios to extract along - the efficient frontier. This is only used for objects of - class \code{optimize.portfolio}} - - \item{by.groups}{TRUE/FALSE. If TRUE, the weights by - group are charted.} - - \item{match.col}{match.col string name of column to use - for risk (horizontal axis). Must match the name of an - objective.} - - \item{main}{main title used in the plot.} - - \item{cex.lab}{The magnification to be used for x-axis - and y-axis labels relative to the current setting of - 'cex'.} - - \item{cex.axis}{The magnification to be used for sizing - the axis text relative to the current setting of 'cex', - similar to \code{\link{plot}}.} - - \item{cex.legend}{The magnification to be used for sizing - the legend relative to the current setting of 'cex', - similar to \code{\link{plot}}.} - - \item{legend.labels}{character vector to use for the - legend labels} - - \item{element.color}{provides the color for drawing - less-important chart elements, such as the box lines, - axis lines, etc.} - - \item{legend.loc}{NULL, "topright", "right", or - "bottomright". If legend.loc is NULL, the legend will not - be plotted.} + \item{\dots}{any other passthru parameters} } \description{ - This creates a stacked column chart of the weights of - portfolios along an efficient frontier. 
+ This function is a generic method to chart weights along + an efficient frontier } -\author{ - Ross Bennett -} Added: pkg/PortfolioAnalytics/man/chart.Weights.EF.efficient.frontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.EF.efficient.frontier.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.Weights.EF.efficient.frontier.Rd 2013-09-05 03:29:45 UTC (rev 2996) @@ -0,0 +1,60 @@ +\name{chart.Weights.EF.efficient.frontier} +\alias{chart.Weights.EF.efficient.frontier} +\title{Chart weights along an efficient frontier for an efficient.frontier object} +\usage{ + \method{chart.Weights.EF}{efficient.frontier} (object, + ..., colorset = NULL, n.portfolios = 25, + by.groups = FALSE, match.col = "ES", main = "", + cex.lab = 0.8, cex.axis = 0.8, cex.legend = 0.8, + legend.labels = NULL, element.color = "darkgray", + legend.loc = "topright") +} +\arguments{ + \item{object}{object of class \code{efficient.frontier}} + + \item{\dots}{passthru parameters to \code{barplot}.} + + \item{colorset}{color palette to use} + + \item{n.portfolios}{number of portfolios to extract along + the efficient frontier} + + \item{by.groups}{TRUE/FALSE. If TRUE, the group weights + are charted} + + \item{match.col}{string name of column to use for risk + (horizontal axis). 
Must match the name of an objective.} + + \item{main}{title used in the plot.} + + \item{cex.lab}{The magnification to be used for x-axis + and y-axis labels relative to the current setting of + 'cex'} + + \item{cex.axis}{The magnification to be used for sizing + the axis text relative to the current setting of 'cex', + similar to \code{\link{plot}}} + + \item{cex.legend}{The magnification to be used for sizing + the legend relative to the current setting of 'cex', + similar to \code{\link{plot}}} + + \item{legend.labels}{character vector to use for the + legend labels} + + \item{element.color}{provides the color for drawing + less-important chart elements, such as the box lines, + axis lines, etc.} + + \item{legend.loc}{NULL, "topright", "right", or + "bottomright". If legend.loc is NULL, the legend will not + be plotted} +} +\description{ + Chart weights along an efficient frontier for an + efficient.frontier object +} +\author{ + Ross Bennett +} + Added: pkg/PortfolioAnalytics/man/chart.Weights.EF.optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.EF.optimize.portfolio.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.Weights.EF.optimize.portfolio.Rd 2013-09-05 03:29:45 UTC (rev 2996) @@ -0,0 +1,60 @@ +\name{chart.Weights.EF.optimize.portfolio} +\alias{chart.Weights.EF.optimize.portfolio} +\title{Chart weights along an efficient frontier for an efficient.frontier object} +\usage{ + \method{chart.Weights.EF}{optimize.portfolio} (object, + ..., colorset = NULL, n.portfolios = 25, + by.groups = FALSE, match.col = "ES", main = "", + cex.lab = 0.8, cex.axis = 0.8, cex.legend = 0.8, + legend.labels = NULL, element.color = "darkgray", + legend.loc = "topright") +} +\arguments{ + \item{object}{object of class \code{efficient.frontier}} + + \item{\dots}{passthru parameters to \code{barplot}.} + + \item{colorset}{color palette to use} + + \item{n.portfolios}{number of portfolios to extract 
along + the efficient frontier} + + \item{by.groups}{TRUE/FALSE. If TRUE, the group weights + are charted} + + \item{match.col}{string name of column to use for risk + (horizontal axis). Must match the name of an objective.} + + \item{main}{title used in the plot.} + + \item{cex.lab}{The magnification to be used for x-axis + and y-axis labels relative to the current setting of + 'cex'} + + \item{cex.axis}{The magnification to be used for sizing + the axis text relative to the current setting of 'cex', + similar to \code{\link{plot}}} + + \item{cex.legend}{The magnification to be used for sizing + the legend relative to the current setting of 'cex', + similar to \code{\link{plot}}} + + \item{legend.labels}{character vector to use for the + legend labels} + + \item{element.color}{provides the color for drawing + less-important chart elements, such as the box lines, + axis lines, etc.} + + \item{legend.loc}{NULL, "topright", "right", or + "bottomright". If legend.loc is NULL, the legend will not + be plotted} +} +\description{ + Chart weights along an efficient frontier for an + efficient.frontier object +} +\author{ + Ross Bennett +} + From noreply at r-forge.r-project.org Thu Sep 5 11:21:51 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 11:21:51 +0200 (CEST) Subject: [Returnanalytics-commits] r2997 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
man Message-ID: <20130905092151.CF583185B42@r-forge.r-project.org> Author: braverock Date: 2013-09-05 11:21:51 +0200 (Thu, 05 Sep 2013) New Revision: 2997 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/inst/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION Log: - two missing .Rd files restored Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-05 03:29:45 UTC (rev 2996) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-05 09:21:51 UTC (rev 2997) @@ -1,38 +1,38 @@ -Package: noniid.sm -Type: Package -Title: Non-i.i.d. GSoC 2013 Shubhankit -Version: 0.1 -Date: $Date: 2013-05-13 14:30:22 -0500 (Mon, 13 May 2013) $ -Author: Shubhankit Mohan -Contributors: Peter Carl, Brian G. Peterson -Depends: - xts, - PerformanceAnalytics, - tseries, - stats -Maintainer: Brian G. Peterson -Description: GSoC 2013 project to replicate literature on drawdowns and - non-i.i.d assumptions in finance. -License: GPL-3 -ByteCompile: TRUE -Collate: - 'AcarSim.R' - 'ACStdDev.annualized.R' - 'CalmarRatio.Norm.R' - 'CDrawdown.R' - 'chart.AcarSim.R' - 'chart.Autocorrelation.R' - 'EmaxDDGBM.R' - 'GLMSmoothIndex.R' - 'LoSharpe.R' - 'na.skip.R' - 'noniid.sm-internal.R' - 'QP.Norm.R' - 'Return.GLM.R' - 'Return.Okunev.R' - 'se.LoSharpe.R' - 'SterlingRatio.Norm.R' - 'table.ComparitiveReturn.GLM.R' - 'table.EMaxDDGBM.R' - 'table.UnsmoothReturn.R' - 'UnsmoothReturn.R' +Package: noniid.sm +Type: Package +Title: Non-i.i.d. 
GSoC 2013 Shubhankit +Version: 0.1 +Date: $Date: 2013-05-13 14:30:22 -0500 (Mon, 13 May 2013) $ +Author: Shubhankit Mohan +Contributors: Peter Carl, Brian G. Peterson +Depends: + xts, + PerformanceAnalytics, + tseries, + stats +Maintainer: Brian G. Peterson +Description: GSoC 2013 project to replicate literature on drawdowns and + non-i.i.d assumptions in finance. +License: GPL-3 +ByteCompile: TRUE +Collate: + 'AcarSim.R' + 'ACStdDev.annualized.R' + 'CalmarRatio.Norm.R' + 'CDrawdown.R' + 'chart.AcarSim.R' + 'chart.Autocorrelation.R' + 'EmaxDDGBM.R' + 'GLMSmoothIndex.R' + 'LoSharpe.R' + 'na.skip.R' + 'noniid.sm-internal.R' + 'QP.Norm.R' + 'Return.GLM.R' + 'Return.Okunev.R' + 'se.LoSharpe.R' + 'SterlingRatio.Norm.R' + 'table.ComparitiveReturn.GLM.R' + 'table.EMaxDDGBM.R' + 'table.UnsmoothReturn.R' + 'UnsmoothReturn.R' Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd 2013-09-05 03:29:45 UTC (rev 2996) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd 2013-09-05 09:21:51 UTC (rev 2997) @@ -1,22 +0,0 @@ -\name{QP.Norm} -\alias{QP.Norm} -\title{QP function for calculation of Sharpe Ratio} -\usage{ - QP.Norm(R, tau, scale = NA) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of asset returns} - - \item{tau}{Time Scale Translations Factor} - - \item{scale}{number of periods in a year (daily scale = - 252, monthly scale =} -} -\description{ - QP function for calculation of Sharpe Ratio -} -\seealso{ - \code{\link{CalmarRatio.Norm}}, \cr -} - Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/QP.Norm.Rd 
2013-09-05 09:21:51 UTC (rev 2997) @@ -0,0 +1,22 @@ +\name{QP.Norm} +\alias{QP.Norm} +\title{QP function for calculation of Sharpe Ratio} +\usage{ + QP.Norm(R, tau, scale = NA) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{tau}{Time Scale Translations Factor} + + \item{scale}{number of periods in a year (daily scale = + 252, monthly scale =} +} +\description{ + QP function for calculation of Sharpe Ratio +} +\seealso{ + \code{\link{CalmarRatio.Norm}}, \cr +} + Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-05 03:29:45 UTC (rev 2996) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-05 09:21:51 UTC (rev 2997) @@ -1,57 +0,0 @@ -\name{table.EMaxDDGBM} -\alias{table.EMaxDDGBM} -\title{Expected Drawdown using Brownian Motion Assumptions} -\usage{ - table.EMaxDDGBM(R, digits = 4) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of asset returns} - - \item{digits}{significant number} -} -\description{ - Works on the model specified by Maddon-Ismail which - investigates the behavior of this statistic for a - Brownian motion with drift. -} -\details{ - If X(t) is a random process on [0, T ], the maximum - drawdown at time T , D(T), is defined by where \deqn{D(T) - = sup [X(s) - X(t)]} where s belongs to [0,t] and s - belongs to [0,T] Informally, this is the largest drop - from a peak to a bottom. In this paper, we investigate - the behavior of this statistic for a Brownian motion with - drift. In particular, we give an infinite series - representation of its distribution, and consider its - expected value. 
When the drift is zero, we give an - analytic expression for the expected value, and for - non-zero drift, we give an infinite series - representation. For all cases, we compute the limiting - \bold{(\eqn{T tends to \infty})} behavior, which can be - logarithmic (\eqn{\mu} > 0), square root (\eqn{\mu} = 0), - or linear (\eqn{\mu} < 0). -} -\examples{ -library(PerformanceAnalytics) -data(edhec) -table.EMaxDDGBM(edhec) -} -\author{ - Shubhankit Mohan -} -\references{ - Magdon-Ismail, M., Atiya, A., Pratap, A., and Yaser S. - Abu-Mostafa: On the Maximum Drawdown of a Browninan - Motion, Journal of Applied Probability 41, pp. 147-161, - 2004 - \url{http://alumnus.caltech.edu/~amir/drawdown-jrnl.pdf} -} -\keyword{Assumptions} -\keyword{Brownian} -\keyword{Drawdown} -\keyword{Expected} -\keyword{models} -\keyword{Motion} -\keyword{Using} - Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-05 09:21:51 UTC (rev 2997) @@ -0,0 +1,57 @@ +\name{table.EMaxDDGBM} +\alias{table.EMaxDDGBM} +\title{Expected Drawdown using Brownian Motion Assumptions} +\usage{ + table.EMaxDDGBM(R, digits = 4) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{digits}{significant number} +} +\description{ + Works on the model specified by Maddon-Ismail which + investigates the behavior of this statistic for a + Brownian motion with drift. +} +\details{ + If X(t) is a random process on [0, T ], the maximum + drawdown at time T , D(T), is defined by where \deqn{D(T) + = sup [X(s) - X(t)]} where s belongs to [0,t] and s + belongs to [0,T] Informally, this is the largest drop + from a peak to a bottom. 
In this paper, we investigate + the behavior of this statistic for a Brownian motion with + drift. In particular, we give an infinite series + representation of its distribution, and consider its + expected value. When the drift is zero, we give an + analytic expression for the expected value, and for + non-zero drift, we give an infinite series + representation. For all cases, we compute the limiting + \bold{(\eqn{T tends to \infty})} behavior, which can be + logarithmic (\eqn{\mu} > 0), square root (\eqn{\mu} = 0), + or linear (\eqn{\mu} < 0). +} +\examples{ +library(PerformanceAnalytics) +data(edhec) +table.EMaxDDGBM(edhec) +} +\author{ + Shubhankit Mohan +} +\references{ + Magdon-Ismail, M., Atiya, A., Pratap, A., and Yaser S. + Abu-Mostafa: On the Maximum Drawdown of a Browninan + Motion, Journal of Applied Probability 41, pp. 147-161, + 2004 + \url{http://alumnus.caltech.edu/~amir/drawdown-jrnl.pdf} +} +\keyword{Assumptions} +\keyword{Brownian} +\keyword{Drawdown} +\keyword{Expected} +\keyword{models} +\keyword{Motion} +\keyword{Using} + From noreply at r-forge.r-project.org Thu Sep 5 13:25:45 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 13:25:45 +0200 (CEST) Subject: [Returnanalytics-commits] r2998 - in pkg/Meucci: . 
R data demo man Message-ID: <20130905112545.47DCF184612@r-forge.r-project.org> Author: xavierv Date: 2013-09-05 13:25:44 +0200 (Thu, 05 Sep 2013) New Revision: 2998 Added: pkg/Meucci/R/DoubleDecay.R pkg/Meucci/R/Fit2Moms.R pkg/Meucci/R/LeastInfoKernel.R pkg/Meucci/R/PlotDistributions.R pkg/Meucci/data/dbFFP.Rda pkg/Meucci/data/fILMR.Rda pkg/Meucci/demo/FullFlexProbs.R pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R pkg/Meucci/man/DoubleDecay.Rd pkg/Meucci/man/Fit2Moms.Rd pkg/Meucci/man/LeastInfoKernel.Rd Modified: pkg/Meucci/DESCRIPTION pkg/Meucci/NAMESPACE pkg/Meucci/R/EntropyProg.R pkg/Meucci/R/Prior2Posterior.R pkg/Meucci/man/PlotDistributions.Rd Log: -added the code for scripts Historical Scenarios with Fully Flexible.. and Fully Integrated Liquidity and Market Risk Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2013-09-05 09:21:51 UTC (rev 2997) +++ pkg/Meucci/DESCRIPTION 2013-09-05 11:25:44 UTC (rev 2998) @@ -99,3 +99,7 @@ 'Log2Lin.R' 'PlotCompositionEfficientFrontier.R' 'MaxRsqTS.R' + 'PlotDistributions.R' + 'DoubleDecay.R' + 'Fit2Moms.R' + 'LeastInfoKernel.R' Modified: pkg/Meucci/NAMESPACE =================================================================== --- pkg/Meucci/NAMESPACE 2013-09-05 09:21:51 UTC (rev 2997) +++ pkg/Meucci/NAMESPACE 2013-09-05 11:25:44 UTC (rev 2998) @@ -11,10 +11,12 @@ export(ConvertCompoundedReturns2Price) export(Cumul2Raw) export(DetectOutliersViaMVE) +export(DoubleDecay) export(EfficientFrontierPrices) export(EfficientFrontierReturns) export(EfficientFrontierReturnsBenchmark) export(EntropyProg) +export(Fit2Moms) export(FitExpectationMaximization) export(FitMultivariateGarch) export(FitOrnsteinUhlenbeck) @@ -25,6 +27,7 @@ export(hermitePolynomial) export(integrateSubIntervals) export(InterExtrapolate) +export(LeastInfoKernel) export(linreturn) export(Log2Lin) export(LognormalCopulaPdf) @@ -44,6 +47,7 @@ export(PlotDistributions) 
export(PlotMarginalsNormalInverseWishart) export(PlotVolVsCompositionEfficientFrontier) +export(Prior2Posterior) export(ProjectionStudentT) export(QuantileMixture) export(RandNormalInverseWishart) Added: pkg/Meucci/R/DoubleDecay.R =================================================================== --- pkg/Meucci/R/DoubleDecay.R (rev 0) +++ pkg/Meucci/R/DoubleDecay.R 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,46 @@ +#' Computes a double-decay covariance matrix. +#' +#' This function computes a double-decay covariance matrix for the risk drivers provided, as described in +#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities" +#' GARP Risk Professional, Dec 2010, p 47-51 +#' +#' @param X matrix representing the risk drivers. +#' @param lmd_c numeric representing the low decay (long half-life) for the correlations. +#' @param lmd_s numeric representing the high decay (short half-life) for the volatilities. +#' @return m matrix of zeros, representing the expectation of the risk drivers. +#' @return S matrix representing the double-decay estimation for the correlation matrix of the risk drivers. 
+#' +#' @references +#' \url{http://www.symmys.com/node/150} +#' See Meucci script for "DoubleDecay.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @export + +DoubleDecay = function( X, lmd_c, lmd_s) +{ + + N = dim( X ) + m = matrix( 0, N[2], 1); + + p_c = exp( -lmd_c * ( N[1] - t(rbind( 1:N[1] ) ) ) ); + + p_c = kronecker( matrix( 1, 1, N[2] ), p_c / sum( p_c ) ); # workaround on p_c=repmat( p_c/sum(p_c),1,N); + + S_1 = t( p_c * X ) %*% X; + + C = cov2cor( S_1 ); + + p_s = exp( -lmd_s * ( N[1] - t(rbind( 1:N[1] ) ) ) ); + + p_s = kronecker(matrix(1,1,N[2]),p_s/sum(p_s)); + S_2 = t( p_s*X ) %*% X; + + R = cov2cor(S_2) ; + s = c( 0.0099, 0.0538, 0.0163 ); + + s = sqrt(diag(S_2)); + S = diag(s) %*% C %*% diag(s); + + return( list( m = m , S = S ) ) +} \ No newline at end of file Modified: pkg/Meucci/R/EntropyProg.R =================================================================== --- pkg/Meucci/R/EntropyProg.R 2013-09-05 09:21:51 UTC (rev 2997) +++ pkg/Meucci/R/EntropyProg.R 2013-09-05 11:25:44 UTC (rev 2998) @@ -178,40 +178,8 @@ return ( list ( p_ = p_ , optimizationPerformance = optimizationPerformance ) ) } -#' Calculate the full-confidence posterior distributions of Mu and Sigma -#' -#' \deqn{ \tilde{ \mu } \equiv \mu + \Sigma Q' {\big(Q \Sigma Q' \big)}^{-1} \big( \tilde{\mu}_{Q} - Q \mu \big), -#' \\ \tilde{ \Sigma } \equiv \Sigma + \Sigma G' \big({\big(G \Sigma G' \big)}^{-1} \tilde{ \Sigma }_G {\big(G \Sigma G' \big)}^{-1} - {\big(G \Sigma G' \big)}^{-1} \big) G \Sigma } -#' @param M a numeric vector with the Mu of the normal reference model -#' @param Q a numeric vector used to construct a view on expectation of the linear combination QX -#' @param M_Q a numeric vector with the view of the expectations of QX -#' @param S a covariance matrix for the normal reference model -#' @param G a numeric vector used to construct a view on covariance of the linear combination GX -#' @param S_G a numeric with the expectation associated with the 
covariance of the linear combination GX -#' -#' @return a list with -#' @return M_ a numeric vector with the full-confidence posterior distribution of Mu -#' @return S_ a covariance matrix with the full-confidence posterior distribution of Sigma -#' -#' @references -#' \url{http://www.symmys.com} -#' \url{http://ssrn.com/abstract=1213325} -#' A. Meucci - "Fully Flexible Views: Theory and Practice". See formula (21) and (22) on page 7 -#' See Meucci script Prior2Posterior.m attached to Entropy Pooling Paper -#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} -Prior2Posterior = function( M , Q , M_Q , S , G , S_G ) -{ - # Compute posterior moments - - if ( Q != 0 ) { M_ = M + S %*% t(Q) %*% solve( Q %*% S %*% t(Q) ) %*% ( M_Q - Q %*% M) } - else { M_ = M } - - if ( G != 0 ) { S_ = S + (S %*% t(G)) %*% ( solve(G %*% S %*% t(G)) %*% S_G %*% solve(G %*% S %*% t(G)) - solve( G %*% S %*% t(G)) ) %*% (G %*% S) } - else { S_ = S } - - return( list( M_ = M_ , S_ = S_ ) ) -} + #' Generates histogram #' #' @param X a vector containing the data points Added: pkg/Meucci/R/Fit2Moms.R =================================================================== --- pkg/Meucci/R/Fit2Moms.R (rev 0) +++ pkg/Meucci/R/Fit2Moms.R 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,44 @@ +#' Uses Entropy Pooling to compute a double-decay covariance matrix. +#' +#' This function uses Entropy Pooling to compute a double-decay covariance matrix, as described in +#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities" +#' GARP Risk Professional, Dec 2010, p 47-51 +#' +#' @param X matrix representing the risk drivers. +#' @param m matrix of zeros, representing the expectation of the risk drivers. +#' @param S matrix representing the double-decay estimation for the correlation matrix of the risk drivers. +#' @return p list containing the vector of posterior probabilities and information about the optimization performance. 
+#' +#' @references +#' \url{http://www.symmys.com/node/150} +#' See Meucci script for "S_MainFullFlexProbs.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @export + +Fit2Moms = function( X, m, S) +{ + N = dim(X); + + Aeq = matrix( 1, 1, N[1] ); # constrain probabilities to sum to one... + beq = 1; + + Aeq = rbind( Aeq , t(X) ); # ...constrain the first moments... + beq = rbind( beq, m ); + + SecMom = S + m %*% t(m); #...constrain the second moments... + + for ( k in 1:N[2] ) + { + for ( l in k:N[2] ) + { + Aeq = rbind( Aeq , t(X[ ,k] * X[ ,l] ) ); + beq = rbind( beq, SecMom[k,l] ); + } + } + + p_0 = matrix( 1, N[1], 1) / N[1]; + + return ( p = EntropyProg( p_0, matrix( , 0, 0), matrix( , 0, 0), Aeq , beq)$p_); # ...compute posterior probabilities + +} \ No newline at end of file Added: pkg/Meucci/R/LeastInfoKernel.R =================================================================== --- pkg/Meucci/R/LeastInfoKernel.R (rev 0) +++ pkg/Meucci/R/LeastInfoKernel.R 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,46 @@ +#' Computes least information kernel smoothing +#' +#' This script uses Entropy Pooling to compute least information kernel smoothing, as described in +#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities" +#' GARP Risk Professional, Dec 2010, p 47-51 +#' +#' @param Y Matrix representing the macroeconomic indicator +#' @param y scalar reprenting the target to which Y is expected to be close in the Generalized Empirical Distribution +#' @param h2 N X N matrix +#' +#' @return p list containing the vector of posterior probabilities and information about the optimization performance. 
+#' +#' @references +#' \url{http://www.symmys.com/node/150} +#' See Meucci script for "LeastInfoKernel.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @export + +LeastInfoKernel = function( Y, y, h2 ) +{ + T = dim(Y)[1]; + N = dim(Y)[2]; + Aeq = matrix( 1, 1, T ); # constrain probabilities to sum to one... + beq = 1; + # ...constrain the first moments... + Aeq = rbind( Aeq, t(Y) ); + beq = rbind( beq, y ); + + if( !is.nan(h2) ) + { + SecMom = h2 + y %*% t( y ); # ...constrain the second moments... + for( k in 1:N ) + { + for( l in k:N ) + { + Aeq = rbind( Aeq, ( Y[ , k ] * Y[ , l ] ) ); + beq = rbind( beq, SecMom[ k, l ] ); + } + } + } + p_0 = matrix( 1, T, 1 ) / T; + p = EntropyProg( p_0, matrix(,0,0), matrix(,0,0), Aeq, beq ); # ...compute posterior probabilities + return( p$p_ ); +} + Added: pkg/Meucci/R/PlotDistributions.R =================================================================== --- pkg/Meucci/R/PlotDistributions.R (rev 0) +++ pkg/Meucci/R/PlotDistributions.R 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,41 @@ +#' Plot numerical and analytical prior and posterior distributions +#' +#' @param X a vector containing the dataset +#' @param p a vector cotaining the prior probability values +#' @param Mu a vector containing the prior means +#' @param Sigma a vector containing the prior standard deviations +#' @param p_ a vector containing the posterior probability values +#' @param Mu_ a vector containing the posterior means +#' @param Sigma_ a vector containing the posterior standard deviations +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} +#' @export +PlotDistributions = function( X , p , Mu , Sigma , p_ , Mu_ , Sigma_ ) +{ + J = nrow( X ) + N = ncol( X ) + + NBins = round( 10*log( J ) ) + + for ( n in 1:N ) + { + # set ranges + xl = min( X[ , n ] ) + xh = max( X[ , n ] ) + x = as.matrix(seq(from=xl, to=xh, by=(xh-xl)/100)) + + # posterior numerical + # h3 = pHist(X[ ,n] , p_ , NBins ) + + # posterior analytical + y1 = 
dnorm( x , Mu_[n] , sqrt( Sigma_[n,n] ) ) + h4 = plot( x , y1, type='l', col='red', xlab='', ylab='' ) + + # prior analytical + par(new = TRUE) + y2 = dnorm( x , Mu[n] ,sqrt( Sigma[n,n] ) ) + h2 = plot( x , y2, type='l', col='blue', xlab='', ylab='' ) + + # xlim( cbind( xl , xh ) ) + legend(x = 1.5, y =0.4 ,legend=c("analytical","prior"), lwd=c(0.2,0.2), lty=c(1,1), col=c("red", "blue")) + } +} \ No newline at end of file Modified: pkg/Meucci/R/Prior2Posterior.R =================================================================== --- pkg/Meucci/R/Prior2Posterior.R 2013-09-05 09:21:51 UTC (rev 2997) +++ pkg/Meucci/R/Prior2Posterior.R 2013-09-05 11:25:44 UTC (rev 2998) @@ -1,41 +1,36 @@ -#' Plot numerical and analytical prior and posterior distributions + +#' Calculate the full-confidence posterior distributions of Mu and Sigma #' -#' @param X a vector containing the dataset -#' @param p a vector cotaining the prior probability values -#' @param Mu a vector containing the prior means -#' @param Sigma a vector containing the prior standard deviations -#' @param p_ a vector containing the posterior probability values -#' @param Mu_ a vector containing the posterior means -#' @param Sigma_ a vector containing the posterior standard deviations +#' \deqn{ \tilde{ \mu } \equiv \mu + \Sigma Q' {\big(Q \Sigma Q' \big)}^{-1} \big( \tilde{\mu}_{Q} - Q \mu \big), +#' \\ \tilde{ \Sigma } \equiv \Sigma + \Sigma G' \big({\big(G \Sigma G' \big)}^{-1} \tilde{ \Sigma }_G {\big(G \Sigma G' \big)}^{-1} - {\big(G \Sigma G' \big)}^{-1} \big) G \Sigma } +#' @param M a numeric vector with the Mu of the normal reference model +#' @param Q a numeric vector used to construct a view on expectation of the linear combination QX +#' @param M_Q a numeric vector with the view of the expectations of QX +#' @param S a covariance matrix for the normal reference model +#' @param G a numeric vector used to construct a view on covariance of the linear combination GX +#' @param S_G a numeric with the 
expectation associated with the covariance of the linear combination GX +#' +#' @return a list with +#' @return M_ a numeric vector with the full-confidence posterior distribution of Mu +#' @return S_ a covariance matrix with the full-confidence posterior distribution of Sigma +#' +#' @references +#' \url{http://www.symmys.com} +#' \url{http://ssrn.com/abstract=1213325} +#' A. Meucci - "Fully Flexible Views: Theory and Practice". See formula (21) and (22) on page 7 +#' See Meucci script Prior2Posterior.m attached to Entropy Pooling Paper #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} #' @export -PlotDistributions = function( X , p , Mu , Sigma , p_ , Mu_ , Sigma_ ) + +Prior2Posterior = function( M , Q , M_Q , S , G , S_G ) { - J = nrow( X ) - N = ncol( X ) - - NBins = round( 10*log( J ) ) - - for ( n in 1:N ) - { - # set ranges - xl = min( X[ , n ] ) - xh = max( X[ , n ] ) - x = as.matrix(seq(from=xl, to=xh, by=(xh-xl)/100)) - - # posterior numerical - # h3 = pHist(X[ ,n] , p_ , NBins ) - - # posterior analytical - y1 = dnorm( x , Mu_[n] , sqrt( Sigma_[n,n] ) ) - h4 = plot( x , y1, type='l', col='red', xlab='', ylab='' ) - - # prior analytical - par(new = TRUE) - y2 = dnorm( x , Mu[n] ,sqrt( Sigma[n,n] ) ) - h2 = plot( x , y2, type='l', col='blue', xlab='', ylab='' ) - - # xlim( cbind( xl , xh ) ) - legend(x = 1.5, y =0.4 ,legend=c("analytical","prior"), lwd=c(0.2,0.2), lty=c(1,1), col=c("red", "blue")) - } + # Compute posterior moments + + if ( Q != 0 ) { M_ = M + S %*% t(Q) %*% solve( Q %*% S %*% t(Q) ) %*% ( M_Q - Q %*% M) } + else { M_ = M } + + if ( G != 0 ) { S_ = S + (S %*% t(G)) %*% ( solve(G %*% S %*% t(G)) %*% S_G %*% solve(G %*% S %*% t(G)) - solve( G %*% S %*% t(G)) ) %*% (G %*% S) } + else { S_ = S } + + return( list( M_ = M_ , S_ = S_ ) ) } \ No newline at end of file Added: pkg/Meucci/data/dbFFP.Rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/dbFFP.Rda 
___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/fILMR.Rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/fILMR.Rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/demo/FullFlexProbs.R =================================================================== --- pkg/Meucci/demo/FullFlexProbs.R (rev 0) +++ pkg/Meucci/demo/FullFlexProbs.R 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,160 @@ +#' Computes the Call Price +#' +#' Pricing function to apply to each scenario in order to generate the P&L distribution, as described +#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities" +#' GARP Risk Professional, Dec 2010, p 47-51 +#' +#' @param P matrix of prices +#' @param K +#' @param r risk +#' @param t expiry +#' @param s volatility +#' +#' @return C Prices +#' +#' @references +#' \url{http://www.symmys.com/node/150} +#' See Meucci script for "CallPrice.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} + +CallPrice = function( P, K, r, t, s ) +{ + d_1 = log( P/K ) + ( r + s * s/2 ) * t; + d_2 = d_1 - s * sqrt( t ); + + C = P * pnorm( d_1 ) - K * exp( -r * t ) * pnorm( d_2 ); + + return( C ); +} + + + +#'This script uses Entropy Pooling to compute Fully Flexible Probabilities for historical scenarios +#'based on time periods, market conditions, constraints on moments, etc., as described in +#'A. 
Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities" +#'GARP Risk Professional, Dec 2010, p 47-51 +#' +#' Most recent version of article and code available at +#' http://www.symmys.com/node/150 +#' @references +#' \url{http://www.symmys.com/node/150} +#' See Meucci script for "DoubleDecay.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} + +########################################################################## +# risk drivers scenarios +########################################################################### + +load( "../data/dbFFP.Rda" ) + +Infl = dbFFP$Data[ , length( dbFFP$Names ) ]; +Vix = dbFFP$Data[ , length( dbFFP$Names ) - 1 ]; +Crude = dbFFP$Data[ , length( dbFFP$Names )-3 ]; +Swp10 = dbFFP$Data[ , 2 ]; +SnP = dbFFP$Data[ , 4 ]; + +X = diff( log( cbind( SnP, Vix, Swp10 ) ) ); +Y = matrix(Infl[ -nrow( dbFFP$Data ) ]); + +########################################################################## +#assign probabilities to historical scenarios +########################################################################### +# DefineProbs = "1" : rolling window +# DefineProbs = "2" : exponential smoothing +# DefineProbs = "3" : market conditions +# DefineProbs = "4" : kernel damping +# DefineProbs = "5" : partial information prox. 
kernel damping +# DefineProbs = "6" : partial information: match covariance + +DefineProbs = "6"; + +T = dim(X)[1]; +p = matrix( 0, T, 1 ); + + +if( DefineProbs = 1) +{ + # rolling window + + tau = 2 * 252; + p[ 1:tau ] = 1; + p = p / sum( p ); +} +} else if( DefineProbs = 2 ) +{ + # exponential smoothing + + lmd = 0.0166; + p = exp( -lmd * ( T - ( 1 : T ) ) ); + p = p / sum( p ); + +} else if( DefineProbs = 3 ) +{ + # market conditions + Cond = Y >= 2.8; + p[ Cond ] = 1; + p = p / sum( p ); + +} else if( DefineProbs = 4 ) +{ + # kernel damping + y = 3; + h2 = cov( matrix( diff( Y ) ) ); + p = dmvnorm( Y, y, h2 ); + p = p / sum( p ); + +} else if( DefineProbs = 5 ) +{ + # partial information prox. kernel damping + y = 3; + h2 = NaN; # set h2=NaN for no conditioning on second moments + h2 = cov( 1 * diff( Y ) ); + p = LeastInfoKernel( Y, y, h2 ); + +} else if( DefineProbs = 6 ){ + #partial information: match covariance + + l_c = 0.0055; + l_s = 0.0166; + + N = 20; + Dd = DoubleDecay( X, l_c, l_s ); + + p = Fit2Moms( X, Dd$m, Dd$S ); +} + +########################################################################### +# P&L scenarios +########################################################################### + +N = 20; + +# call parameters +S_0 = SnP[ length(SnP) ]; +vol_0 = Vix[ length(Vix)]; +rf_0 = Swp10[ length(Swp10) ]; +K = S_0 * ( seq( 0.8, 1.1, length = N) ); +Expiry = ( 2: (N+1) ) / 252; + +S_T = S_0 * exp( X[ , 1 ] ); +vol_T = vol_0 * exp( X[ , 2 ] ); +rf_T = rf_0 * exp( X[ , 3 ] ); + +PnL = matrix( NaN, T, N ); + +# securities scenarios +for( n in 1:N ) +{ + Call_1 = CallPrice( S_T, K[ n ], rf_T, Expiry[ n ] - 1 / 252, vol_T ); + Call_0 = CallPrice( S_0, K[ n ], rf_0, Expiry[ n ], vol_0 ); + PnL[ , n ] = Call_1 - Call_0; +} + +# portfolio scenarios +u = -rbind( -matrix( 1, N/2, 1 ), matrix( 1, N/2, 1 ) ); # number of units (contracts/shares/etc) +PnL_u = PnL %*% u; + + + Added: pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R 
=================================================================== --- pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R (rev 0) +++ pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,140 @@ +#'This his script computes the liquidity-risk and funding-risk adjusted P&L distribution, as described in +#' A. Meucci, "A Fully Integrated Liquidity and Market Risk Model", Financial Analyst Journal, 68, 6, 35-47 (2012) +#' +#' @references +#' \url{http://www.symmys.com/node/350} +#' See Meucci script "S_Main.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} + +# INPUTS +#####################################################################* +# liquidation policy at horizon as fraction of investment +Policy=-1; + +# collinearity of liquidity perturbations +CollinLiq=1; + +# select only some stock in portfolio and equally allocate capital as fraction of daily dollar volume +Selectstock = 1:10 ; +Capital_perDailyVolume = 0.2; + + +# PREPARE DATA +#####################################################################* +# load fILMR$Daily_Prices: closing prices +# fILMR$Daily_Volumes_Shares: daily volumes +# fILMR$Daily_Liq: Morgan Stanley liquidity index +load("../data/fILMR.Rda") + +# Prices and returns +#Daily_Prices = Daily_Prices(:,Selectstock); +Prices_0 = matrix( fILMR$Daily_Prices[ nrow(fILMR$Daily_Prices), ] ); +Daily_LogRets = log( fILMR$Daily_Prices[ -nrow(fILMR$Daily_Prices), ] / fILMR$Daily_Prices[ -1, ] ); +J = dim( Daily_LogRets )[1] +N = dim( Daily_LogRets )[2] + +# volumes in shares +#Daily_Volumes = Daily_Volumes_Shares[ , Selectstock ]; +Volumes_0 = matrix( fILMR$Daily_Volumes_Shares[ nrow(fILMR$Daily_Volumes_Shares), ]); + +Volumes_t = matrix( apply( fILMR$Daily_Volumes_Shares[ -(1:(nrow(fILMR$Daily_Volumes_Shares)-250)), ], 2, mean ) ); + +# liquidity index +Daily_LiqChanges = diff(fILMR$Daily_Liq); +Liq_0 = matrix( fILMR$Daily_Liq[ length(fILMR$Daily_Liq), ] ); + +# normal simulations + X = 
cbind( Daily_LogRets, Daily_LiqChanges ); + m_X = apply( X, 2, mean ); + s2_X = cov( X ); #covariance + J = 100000; + #RandStream.setGlobalStream(RandStream('mt19937ar', 'seed', 11)); + X = rmvnorm( J, m_X, s2_X ); + Daily_LogRets = X[ ,1:N ]; + Daily_LiqChanges = X[ , dim(X)[2] ]; + +# Fully Flexible Probabilties associated with each scenario +Probs = matrix( 1, J, 1 ) / J; + +# stock prices at horizon +Prices_t = repmat( t( Prices_0) , J, 1 ) * exp(Daily_LogRets); + +# liquidity index at horizon +Liq_t = Liq_0 * exp(Daily_LiqChanges); + +# pure market risk: p&L due to market risk +PnL_mkt = Prices_t - repmat( t(Prices_0), J, 1 ); + + +# PORTFOLIO COMPUTATIONS +###################################################################### +# portfolio and liquidation policy +Weights = matrix( 0, N, 1); +Weights[ Selectstock ] = 1 / length(Selectstock); +DollarVolume_0 = t(Volumes_0) %*% Prices_0; +Capital = (Capital_perDailyVolume %*% DollarVolume_0)[1]; + +h = Capital * Weights / Prices_0; + +PnL_mkt_h = PnL_mkt %*% h; + +# LIQUIDITY ADJUSTMENT +###################################################################### +# liquidation policy +Dh = Policy * h; + +# market impact +b_a = 0.01 * matrix( 1, N, 1 ); +Linear =-b_a * Prices_0 * abs(Dh); +NonLinear = -(10^5) * Prices_0 * matrix( apply( Daily_LogRets, 2, sd ) ) * ( ( abs(Dh) / Volumes_t ) ^ 1.5); +m_Dh = Linear + NonLinear; + +# state-dependent expected liquidity impact on all stocks +s_g1 = 0.5 * sd( PnL_mkt_h ); +g1 = -pmin( PnL_mkt_h, -s_g1 ) / s_g1; +m_Dh_x = repmat( g1, 1, N ) * repmat( t(m_Dh), J, 1 ); # (14) + +# state-dependent expected liquidity impact on portfolio +m_Dh_h = m_Dh_x %*% matrix( 1, N, 1 ); # (23) + +# state-independent uncertainty on liquidity impact on portfolio +s_Dh = 1.5 * m_Dh; # +r2_Dh = ( 1 - CollinLiq ) * cor( Daily_LogRets ) + CollinLiq * matrix( 1, N, N ); # +s2_Dh = diag( s_Dh[,] , length(s_Dh) ) %*% r2_Dh %*% diag( s_Dh[,], length( s_Dh ) ); # +s2_Dh_h = t( matrix( 1, N, 1 ) ) %*% 
s2_Dh %*% matrix( 1, N, 1 ); # +s_Dh_h = sqrt(s2_Dh_h); +s_Dh_h = pmax( s_Dh_h, 0.01 * std(PnL_mkt_h) ); # regularization + +# TOTAL P&L +###################################################################### +# conditional center and scatter +m_j = PnL_mkt_h + m_Dh_h; +s_j = s_Dh_h[1] * matrix( 1, J, 1 ); + +# pdf and cdf: taking and not taking into account funding cost +nu = 100; +f_Pi = function(x){ t( Probs / s_j) %*% dt( (x-m_j) / s_j, nu ) }; +F_Pi = function(x){ t( Probs ) %*% pt( (x-m_j) / s_j, nu ) }; + +NGrid = 200; +x_= seq( min(PnL_mkt_h) - s_Dh_h, max(PnL_mkt_h) + s_Dh_h, length = NGrid ); +p_= NULL; +f_Pi_plot = NULL; +f_Pi_funding_plot = NULL; +for( k in 1:NGrid ) +{ + p_= rbind( p_, F_Pi( x_[ k ] ) ); + f_Pi_plot = rbind( f_Pi_plot, f_Pi( x_[ k ] ) ); +} + +########################################################################### +# plots +dev.new() +NumBins = round( 10 * log(J) ); +hist = hist(PnL_mkt_h,NumBins, plot = FALSE ); # compute bin width +D = hist$mids[ 2 ]- hist$mids[ 1 ]; +hh = plot( hist$mids, hist$counts / ( J * D ), type = "h", xlab ="", ylab = "" ); # plot histogram +hh1 = lines(x_,f_Pi_plot, col="red"); + +legend( "topright", 1.9, c("pure market P&L","market + liquidity P&L"), col = c( "black", "red"), lty=1, bg = "gray90" ); \ No newline at end of file Added: pkg/Meucci/man/DoubleDecay.Rd =================================================================== --- pkg/Meucci/man/DoubleDecay.Rd (rev 0) +++ pkg/Meucci/man/DoubleDecay.Rd 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,37 @@ +\name{DoubleDecay} +\alias{DoubleDecay} +\title{Computes a double-decay covariance matrix.} +\usage{ + DoubleDecay(X, lmd_c, lmd_s) +} +\arguments{ + \item{X}{matrix representing the risk drivers.} + + \item{lmd_c}{numeric representing the low decay (long + half-life) for the correlations.} + + \item{lmd_s}{numeric representing the high decay (short + half-life) for the volatilities.} +} +\value{ + m matrix of zeros, representing the expectation of 
the + risk drivers. + + S matrix representing the double-decay estimation for the + correlation matrix of the risk drivers. +} +\description{ + This function computes a double-decay covariance matrix + for the risk drivers provided, as described in A. Meucci, + "Personalized Risk Management: Historical Scenarios with + Fully Flexible Probabilities" GARP Risk Professional, Dec + 2010, p 47-51 +} +\author{ + Xavier Valls \email{flamejat at gmail.com} +} +\references{ + \url{http://www.symmys.com/node/150} See Meucci script + for "DoubleDecay.m" +} + Added: pkg/Meucci/man/Fit2Moms.Rd =================================================================== --- pkg/Meucci/man/Fit2Moms.Rd (rev 0) +++ pkg/Meucci/man/Fit2Moms.Rd 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,34 @@ +\name{Fit2Moms} +\alias{Fit2Moms} +\title{Uses Entropy Pooling to compute a double-decay covariance matrix.} +\usage{ + Fit2Moms(X, m, S) +} +\arguments{ + \item{X}{matrix representing the risk drivers.} + + \item{m}{matrix of zeros, representing the expectation of + the risk drivers.} + + \item{S}{matrix representing the double-decay estimation + for the correlation matrix of the risk drivers.} +} +\value{ + p list containing the vector of posterior probabilities + and information about the optimization performance. +} +\description{ + This function uses Entropy Pooling to compute a + double-decay covariance matrix, as described in A. 
+ Meucci, "Personalized Risk Management: Historical + Scenarios with Fully Flexible Probabilities" GARP Risk + Professional, Dec 2010, p 47-51 +} +\author{ + Xavier Valls \email{flamejat at gmail.com} +} +\references{ + \url{http://www.symmys.com/node/150} See Meucci script + for "S_MainFullFlexProbs.m" +} + Added: pkg/Meucci/man/LeastInfoKernel.Rd =================================================================== --- pkg/Meucci/man/LeastInfoKernel.Rd (rev 0) +++ pkg/Meucci/man/LeastInfoKernel.Rd 2013-09-05 11:25:44 UTC (rev 2998) @@ -0,0 +1,34 @@ +\name{LeastInfoKernel} +\alias{LeastInfoKernel} +\title{Computes least information kernel smoothing} +\usage{ + LeastInfoKernel(Y, y, h2) +} +\arguments{ + \item{Y}{Matrix representing the macroeconomic indicator} + + \item{y}{scalar reprenting the target to which Y is + expected to be close in the Generalized Empirical + Distribution} + + \item{h2}{N X N matrix} +} +\value{ + p list containing the vector of posterior probabilities + and information about the optimization performance. +} +\description{ + This script uses Entropy Pooling to compute least + information kernel smoothing, as described in A. 
Meucci, + "Personalized Risk Management: Historical Scenarios with + Fully Flexible Probabilities" GARP Risk Professional, Dec + 2010, p 47-51 +} +\author{ + Xavier Valls \email{flamejat at gmail.com} +} +\references{ + \url{http://www.symmys.com/node/150} See Meucci script + for "LeastInfoKernel.m" +} + Modified: pkg/Meucci/man/PlotDistributions.Rd =================================================================== --- pkg/Meucci/man/PlotDistributions.Rd 2013-09-05 09:21:51 UTC (rev 2997) +++ pkg/Meucci/man/PlotDistributions.Rd 2013-09-05 11:25:44 UTC (rev 2998) @@ -1,32 +1,32 @@ -\name{PlotDistributions} -\alias{PlotDistributions} -\title{Plot numerical and analytical prior and posterior distributions} -\usage{ - PlotDistributions(X, p, Mu, Sigma, p_, Mu_, Sigma_) -} -\arguments{ - \item{X}{a vector containing the dataset} - - \item{p}{a vector cotaining the prior probability values} - - \item{Mu}{a vector containing the prior means} - - \item{Sigma}{a vector containing the prior standard - deviations} - - \item{p_}{a vector containing the posterior probability - values} - - \item{Mu_}{a vector containing the posterior means} - - \item{Sigma_}{a vector containing the posterior standard - deviations} -} -\description{ - Plot numerical and analytical prior and posterior - distributions -} -\author{ - Ram Ahluwalia \email{ram at wingedfootcapital.com} -} - +\name{PlotDistributions} +\alias{PlotDistributions} +\title{Plot numerical and analytical prior and posterior distributions} +\usage{ + PlotDistributions(X, p, Mu, Sigma, p_, Mu_, Sigma_) +} +\arguments{ + \item{X}{a vector containing the dataset} + + \item{p}{a vector cotaining the prior probability values} + + \item{Mu}{a vector containing the prior means} + + \item{Sigma}{a vector containing the prior standard + deviations} + + \item{p_}{a vector containing the posterior probability + values} + + \item{Mu_}{a vector containing the posterior means} + + \item{Sigma_}{a vector containing the posterior standard + 
deviations} +} +\description{ + Plot numerical and analytical prior and posterior + distributions +} +\author{ + Ram Ahluwalia \email{ram at wingedfootcapital.com} +} + From noreply at r-forge.r-project.org Thu Sep 5 18:29:00 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 18:29:00 +0200 (CEST) Subject: [Returnanalytics-commits] r2999 - in pkg/PortfolioAnalytics: R man Message-ID: <20130905162900.D069E183CEE@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-05 18:29:00 +0200 (Thu, 05 Sep 2013) New Revision: 2999 Removed: pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd Modified: pkg/PortfolioAnalytics/R/charts.efficient.frontier.R pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd Log: Updating documentation for chart.Weights.EF and chart.EfficientFrontier generic methods. Modified: pkg/PortfolioAnalytics/R/charts.efficient.frontier.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.efficient.frontier.R 2013-09-05 11:25:44 UTC (rev 2998) +++ pkg/PortfolioAnalytics/R/charts.efficient.frontier.R 2013-09-05 16:29:00 UTC (rev 2999) @@ -2,7 +2,7 @@ #' Chart the efficient frontier and risk-return scatter #' #' Chart the efficient frontier and risk-return scatter of the assets for -#' optimize.portfolio and efficient.frontier objects +#' optimize.portfolio. or efficient.frontier objects #' #' @details #' For objects created by optimize.portfolio with 'DEoptim', 'random', or 'pso' @@ -33,13 +33,13 @@ #' will be plotted using a risk free rate of 0. Set \code{rf=NULL} to omit #' this from the plot. #' -#' @param object object to chart +#' @param object object of class optimize.portfolio.ROI to chart +#' @param \dots passthru parameters to \code{\link{plot}} #' @param match.col string name of column to use for risk (horizontal axis). 
#' \code{match.col} must match the name of an objective measure in the #' \code{objective_measures} or \code{opt_values} slot in the object created #' by \code{\link{optimize.portfolio}}. #' @param n.portfolios number of portfolios to use to plot the efficient frontier -#' @param \dots passthru parameters to \code{\link{plot}} #' @param xlim set the x-axis limit, same as in \code{\link{plot}} #' @param ylim set the y-axis limit, same as in \code{\link{plot}} #' @param cex.axis A numerical value giving the amount by which the axis should be magnified relative to the default. @@ -55,17 +55,16 @@ #' @param pch.assets plotting character of the assets, same as in \code{\link{plot}} #' @param cex.assets A numerical value giving the amount by which the asset points and labels should be magnified relative to the default. #' @author Ross Bennett -#' @aliases chart.EfficientFrontier.optimize.portfolio.ROI chart.EfficientFrontier.optimize.portfolio chart.EfficientFrontier.efficient.frontier +#' @rdname chart.EfficientFrontier #' @export -chart.EfficientFrontier <- function(object, match.col, n.portfolios, ...){ +chart.EfficientFrontier <- function(object, ...){ UseMethod("chart.EfficientFrontier") } - +#' @rdname chart.EfficientFrontier #' @method chart.EfficientFrontier optimize.portfolio.ROI #' @S3method chart.EfficientFrontier optimize.portfolio.ROI -#' @export -chart.EfficientFrontier.optimize.portfolio.ROI <- function(object, match.col="ES", n.portfolios=25, ..., xlim=NULL, ylim=NULL, cex.axis=0.8, element.color="darkgray", main="Efficient Frontier", rf=0, tangent.line=TRUE, cex.legend=0.8, chart.assets=TRUE, labels.assets=TRUE, pch.assets=21, cex.assets=0.8){ +chart.EfficientFrontier.optimize.portfolio.ROI <- function(object, ..., match.col="ES", n.portfolios=25, xlim=NULL, ylim=NULL, cex.axis=0.8, element.color="darkgray", main="Efficient Frontier", RAR.text="SR", rf=0, tangent.line=TRUE, cex.legend=0.8, chart.assets=TRUE, labels.assets=TRUE, pch.assets=21, 
cex.assets=0.8){ if(!inherits(object, "optimize.portfolio.ROI")) stop("object must be of class optimize.portfolio.ROI") portf <- object$portfolio @@ -160,7 +159,7 @@ points(x.f[idx.maxsr], y.f[idx.maxsr], pch=16) # text(x=x.f[idx.maxsr], y=y.f[idx.maxsr], labels="T", pos=4, cex=0.8) # Add lengend with max Sharpe Ratio and risk-free rate - legend("topleft", paste(rar, " = ", signif(srmax,3), sep = ""), bty = "n", cex=cex.legend) + legend("topleft", paste(RAR.text, " = ", signif(srmax,3), sep = ""), bty = "n", cex=cex.legend) legend("topleft", inset = c(0,0.05), paste("rf = ", signif(rf,3), sep = ""), bty = "n", cex=cex.legend) } axis(1, cex.axis = cex.axis, col = element.color) @@ -168,11 +167,10 @@ box(col = element.color) } - +#' @rdname chart.EfficientFrontier #' @method chart.EfficientFrontier optimize.portfolio #' @S3method chart.EfficientFrontier optimize.portfolio -#' @export -chart.EfficientFrontier.optimize.portfolio <- function(object, match.col="ES", n.portfolios=25, ..., xlim=NULL, ylim=NULL, cex.axis=0.8, element.color="darkgray", main="Efficient Frontier", RAR.text="SR", rf=0, tangent.line=TRUE, cex.legend=0.8, chart.assets=TRUE, labels.assets=TRUE, pch.assets=21, cex.assets=0.8){ +chart.EfficientFrontier.optimize.portfolio <- function(object, ..., match.col="ES", n.portfolios=25, xlim=NULL, ylim=NULL, cex.axis=0.8, element.color="darkgray", main="Efficient Frontier", RAR.text="SR", rf=0, tangent.line=TRUE, cex.legend=0.8, chart.assets=TRUE, labels.assets=TRUE, pch.assets=21, cex.assets=0.8){ # This function will work with objects of class optimize.portfolio.DEoptim, # optimize.portfolio.random, and optimize.portfolio.pso @@ -269,43 +267,12 @@ box(col = element.color) } -# ' chart weights along an efficient frontier -# ' -# ' This creates a stacked column chart of the weights of portfolios along an efficient frontier. -# ' -# ' @param object object to chart. -# ' @param \dots passthru parameters to \code{barplot}. 
-# ' @param colorset color palette to use. -# ' @param n.portfolios number of portfolios to extract along the efficient frontier. -# ' This is only used for objects of class \code{optimize.portfolio} -# ' @param by.groups TRUE/FALSE. If TRUE, the weights by group are charted. -# ' @param match.col match.col string name of column to use for risk (horizontal axis). -# ' Must match the name of an objective. -# ' @param main main title used in the plot. -# ' @param cex.lab The magnification to be used for x-axis and y-axis labels relative to the current setting of 'cex'. -# ' @param cex.axis The magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}}. -# ' @param cex.legend The magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}}. -# ' @param legend.labels character vector to use for the legend labels -# ' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. -# ' @param legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted. -# ' @author Ross Bennett -# ' @aliases chart.Weights.EF.efficient.frontier chart.Weights.EF.optimize.portfolio -# ' @export #' Chart weights along an efficient frontier #' -#' This function is a generic method to chart weights along an efficient frontier +#' This function produces a stacked barplot of weights along the efficient frontier. #' -#' @param object object to chart -#' @param \dots any other passthru parameters -#' @export -chart.Weights.EF <- function(object, ...){ - UseMethod("chart.Weights.EF") -} - -#' Chart weights along an efficient frontier for an efficient.frontier object -#' -#' @param object object of class \code{efficient.frontier} +#' @param object object of class \code{efficient.frontier} or \code{optimize.portfolio} #' @param \dots passthru parameters to \code{barplot}. 
#' @param colorset color palette to use #' @param n.portfolios number of portfolios to extract along the efficient frontier @@ -319,6 +286,14 @@ #' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. #' @param legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted #' @author Ross Bennett +#' @rdname chart.Weights.EF +#' @export +chart.Weights.EF <- function(object, ...){ + UseMethod("chart.Weights.EF") +} + + +#' @rdname chart.Weights.EF #' @method chart.Weights.EF efficient.frontier #' @S3method chart.Weights.EF efficient.frontier chart.Weights.EF.efficient.frontier <- function(object, ..., colorset=NULL, n.portfolios=25, by.groups=FALSE, match.col="ES", main="", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray", legend.loc="topright"){ @@ -443,22 +418,7 @@ box(col=element.color) } -#' Chart weights along an efficient frontier for an efficient.frontier object -#' -#' @param object object of class \code{efficient.frontier} -#' @param \dots passthru parameters to \code{barplot}. -#' @param colorset color palette to use -#' @param n.portfolios number of portfolios to extract along the efficient frontier -#' @param by.groups TRUE/FALSE. If TRUE, the group weights are charted -#' @param match.col string name of column to use for risk (horizontal axis). Must match the name of an objective. -#' @param main title used in the plot. 
-#' @param cex.lab The magnification to be used for x-axis and y-axis labels relative to the current setting of 'cex' -#' @param cex.axis The magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}} -#' @param cex.legend The magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}} -#' @param legend.labels character vector to use for the legend labels -#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. -#' @param legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted -#' @author Ross Bennett +#' @rdname chart.Weights.EF #' @method chart.Weights.EF optimize.portfolio #' @S3method chart.Weights.EF optimize.portfolio chart.Weights.EF.optimize.portfolio <- function(object, ..., colorset=NULL, n.portfolios=25, by.groups=FALSE, match.col="ES", main="", cex.lab=0.8, cex.axis=0.8, cex.legend=0.8, legend.labels=NULL, element.color="darkgray", legend.loc="topright"){ @@ -474,10 +434,10 @@ legend.loc=legend.loc) } +#' @rdname chart.EfficientFrontier #' @method chart.EfficientFrontier efficient.frontier #' @S3method chart.EfficientFrontier efficient.frontier -#' @export -chart.EfficientFrontier.efficient.frontier <- function(object, match.col="ES", n.portfolios=NULL, ..., xlim=NULL, ylim=NULL, cex.axis=0.8, element.color="darkgray", main="Efficient Frontier", RAR.text="SR", rf=0, tangent.line=TRUE, cex.legend=0.8, chart.assets=TRUE, labels.assets=TRUE, pch.assets=21, cex.assets=0.8){ +chart.EfficientFrontier.efficient.frontier <- function(object, ..., match.col="ES", n.portfolios=NULL, xlim=NULL, ylim=NULL, cex.axis=0.8, element.color="darkgray", main="Efficient Frontier", RAR.text="SR", rf=0, tangent.line=TRUE, cex.legend=0.8, chart.assets=TRUE, labels.assets=TRUE, pch.assets=21, cex.assets=0.8){ if(!inherits(object, 
"efficient.frontier")) stop("object must be of class 'efficient.frontier'") # get the returns and efficient frontier object Deleted: pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd 2013-09-05 11:25:44 UTC (rev 2998) +++ pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd 2013-09-05 16:29:00 UTC (rev 2999) @@ -1,107 +0,0 @@ -\name{chart.EfficientFrontier} -\alias{chart.EfficientFrontier} -\alias{chart.EfficientFrontier.efficient.frontier} -\alias{chart.EfficientFrontier.optimize.portfolio} -\alias{chart.EfficientFrontier.optimize.portfolio.ROI} -\title{Chart the efficient frontier and risk-return scatter} -\usage{ - chart.EfficientFrontier(object, match.col, n.portfolios, - ...) -} -\arguments{ - \item{object}{object to chart} - - \item{match.col}{string name of column to use for risk - (horizontal axis). \code{match.col} must match the name - of an objective measure in the \code{objective_measures} - or \code{opt_values} slot in the object created by - \code{\link{optimize.portfolio}}.} - - \item{n.portfolios}{number of portfolios to use to plot - the efficient frontier} - - \item{\dots}{passthru parameters to \code{\link{plot}}} - - \item{xlim}{set the x-axis limit, same as in - \code{\link{plot}}} - - \item{ylim}{set the y-axis limit, same as in - \code{\link{plot}}} - - \item{cex.axis}{A numerical value giving the amount by - which the axis should be magnified relative to the - default.} - - \item{element.color}{provides the color for drawing - less-important chart elements, such as the box lines, - axis lines, etc.} - - \item{main}{a main title for the plot} - - \item{RAR.text}{Risk Adjusted Return ratio text to plot - in the legend} - - \item{rf}{risk free rate. 
If \code{rf} is not null, the - maximum Sharpe Ratio or modified Sharpe Ratio tangency - portfolio will be plotted} - - \item{tangent.line}{TRUE/FALSE to plot the tangent line} - - \item{cex.legend}{A numerical value giving the amount by - which the legend should be magnified relative to the - default.} - - \item{chart.assets}{TRUE/FALSE to include the assets} - - \item{labels.assets}{TRUE/FALSE to include the asset - names in the plot. \code{chart.assets} must be - \code{TRUE} to plot asset names} - - \item{pch.assets}{plotting character of the assets, same - as in \code{\link{plot}}} - - \item{cex.assets}{A numerical value giving the amount by - which the asset points and labels should be magnified - relative to the default.} -} -\description{ - Chart the efficient frontier and risk-return scatter of - the assets for optimize.portfolio and efficient.frontier - objects -} -\details{ - For objects created by optimize.portfolio with 'DEoptim', - 'random', or 'pso' specified as the optimize_method: - \itemize{ \item The efficient frontier plotted is based - on the the trace information (sets of portfolios tested - by the solver at each iteration) in objects created by - \code{optimize.portfolio}. } - - For objects created by optimize.portfolio with 'ROI' - specified as the optimize_method: \itemize{ \item The - mean-StdDev or mean-etl efficient frontier can be plotted - for optimal portfolio objects created by - \code{optimize.portfolio}. - - \item If \code{match.col="StdDev"}, the mean-StdDev - efficient frontier is plotted. - - \item If \code{match.col="ETL"} (also "ES" or "CVaR"), - the mean-etl efficient frontier is plotted. } - - Note that \code{trace=TRUE} must be specified in - \code{\link{optimize.portfolio}} - - GenSA does not return any useable trace information for - portfolios tested at each iteration, therfore we cannot - extract and chart an efficient frontier. 
- - By default, the tangency portfolio (maximum Sharpe Ratio - or modified Sharpe Ratio) will be plotted using a risk - free rate of 0. Set \code{rf=NULL} to omit this from the - plot. -} -\author{ - Ross Bennett -} - Modified: pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd 2013-09-05 11:25:44 UTC (rev 2998) +++ pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd 2013-09-05 16:29:00 UTC (rev 2999) @@ -1,16 +1,72 @@ \name{chart.Weights.EF} \alias{chart.Weights.EF} +\alias{chart.Weights.EF.efficient.frontier} +\alias{chart.Weights.EF.optimize.portfolio} \title{Chart weights along an efficient frontier} \usage{ chart.Weights.EF(object, ...) + + \method{chart.Weights.EF}{efficient.frontier} (object, + ..., colorset = NULL, n.portfolios = 25, + by.groups = FALSE, match.col = "ES", main = "", + cex.lab = 0.8, cex.axis = 0.8, cex.legend = 0.8, + legend.labels = NULL, element.color = "darkgray", + legend.loc = "topright") + + \method{chart.Weights.EF}{optimize.portfolio} (object, + ..., colorset = NULL, n.portfolios = 25, + by.groups = FALSE, match.col = "ES", main = "", + cex.lab = 0.8, cex.axis = 0.8, cex.legend = 0.8, + legend.labels = NULL, element.color = "darkgray", + legend.loc = "topright") } \arguments{ - \item{object}{object to chart} + \item{object}{object of class \code{efficient.frontier} + or \code{optimize.portfolio}} - \item{\dots}{any other passthru parameters} + \item{\dots}{passthru parameters to \code{barplot}.} + + \item{colorset}{color palette to use} + + \item{n.portfolios}{number of portfolios to extract along + the efficient frontier} + + \item{by.groups}{TRUE/FALSE. If TRUE, the group weights + are charted} + + \item{match.col}{string name of column to use for risk + (horizontal axis). 
Must match the name of an objective.} + + \item{main}{title used in the plot.} + + \item{cex.lab}{The magnification to be used for x-axis + and y-axis labels relative to the current setting of + 'cex'} + + \item{cex.axis}{The magnification to be used for sizing + the axis text relative to the current setting of 'cex', + similar to \code{\link{plot}}} + + \item{cex.legend}{The magnification to be used for sizing + the legend relative to the current setting of 'cex', + similar to \code{\link{plot}}} + + \item{legend.labels}{character vector to use for the + legend labels} + + \item{element.color}{provides the color for drawing + less-important chart elements, such as the box lines, + axis lines, etc.} + + \item{legend.loc}{NULL, "topright", "right", or + "bottomright". If legend.loc is NULL, the legend will not + be plotted} } \description{ - This function is a generic method to chart weights along - an efficient frontier + This function produces a stacked barplot of weights along + the efficient frontier. } +\author{ + Ross Bennett +} From noreply at r-forge.r-project.org Thu Sep 5 18:30:29 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 18:30:29 +0200 (CEST) Subject: [Returnanalytics-commits] r3000 - pkg/PortfolioAnalytics/man Message-ID: <20130905163029.BF4E6183CEE@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-05 18:30:29 +0200 (Thu, 05 Sep 2013) New Revision: 3000 Added: pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd Log: Adding Rd file for chart.EfficientFrontier. 
NOTE: wrapping causes R CMD check warning, need to change manually so it passes R CMD check Added: pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd 2013-09-05 16:30:29 UTC (rev 3000) @@ -0,0 +1,131 @@ +\name{chart.EfficientFrontier} +\alias{chart.EfficientFrontier} +\alias{chart.EfficientFrontier.efficient.frontier} +\alias{chart.EfficientFrontier.optimize.portfolio} +\alias{chart.EfficientFrontier.optimize.portfolio.ROI} +\title{Chart the efficient frontier and risk-return scatter} +\usage{ + chart.EfficientFrontier(object, ...) + + \method{chart.EfficientFrontier}{optimize.portfolio.ROI} (object, ..., match.col = "ES", n.portfolios = 25, + xlim = NULL, ylim = NULL, cex.axis = 0.8, + element.color = "darkgray", + main = "Efficient Frontier", RAR.text = "SR", rf = 0, + tangent.line = TRUE, cex.legend = 0.8, + chart.assets = TRUE, labels.assets = TRUE, + pch.assets = 21, cex.assets = 0.8) + + \method{chart.EfficientFrontier}{optimize.portfolio} (object, ..., match.col = "ES", n.portfolios = 25, + xlim = NULL, ylim = NULL, cex.axis = 0.8, + element.color = "darkgray", + main = "Efficient Frontier", RAR.text = "SR", rf = 0, + tangent.line = TRUE, cex.legend = 0.8, + chart.assets = TRUE, labels.assets = TRUE, + pch.assets = 21, cex.assets = 0.8) + + \method{chart.EfficientFrontier}{efficient.frontier} (object, ..., match.col = "ES", n.portfolios = NULL, + xlim = NULL, ylim = NULL, cex.axis = 0.8, + element.color = "darkgray", + main = "Efficient Frontier", RAR.text = "SR", rf = 0, + tangent.line = TRUE, cex.legend = 0.8, + chart.assets = TRUE, labels.assets = TRUE, + pch.assets = 21, cex.assets = 0.8) +} +\arguments{ + \item{object}{object of class optimize.portfolio.ROI to + chart} + + \item{\dots}{passthru parameters to \code{\link{plot}}} + + \item{match.col}{string name 
of column to use for risk + (horizontal axis). \code{match.col} must match the name + of an objective measure in the \code{objective_measures} + or \code{opt_values} slot in the object created by + \code{\link{optimize.portfolio}}.} + + \item{n.portfolios}{number of portfolios to use to plot + the efficient frontier} + + \item{xlim}{set the x-axis limit, same as in + \code{\link{plot}}} + + \item{ylim}{set the y-axis limit, same as in + \code{\link{plot}}} + + \item{cex.axis}{A numerical value giving the amount by + which the axis should be magnified relative to the + default.} + + \item{element.color}{provides the color for drawing + less-important chart elements, such as the box lines, + axis lines, etc.} + + \item{main}{a main title for the plot} + + \item{RAR.text}{Risk Adjusted Return ratio text to plot + in the legend} + + \item{rf}{risk free rate. If \code{rf} is not null, the + maximum Sharpe Ratio or modified Sharpe Ratio tangency + portfolio will be plotted} + + \item{tangent.line}{TRUE/FALSE to plot the tangent line} + + \item{cex.legend}{A numerical value giving the amount by + which the legend should be magnified relative to the + default.} + + \item{chart.assets}{TRUE/FALSE to include the assets} + + \item{labels.assets}{TRUE/FALSE to include the asset + names in the plot. \code{chart.assets} must be + \code{TRUE} to plot asset names} + + \item{pch.assets}{plotting character of the assets, same + as in \code{\link{plot}}} + + \item{cex.assets}{A numerical value giving the amount by + which the asset points and labels should be magnified + relative to the default.} +} +\description{ + Chart the efficient frontier and risk-return scatter of + the assets for optimize.portfolio. 
or efficient.frontier + objects +} +\details{ + For objects created by optimize.portfolio with 'DEoptim', + 'random', or 'pso' specified as the optimize_method: + \itemize{ \item The efficient frontier plotted is based + on the the trace information (sets of portfolios tested + by the solver at each iteration) in objects created by + \code{optimize.portfolio}. } + + For objects created by optimize.portfolio with 'ROI' + specified as the optimize_method: \itemize{ \item The + mean-StdDev or mean-etl efficient frontier can be plotted + for optimal portfolio objects created by + \code{optimize.portfolio}. + + \item If \code{match.col="StdDev"}, the mean-StdDev + efficient frontier is plotted. + + \item If \code{match.col="ETL"} (also "ES" or "CVaR"), + the mean-etl efficient frontier is plotted. } + + Note that \code{trace=TRUE} must be specified in + \code{\link{optimize.portfolio}} + + GenSA does not return any useable trace information for + portfolios tested at each iteration, therfore we cannot + extract and chart an efficient frontier. + + By default, the tangency portfolio (maximum Sharpe Ratio + or modified Sharpe Ratio) will be plotted using a risk + free rate of 0. Set \code{rf=NULL} to omit this from the + plot. 
+} +\author{ + Ross Bennett +} + From noreply at r-forge.r-project.org Thu Sep 5 19:12:45 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 19:12:45 +0200 (CEST) Subject: [Returnanalytics-commits] r3001 - in pkg/PortfolioAnalytics: R man Message-ID: <20130905171245.16A89184BE6@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-05 19:12:44 +0200 (Thu, 05 Sep 2013) New Revision: 3001 Removed: pkg/PortfolioAnalytics/man/chart.Weights.EF.efficient.frontier.Rd pkg/PortfolioAnalytics/man/chart.Weights.EF.optimize.portfolio.Rd Modified: pkg/PortfolioAnalytics/R/constrained_objective.R pkg/PortfolioAnalytics/R/optimize.portfolio.R pkg/PortfolioAnalytics/man/constrained_objective.Rd pkg/PortfolioAnalytics/man/optimize.portfolio.Rd pkg/PortfolioAnalytics/man/optimize.portfolio.rebalancing.Rd Log: Updating documentation. Removed inst/folder Modified: pkg/PortfolioAnalytics/R/constrained_objective.R =================================================================== --- pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-05 16:30:29 UTC (rev 3000) +++ pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-05 17:12:44 UTC (rev 3001) @@ -339,6 +339,7 @@ #' @param trace TRUE/FALSE whether to include debugging and additional detail in the output list #' @param normalize TRUE/FALSE whether to normalize results to min/max sum (TRUE), or let the optimizer penalize portfolios that do not conform (FALSE) #' @param storage TRUE/FALSE default TRUE for DEoptim with trace, otherwise FALSE. not typically user-called +#' @param constraints a v1_constraint object for \code{constrained_objective_v1} #' @seealso \code{\link{constraint}}, \code{\link{objective}}, \code{\link[DEoptim]{DEoptim.control}} #' @author Kris Boudt, Peter Carl, Brian G. 
Peterson, Ross Bennett #' @aliases constrained_objective constrained_objective_v1 Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-05 16:30:29 UTC (rev 3000) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-05 17:12:44 UTC (rev 3001) @@ -1037,7 +1037,7 @@ #' } #' #' @author Kris Boudt, Peter Carl, Brian G. Peterson, Ross Bennett -#' @aliases optimize.portfolio_v2 optimize_portfolio_v1 +#' @aliases optimize.portfolio_v2 optimize.portfolio_v1 #' @seealso \code{\link{portfolio.spec}} #' @name optimize.portfolio #' @export @@ -1109,6 +1109,7 @@ #' @return a list containing the optimal weights, some summary statistics, the function call, and optionally trace information #' @author Kris Boudt, Peter Carl, Brian G. Peterson #' @name optimize.portfolio.rebalancing +#' @aliases optimize.portfolio.rebalancing optimize.portfolio.rebalancing_v1 #' @export optimize.portfolio.rebalancing <- function(R, portfolio=NULL, constraints=NULL, objectives=NULL, optimize_method=c("DEoptim","random","ROI"), search_size=20000, trace=FALSE, ..., rp=NULL, rebalance_on=NULL, training_period=NULL, trailing_periods=NULL) { Deleted: pkg/PortfolioAnalytics/man/chart.Weights.EF.efficient.frontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.EF.efficient.frontier.Rd 2013-09-05 16:30:29 UTC (rev 3000) +++ pkg/PortfolioAnalytics/man/chart.Weights.EF.efficient.frontier.Rd 2013-09-05 17:12:44 UTC (rev 3001) @@ -1,60 +0,0 @@ -\name{chart.Weights.EF.efficient.frontier} -\alias{chart.Weights.EF.efficient.frontier} -\title{Chart weights along an efficient frontier for an efficient.frontier object} -\usage{ - \method{chart.Weights.EF}{efficient.frontier} (object, - ..., colorset = NULL, n.portfolios = 25, - by.groups = FALSE, match.col = "ES", main = "", - cex.lab = 0.8, cex.axis = 0.8, cex.legend = 
0.8, - legend.labels = NULL, element.color = "darkgray", - legend.loc = "topright") -} -\arguments{ - \item{object}{object of class \code{efficient.frontier}} - - \item{\dots}{passthru parameters to \code{barplot}.} - - \item{colorset}{color palette to use} - - \item{n.portfolios}{number of portfolios to extract along - the efficient frontier} - - \item{by.groups}{TRUE/FALSE. If TRUE, the group weights - are charted} - - \item{match.col}{string name of column to use for risk - (horizontal axis). Must match the name of an objective.} - - \item{main}{title used in the plot.} - - \item{cex.lab}{The magnification to be used for x-axis - and y-axis labels relative to the current setting of - 'cex'} - - \item{cex.axis}{The magnification to be used for sizing - the axis text relative to the current setting of 'cex', - similar to \code{\link{plot}}} - - \item{cex.legend}{The magnification to be used for sizing - the legend relative to the current setting of 'cex', - similar to \code{\link{plot}}} - - \item{legend.labels}{character vector to use for the - legend labels} - - \item{element.color}{provides the color for drawing - less-important chart elements, such as the box lines, - axis lines, etc.} - - \item{legend.loc}{NULL, "topright", "right", or - "bottomright". 
If legend.loc is NULL, the legend will not - be plotted} -} -\description{ - Chart weights along an efficient frontier for an - efficient.frontier object -} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/chart.Weights.EF.optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.EF.optimize.portfolio.Rd 2013-09-05 16:30:29 UTC (rev 3000) +++ pkg/PortfolioAnalytics/man/chart.Weights.EF.optimize.portfolio.Rd 2013-09-05 17:12:44 UTC (rev 3001) @@ -1,60 +0,0 @@ -\name{chart.Weights.EF.optimize.portfolio} -\alias{chart.Weights.EF.optimize.portfolio} -\title{Chart weights along an efficient frontier for an efficient.frontier object} -\usage{ - \method{chart.Weights.EF}{optimize.portfolio} (object, - ..., colorset = NULL, n.portfolios = 25, - by.groups = FALSE, match.col = "ES", main = "", - cex.lab = 0.8, cex.axis = 0.8, cex.legend = 0.8, - legend.labels = NULL, element.color = "darkgray", - legend.loc = "topright") -} -\arguments{ - \item{object}{object of class \code{efficient.frontier}} - - \item{\dots}{passthru parameters to \code{barplot}.} - - \item{colorset}{color palette to use} - - \item{n.portfolios}{number of portfolios to extract along - the efficient frontier} - - \item{by.groups}{TRUE/FALSE. If TRUE, the group weights - are charted} - - \item{match.col}{string name of column to use for risk - (horizontal axis). 
Must match the name of an objective.} - - \item{main}{title used in the plot.} - - \item{cex.lab}{The magnification to be used for x-axis - and y-axis labels relative to the current setting of - 'cex'} - - \item{cex.axis}{The magnification to be used for sizing - the axis text relative to the current setting of 'cex', - similar to \code{\link{plot}}} - - \item{cex.legend}{The magnification to be used for sizing - the legend relative to the current setting of 'cex', - similar to \code{\link{plot}}} - - \item{legend.labels}{character vector to use for the - legend labels} - - \item{element.color}{provides the color for drawing - less-important chart elements, such as the box lines, - axis lines, etc.} - - \item{legend.loc}{NULL, "topright", "right", or - "bottomright". If legend.loc is NULL, the legend will not - be plotted} -} -\description{ - Chart weights along an efficient frontier for an - efficient.frontier object -} -\author{ - Ross Bennett -} - Modified: pkg/PortfolioAnalytics/man/constrained_objective.Rd =================================================================== --- pkg/PortfolioAnalytics/man/constrained_objective.Rd 2013-09-05 16:30:29 UTC (rev 3000) +++ pkg/PortfolioAnalytics/man/constrained_objective.Rd 2013-09-05 17:12:44 UTC (rev 3001) @@ -34,6 +34,9 @@ \item{storage}{TRUE/FALSE default TRUE for DEoptim with trace, otherwise FALSE. 
not typically user-called} + + \item{constraints}{a v1_constraint object for + \code{constrained_objective_v1}} } \description{ function to calculate a numeric return value for a Modified: pkg/PortfolioAnalytics/man/optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/optimize.portfolio.Rd 2013-09-05 16:30:29 UTC (rev 3000) +++ pkg/PortfolioAnalytics/man/optimize.portfolio.Rd 2013-09-05 17:12:44 UTC (rev 3001) @@ -1,6 +1,6 @@ \name{optimize.portfolio} -\alias{optimize_portfolio_v1} \alias{optimize.portfolio} +\alias{optimize.portfolio_v1} \alias{optimize.portfolio_v2} \title{constrained optimization of portfolios} \usage{ Modified: pkg/PortfolioAnalytics/man/optimize.portfolio.rebalancing.Rd =================================================================== --- pkg/PortfolioAnalytics/man/optimize.portfolio.rebalancing.Rd 2013-09-05 16:30:29 UTC (rev 3000) +++ pkg/PortfolioAnalytics/man/optimize.portfolio.rebalancing.Rd 2013-09-05 17:12:44 UTC (rev 3001) @@ -1,5 +1,6 @@ \name{optimize.portfolio.rebalancing} \alias{optimize.portfolio.rebalancing} +\alias{optimize.portfolio.rebalancing_v1} \title{portfolio optimization with support for rebalancing or rolling periods} \usage{ optimize.portfolio.rebalancing_v1(R, constraints, From noreply at r-forge.r-project.org Thu Sep 5 20:08:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 20:08:13 +0200 (CEST) Subject: [Returnanalytics-commits] r3002 - in pkg/PortfolioAnalytics: R sandbox Message-ID: <20130905180813.5819A185B57@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-05 20:08:13 +0200 (Thu, 05 Sep 2013) New Revision: 3002 Modified: pkg/PortfolioAnalytics/R/charts.groups.R pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R Log: Modifying barplotGroupWeights and testing script for weight concentration averision Modified: pkg/PortfolioAnalytics/R/charts.groups.R 
=================================================================== --- pkg/PortfolioAnalytics/R/charts.groups.R 2013-09-05 17:12:44 UTC (rev 3001) +++ pkg/PortfolioAnalytics/R/charts.groups.R 2013-09-05 18:08:13 UTC (rev 3002) @@ -116,7 +116,6 @@ constraints <- get_constraints(object$portfolio) tmp <- extractGroups(object) - grouping <- "groups" if(grouping == "groups"){ weights <- tmp$group_weights Modified: pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R 2013-09-05 17:12:44 UTC (rev 3001) +++ pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R 2013-09-05 18:08:13 UTC (rev 3002) @@ -39,12 +39,23 @@ opt2 all.equal(opt1$weights, opt2$weights) -# Now change the conc_aversion values to give highest penalty to small cap stocks -conc$objectives[[2]]$conc_aversion <- c(0.05, 1, 0.1, 0) +# From the chart we can see that the allocation to MGF is very high. +chart.Weights(opt2) + +# MGF is part of the SMALL group +# Now change the conc_aversion values +conc$objectives[[2]]$conc_aversion <- c(0.1, 0.05, 0.1, 0) opt3 <- optimize.portfolio(R=R, portfolio=conc, optimize_method="ROI", trace=TRUE) opt3 +chart.Weights(opt3) + +# We do not have a group constraint, but we can plot the groups based on +# category labels in the portfolio object +chart.GroupWeights(opt3, grouping="category", plot.type="barplot", col=bluemono) + # If all the conc_aversion values are very high, this should result in an equal weight portfolio conc$objectives[[2]]$conc_aversion <- rep(1e6, 4) opt4 <- optimize.portfolio(R=R, portfolio=conc, optimize_method="ROI", trace=TRUE) opt4 +chart.Weights(opt4) From noreply at r-forge.r-project.org Thu Sep 5 21:33:22 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 21:33:22 +0200 (CEST) Subject: [Returnanalytics-commits] r3003 - in pkg/Meucci: R demo Message-ID: 
<20130905193322.D6E94185A7C@r-forge.r-project.org> Author: xavierv Date: 2013-09-05 21:33:22 +0200 (Thu, 05 Sep 2013) New Revision: 3003 Modified: pkg/Meucci/R/PerformIidAnalysis.R pkg/Meucci/demo/FullFlexProbs.R pkg/Meucci/demo/S_DerivativesInvariants.R pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R pkg/Meucci/demo/S_EstimateQuantileEvaluation.R pkg/Meucci/demo/S_Estimator.R pkg/Meucci/demo/S_HedgeOptions.R pkg/Meucci/demo/S_HorizonEffect.R pkg/Meucci/demo/S_LinVsLogReturn.R pkg/Meucci/demo/S_MeanVarianceBenchmark.R Log: -fixed some errors with non ASCII characters and other porting errors Modified: pkg/Meucci/R/PerformIidAnalysis.R =================================================================== --- pkg/Meucci/R/PerformIidAnalysis.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/R/PerformIidAnalysis.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -17,7 +17,7 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -PerformIidAnalysis = function( Dates = dim( Data, 1), Data, Str = "") +PerformIidAnalysis = function( Dates = dim( Data)[1], Data, Str = "") { ########################################################################################################## Modified: pkg/Meucci/demo/FullFlexProbs.R =================================================================== --- pkg/Meucci/demo/FullFlexProbs.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/FullFlexProbs.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -68,21 +68,20 @@ # DefineProbs = "5" : partial information prox. 
kernel damping # DefineProbs = "6" : partial information: match covariance -DefineProbs = "6"; +DefineProbs = 1; T = dim(X)[1]; p = matrix( 0, T, 1 ); -if( DefineProbs = 1) +if( DefineProbs == 1) { # rolling window tau = 2 * 252; p[ 1:tau ] = 1; p = p / sum( p ); -} -} else if( DefineProbs = 2 ) +} else if( DefineProbs == 2 ) { # exponential smoothing @@ -90,14 +89,14 @@ p = exp( -lmd * ( T - ( 1 : T ) ) ); p = p / sum( p ); -} else if( DefineProbs = 3 ) +} else if( DefineProbs == 3 ) { # market conditions Cond = Y >= 2.8; p[ Cond ] = 1; p = p / sum( p ); -} else if( DefineProbs = 4 ) +} else if( DefineProbs == 4 ) { # kernel damping y = 3; @@ -105,7 +104,7 @@ p = dmvnorm( Y, y, h2 ); p = p / sum( p ); -} else if( DefineProbs = 5 ) +} else if( DefineProbs == 5 ) { # partial information prox. kernel damping y = 3; @@ -113,7 +112,7 @@ h2 = cov( 1 * diff( Y ) ); p = LeastInfoKernel( Y, y, h2 ); -} else if( DefineProbs = 6 ){ +} else if( DefineProbs == 6 ){ #partial information: match covariance l_c = 0.0055; Modified: pkg/Meucci/demo/S_DerivativesInvariants.R =================================================================== --- pkg/Meucci/demo/S_DerivativesInvariants.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_DerivativesInvariants.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -29,11 +29,11 @@ PerformIidAnalysis( 1:length(X), X, 'Changes in implied vol'); Y = diff(log(derivatives$impVol[ eachFiveRowsSeq , maturityIndex, moneynessIndex ])); -PerformIidAnalysis( 1:size(Y,1), Y, 'Changes in log of implied vol' ); +PerformIidAnalysis( 1:length( Y ), Y, 'Changes in log of implied vol' ); ################################################################################################################## ### Multivariate test with AR(1) structure -[T, Mat, Mon] + Dim = dim(derivatives$impVol[ eachFiveRowsSeq , , ]); Z = matrix(log(derivatives$impVol[ eachFiveRowsSeq , , ] ), Dim[ 1 ], Dim[ 2 ] * Dim[ 3 ]); # VAR(1) model by least square @@ -41,9 +41,7 @@ F = 
cbind(matrix( 1, Dim[ 1 ]-1, 1), Z[ -length( Z[1, ] ) , ]); E_XF = t( X ) %*% F / Dim[ 1 ]; E_FF = t( F ) %*% F / Dim[ 1 ]; -B = E_XF %*% (E_FF \ diag( 1, ncol(size(E_FF) ) ) ); +B = E_XF %*% solve(E_FF); Eps = X - F %*% t( B ); # residuals -PerformIidAnalysis(1:size(Eps,1), Eps(:,3), 'VAR(1) residuals'); - -### EOF \ No newline at end of file +PerformIidAnalysis(1:dim(Eps)[1], Eps[ , 3 ], "VAR(1) residuals"); Modified: pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -1,5 +1,5 @@ -#'This script script familiarizes the user with the evaluation of an estimator replicability, loss, error, bias and inefficiency -#', as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. +#' This script script familiarizes the user with the evaluation of an estimator replicability, loss, error, bias +#' and inefficiency, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references #' \url{http://symmys.com/node/170} @@ -132,12 +132,12 @@ b = barplot(Bias_G1sq + Ineff_G1sq, col = "red", main = "stress-test of estimator: x(1)*x(3)"); barplot( Ineff_G1sq, col="blue", add = TRUE); lines( b, Err_G1sq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" ), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); b=barplot( Bias_G2sq + Ineff_G2sq , col = "red", main = "stress-test of estimator sample mean"); barplot( Ineff_G2sq, col="blue", add = TRUE); lines(b, Err_G2sq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" 
), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); Modified: pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -187,23 +187,23 @@ b = barplot( Bias_Gasq + Ineff_Gasq, col = "red", main = "stress-test of estimator a" ); barplot( Ineff_Gasq, col = "blue", add = TRUE); lines( b, Err_Gasq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" ), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); b = barplot( Bias_Gbsq + Ineff_Gbsq, col = "red", main = "stress-test of estimator b" ); barplot( Ineff_Gbsq, col = "blue", add = TRUE); lines( b, Err_Gbsq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" ), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); b = barplot( Bias_Gcsq + Ineff_Gcsq, col = "red", main = "stress-test of estimator c" ); barplot( Ineff_Gcsq, col = "blue", add = TRUE); lines( b, Err_Gcsq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" ), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); b = barplot( Bias_Gdsq + Ineff_Gdsq, col = "red", main = "stress-test of estimator d" ); barplot( Ineff_Gdsq, col = "blue", add = TRUE); lines( b, Err_Gdsq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" 
), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); \ No newline at end of file Modified: pkg/Meucci/demo/S_EstimateQuantileEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateQuantileEvaluation.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_EstimateQuantileEvaluation.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -139,12 +139,12 @@ b = barplot(Bias_Gesq +Ineff_Gesq , col = "red", main = "stress-test of estimator e"); barplot( Ineff_Gesq, col="blue", add = TRUE); lines( b, Err_Gesq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" ), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); b = barplot(Bias_Gbsq+Ineff_Gbsq, col = "red", main = "stress-test of estimator b"); barplot( Ineff_Gbsq, col="blue", add = TRUE); lines( b, Err_Gbsq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" ), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); \ No newline at end of file Modified: pkg/Meucci/demo/S_Estimator.R =================================================================== --- pkg/Meucci/demo/S_Estimator.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_Estimator.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -132,12 +132,12 @@ b = barplot(Bias_G1sq + Ineff_G1sq, col = "red", main = "stress-test of estimator: x(1)*x(3)"); barplot( Ineff_G1sq, col="blue", add = TRUE); lines( b, Err_G1sq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" 
), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); b=barplot( Bias_G2sq + Ineff_G2sq , col = "red", main = "stress-test of estimator sample mean"); barplot( Ineff_G2sq, col="blue", add = TRUE); lines(b, Err_G2sq); -legend( "topleft", 1.9, c( "bias?", "ineff?", "error?" ), col = c( "red","blue", "black" ), +legend( "topleft", 1.9, c( "bias^2", "ineff^2", "error^2" ), col = c( "red","blue", "black" ), lty=1, lwd=c(5,5,1),bg = "gray90" ); Modified: pkg/Meucci/demo/S_HedgeOptions.R =================================================================== --- pkg/Meucci/demo/S_HedgeOptions.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_HedgeOptions.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -8,11 +8,6 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################## -### -### == Chapter 3 == -################################################################################################################## - -################################################################################################################## ### Load data load( "../data/implVol.Rda" ); @@ -105,7 +100,7 @@ a_bs = BSCP$cash / BSCP$c * r_free * tau / 252; b_bs = t( BSCP$delta / BSCP$c * spot_T); -printf( "OLS: a = [ %s\t]\n", sprintf("\t%7.4f", t(a) ) )); +printf( "OLS: a = [ %s\t]\n", sprintf("\t%7.4f", t(a) ) ); printf( "B-S: a = [ %s\t]\n", sprintf("\t%7.4f", t(a_bs) ) ); printf( "OLS: b = [ %s\t]\n", sprintf("\t%7.4f", t(b) ) ); printf( "B-S: b = [ %s\t]\n", sprintf("\t%7.4f", t(b_bs) ) ); @@ -113,6 +108,6 @@ for( i in 1 : numCalls ) { dev.new(); - plot( Rsp, Rc[ , i ], xlab = "return underlying" , ylab = "return call option"); + plot( Rsp, Rc[ , i ], xlab = "return underlying" , ylab = "return call option" ); } Modified: pkg/Meucci/demo/S_HorizonEffect.R 
=================================================================== --- pkg/Meucci/demo/S_HorizonEffect.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_HorizonEffect.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -17,7 +17,7 @@ ################################################################################################################## # Load parameters of the model: D, muX, sigmaF, sigmaEps -load( "../data/DB_LinearModel.mat" ); +load( "../data/linearModel.Rda" ); # Specify range of investment horizon, weeks tauRangeWeeks = 1:52; @@ -95,4 +95,3 @@ lines(tauRangeWeeks, minCorrU, col = "green"); legend( "topleft", 1.9, c( "max absolute corr", "mean absolute corr", "min absolute corr" ), col = c( "red","blue", "green" ), lty=1, bg = "gray90" ); -} \ No newline at end of file Modified: pkg/Meucci/demo/S_LinVsLogReturn.R =================================================================== --- pkg/Meucci/demo/S_LinVsLogReturn.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_LinVsLogReturn.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -54,7 +54,6 @@ Col = rgb( 0.8, 0.8, 0.8 ); -subplot('Position', ( 0.05, 0.55, 0.9, 0.4 ) ); par(mfrow=c(2,1)); Modified: pkg/Meucci/demo/S_MeanVarianceBenchmark.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceBenchmark.R 2013-09-05 18:08:13 UTC (rev 3002) +++ pkg/Meucci/demo/S_MeanVarianceBenchmark.R 2013-09-05 19:33:22 UTC (rev 3003) @@ -138,7 +138,7 @@ # frontiers in relative return space dev.new(); plot( Rel_Std_Deviation, Rel_ExpectedValue, type = "l", lwd = 2, col = "blue", xlab = "TE rets.", ylab = "EOP rets.", - xlim =c( Rel_Std_Deviation_b[1], Rel_Std_Deviation_b[length(Rel_Std_Deviation_b)] ), ylim = c( min( Rel_ExpectedValue_b ), max( Rel_ExpectedValue_b )) );); + xlim =c( Rel_Std_Deviation_b[1], Rel_Std_Deviation_b[length(Rel_Std_Deviation_b)] ), ylim = c( min( Rel_ExpectedValue_b ), max( Rel_ExpectedValue_b )) ); lines( Rel_Std_Deviation_b, Rel_ExpectedValue_b, 
lwd = 2, col = "red" ); legend( "topleft", 1.9, c( "total ret", "relative" ), col = c( "blue","red" ), lty=1, bg = "gray90" ); From noreply at r-forge.r-project.org Thu Sep 5 23:29:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 23:29:27 +0200 (CEST) Subject: [Returnanalytics-commits] r3004 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . R man Message-ID: <20130905212927.1BCDE185C11@r-forge.r-project.org> Author: shubhanm Date: 2013-09-05 23:29:26 +0200 (Thu, 05 Sep 2013) New Revision: 3004 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/inst/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/man/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/inst/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd Log: Lo Sharpe final documentation + additon of table summary of all Sharpe Ratio functions Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-05 19:33:22 UTC (rev 3003) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-05 21:29:26 UTC (rev 3004) @@ -1,38 +1,39 @@ -Package: noniid.sm -Type: Package -Title: Non-i.i.d. GSoC 2013 Shubhankit -Version: 0.1 -Date: $Date: 2013-05-13 14:30:22 -0500 (Mon, 13 May 2013) $ -Author: Shubhankit Mohan -Contributors: Peter Carl, Brian G. 
Peterson -Depends: - xts, - PerformanceAnalytics, - tseries, - stats -Maintainer: Brian G. Peterson -Description: GSoC 2013 project to replicate literature on drawdowns and - non-i.i.d assumptions in finance. -License: GPL-3 -ByteCompile: TRUE -Collate: - 'AcarSim.R' - 'ACStdDev.annualized.R' - 'CalmarRatio.Norm.R' - 'CDrawdown.R' - 'chart.AcarSim.R' - 'chart.Autocorrelation.R' - 'EmaxDDGBM.R' - 'GLMSmoothIndex.R' - 'LoSharpe.R' - 'na.skip.R' - 'noniid.sm-internal.R' - 'QP.Norm.R' - 'Return.GLM.R' - 'Return.Okunev.R' - 'se.LoSharpe.R' - 'SterlingRatio.Norm.R' - 'table.ComparitiveReturn.GLM.R' - 'table.EMaxDDGBM.R' - 'table.UnsmoothReturn.R' - 'UnsmoothReturn.R' +Package: noniid.sm +Type: Package +Title: Non-i.i.d. GSoC 2013 Shubhankit +Version: 0.1 +Date: $Date: 2013-05-13 14:30:22 -0500 (Mon, 13 May 2013) $ +Author: Shubhankit Mohan +Contributors: Peter Carl, Brian G. Peterson +Depends: + xts, + PerformanceAnalytics, + tseries, + stats +Maintainer: Brian G. Peterson +Description: GSoC 2013 project to replicate literature on drawdowns and + non-i.i.d assumptions in finance. 
+License: GPL-3 +ByteCompile: TRUE +Collate: + 'AcarSim.R' + 'ACStdDev.annualized.R' + 'CalmarRatio.Norm.R' + 'CDrawdown.R' + 'chart.AcarSim.R' + 'chart.Autocorrelation.R' + 'EmaxDDGBM.R' + 'GLMSmoothIndex.R' + 'LoSharpe.R' + 'na.skip.R' + 'noniid.sm-internal.R' + 'QP.Norm.R' + 'Return.GLM.R' + 'Return.Okunev.R' + 'se.LoSharpe.R' + 'SterlingRatio.Norm.R' + 'table.ComparitiveReturn.GLM.R' + 'table.EMaxDDGBM.R' + 'table.UnsmoothReturn.R' + 'UnsmoothReturn.R' + 'table.Sharpe.R' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-05 19:33:22 UTC (rev 3003) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-05 21:29:26 UTC (rev 3004) @@ -1,17 +1,18 @@ -export(AcarSim) -export(ACStdDev.annualized) -export(CalmarRatio.Norm) -export(CDrawdown) -export(chart.AcarSim) -export(chart.Autocorrelation) -export(EMaxDDGBM) -export(GLMSmoothIndex) -export(LoSharpe) -export(QP.Norm) -export(Return.GLM) -export(Return.Okunev) -export(se.LoSharpe) -export(SterlingRatio.Norm) -export(table.ComparitiveReturn.GLM) -export(table.EMaxDDGBM) -export(table.UnsmoothReturn) +export(AcarSim) +export(ACStdDev.annualized) +export(CalmarRatio.Norm) +export(CDrawdown) +export(chart.AcarSim) +export(chart.Autocorrelation) +export(EMaxDDGBM) +export(GLMSmoothIndex) +export(LoSharpe) +export(QP.Norm) +export(Return.GLM) +export(Return.Okunev) +export(se.LoSharpe) +export(SterlingRatio.Norm) +export(table.ComparitiveReturn.GLM) +export(table.EMaxDDGBM) +export(table.Sharpe) +export(table.UnsmoothReturn) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R 2013-09-05 19:33:22 UTC (rev 3003) +++ 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R 2013-09-05 21:29:26 UTC (rev 3004) @@ -20,21 +20,27 @@ #'\deqn{SR(q) = \eta(q) } #'Where : #' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } -#' Where k belongs to 0 to q-1 +#' Where, k belongs to 0 to q-1 +#' SR(q) : Estimated Lo Sharpe Ratio +#' SR : Theoretical William Sharpe Ratio #' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of #' daily asset returns #' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of #' annualized Risk Free Rate #' @param q Number of autocorrelated lag periods. Taken as 3 (Default) #' @param \dots any other pass thru parameters -#' @author Brian G. Peterson, Peter Carl, Shubhankit Mohan -#' @references Getmansky, Mila, Lo, Andrew W. and Makarov, Igor,\emph{ An Econometric Model of Serial Correlation and Illiquidity in Hedge Fund Returns} (March 1, 2003). MIT Sloan Working Paper No. 4288-03; MIT Laboratory for Financial Engineering Working Paper No. LFE-1041A-03; EFMA 2003 Helsinki Meetings. -#' \url{http://ssrn.com/abstract=384700} +#' @author Shubhankit Mohan +#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. 
+#'\code{\link[stats]{}} \cr +#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} +#' +#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} +#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} #' @keywords ts multivariate distribution models non-iid #' @examples #' #' data(edhec) -#' head(LoSharpe(edhec,0,3)) +#' LoSharpe(edhec,0,3) #' @rdname LoSharpe #' @export LoSharpe <- @@ -48,7 +54,7 @@ columns.a = ncol(R) columnnames.a = colnames(R) # Time used for daily Return manipulations - Time= 252*nyears(R) + Time= 252*nyears(edhec) clean.lo <- function(column.R,q) { # compute the lagged return series gamma.k =matrix(0,q) @@ -71,12 +77,13 @@ } for(column.a in 1:columns.a) { # for each asset passed in as R # clean the data and get rid of NAs - mu = sum(R[,column.a])/(Time) - sig=sqrt(((R[,column.a]-mu)^2/(Time))) - pho.k = clean.lo(R[,column.a],q)/(as.numeric(sig[1])) + clean.ret=na.omit(R[,column.a]) + mu = sum(clean.ret)/(Time) + sig=sqrt(((clean.ret-mu)^2/(Time))) + pho.k = clean.lo(clean.ret,q)/(as.numeric(sig[1])) netaq=neta.lo(pho.k,q) - column.lo = (netaq*((mu-Rf)/as.numeric(sig[1]))) - + #column.lo = (netaq*((mu-Rf)/as.numeric(sig[1]))) + column.lo = as.numeric(SharpeRatio.annualized(R[,column.a]))[1]*netaq if(column.a == 1) { lo = column.lo } else { lo = cbind (lo, column.lo) } @@ -85,7 +92,7 @@ rownames(lo)= paste("Lo Sharpe Ratio") return(lo) - edhec=NULL + # RESULTS: } Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R 2013-09-05 19:33:22 UTC (rev 3003) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R 2013-09-05 21:29:26 UTC (rev 3004) @@ -21,15 +21,23 @@ #'Where : #' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } #' Where k belongs to 0 to q-1 +#' Under the assumption of assumption of 
asymptotic variance of SR(q), the standard error for the Sharpe Ratio Esitmator can be computed as: +#' \deqn{SE(SR(q)) = \sqrt((1+SR^2/2)/T)} +#' SR(q) : Estimated Lo Sharpe Ratio +#' SR : Theoretical William Sharpe Ratio #' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of #' daily asset returns #' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of #' annualized Risk Free Rate #' @param q Number of autocorrelated lag periods. Taken as 3 (Default) #' @param \dots any other pass thru parameters -#' @author Brian G. Peterson, Peter Carl, Shubhankit Mohan -#' @references Getmansky, Mila, Lo, Andrew W. and Makarov, Igor,\emph{ An Econometric Model of Serial Correlation and Illiquidity in Hedge Fund Returns} (March 1, 2003). MIT Sloan Working Paper No. 4288-03; MIT Laboratory for Financial Engineering Working Paper No. LFE-1041A-03; EFMA 2003 Helsinki Meetings. -#' \url{http://ssrn.com/abstract=384700} +#' @author Shubhankit Mohan +#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. 
+#'\code{\link[stats]{}} \cr +#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} +#' +#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} +#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} #' @keywords ts multivariate distribution models non-iid #' @examples #' @@ -48,7 +56,7 @@ columns.a = ncol(R) columnnames.a = colnames(R) # Time used for daily Return manipulations - Time= 252*nyears(R) + Time= 252*nyears(edhec) clean.lo <- function(column.R,q) { # compute the lagged return series gamma.k =matrix(0,q) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R 2013-09-05 21:29:26 UTC (rev 3004) @@ -0,0 +1,98 @@ +#'@title Sharpe Ratio Statistics Summary +#'@description +#' The Sharpe ratio is simply the return per unit of risk (represented by +#' variability). In the classic case, the unit of risk is the standard +#' deviation of the returns. +#' +#' \deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} +#' +#' William Sharpe now recommends \code{\link{InformationRatio}} preferentially +#' to the original Sharpe Ratio. +#' +#' The higher the Sharpe ratio, the better the combined performance of "risk" +#' and return. +#' +#' As noted, the traditional Sharpe Ratio is a risk-adjusted measure of return +#' that uses standard deviation to represent risk. + +#' Although the Sharpe ratio has become part of the canon of modern financial +#' analysis, its applications typically do not account for the fact that it is an +#' estimated quantity, subject to estimation errors that can be substantial in +#' some cases. +#' +#' Many studies have documented various violations of the assumption of +#' IID returns for financial securities. 
+#' +#' Under the assumption of stationarity,a version of the Central Limit Theorem can +#' still be applied to the estimator . +#' @details +#' The relationship between SR and SR(q) is somewhat more involved for non- +#'IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the covariances. Specifically, under +#' the assumption that returns \eqn{R_t} are stationary, +#' \deqn{ Var[(R_t)] = \sum \sum Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum (q-k)\rho(k) } +#' Where \eqn{ \rho(k) = Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order autocorrelation coefficient of the series of returns.This yields the following relationship between SR and SR(q): +#' and i,j belongs to 0 to q-1 +#'\deqn{SR(q) = \eta(q) } +#'Where : +#' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } +#' Where, k belongs to 0 to q-1 +#' SR(q) : Estimated Lo Sharpe Ratio +#' SR : Theoretical William Sharpe Ratio +#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of +#' daily asset returns +#' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of +#' annualized Risk Free Rate +#' @param q Number of autocorrelated lag periods. Taken as 3 (Default) +#' @param \dots any other pass thru parameters +#' @author Shubhankit Mohan +#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. +#'\code{\link[stats]{}} \cr +#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} +#' +#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} +#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +#' @keywords ts multivariate distribution models non-iid +#' @examples +#' +#' data(edhec) +#' table.Sharpe(edhec,0,3) +#' @rdname table.Sharpe +#' @export +table.Sharpe <- + function (Ra,Rf = 0,q = 3, ...) 
+ { y = checkData(Ra, method = "xts") + columns = ncol(y) + rows = nrow(y) + columnnames = colnames(y) + rownames = rownames(y) + + # for each column, do the following: + for(column in 1:columns) { + x = y[,column] + + z = c(SharpeRatio.annualized(x), + SharpeRatio.modified(x), + LoSharpe(x), + Return.annualized(x),StdDev.annualized(x),se.Losharpe(x)) + + znames = c( + "William Sharpe Ratio", + "Modified Sharpe Ratio", + "Andrew Lo Sharpe Ratio", + "Annualized Return", + "Annualized Standard Deviation","Sharpe Ratio Standard Error(95%)" + ) + if(column == 1) { + resultingtable = data.frame(Value = z, row.names = znames) + } + else { + nextcolumn = data.frame(Value = z, row.names = znames) + resultingtable = cbind(resultingtable, nextcolumn) + } + } + colnames(resultingtable) = columnnames + ans = base::round(resultingtable, digits) + ans + + + } Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd 2013-09-05 19:33:22 UTC (rev 3003) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd 2013-09-05 21:29:26 UTC (rev 3004) @@ -1,70 +1,71 @@ -\name{LoSharpe} -\alias{LoSharpe} -\title{Andrew Lo Sharpe Ratio} -\usage{ - LoSharpe(Ra, Rf = 0, q = 3, ...) -} -\arguments{ - \item{Ra}{an xts, vector, matrix, data frame, timeSeries - or zoo object of daily asset returns} - - \item{Rf}{an xts, vector, matrix, data frame, timeSeries - or zoo object of annualized Risk Free Rate} - - \item{q}{Number of autocorrelated lag periods. Taken as 3 - (Default)} - - \item{\dots}{any other pass thru parameters} -} -\description{ - Although the Sharpe ratio has become part of the canon of - modern financial analysis, its applications typically do - not account for the fact that it is an estimated - quantity, subject to estimation errors that can be - substantial in some cases. 
- - Many studies have documented various violations of the - assumption of IID returns for financial securities. - - Under the assumption of stationarity,a version of the - Central Limit Theorem can still be applied to the - estimator . -} -\details{ - The relationship between SR and SR(q) is somewhat more - involved for non- IID returns because the variance of - Rt(q) is not just the sum of the variances of component - returns but also includes all the covariances. - Specifically, under the assumption that returns \eqn{R_t} - are stationary, \deqn{ Var[(R_t)] = \sum \sum - Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum - (q-k)\rho(k) } Where \eqn{ \rho(k) = - Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order - autocorrelation coefficient of the series of returns.This - yields the following relationship between SR and SR(q): - and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where - : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 - \sum(q-k)\rho(k)] } Where k belongs to 0 to q-1 -} -\examples{ -data(edhec) -head(LoSharpe(edhec,0,3)) -} -\author{ - Brian G. Peterson, Peter Carl, Shubhankit Mohan -} -\references{ - Getmansky, Mila, Lo, Andrew W. and Makarov, Igor,\emph{ - An Econometric Model of Serial Correlation and - Illiquidity in Hedge Fund Returns} (March 1, 2003). MIT - Sloan Working Paper No. 4288-03; MIT Laboratory for - Financial Engineering Working Paper No. LFE-1041A-03; - EFMA 2003 Helsinki Meetings. - \url{http://ssrn.com/abstract=384700} -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{non-iid} -\keyword{ts} - +\name{LoSharpe} +\alias{LoSharpe} +\title{Andrew Lo Sharpe Ratio} +\usage{ + LoSharpe(Ra, Rf = 0, q = 3, ...) +} +\arguments{ + \item{Ra}{an xts, vector, matrix, data frame, timeSeries + or zoo object of daily asset returns} + + \item{Rf}{an xts, vector, matrix, data frame, timeSeries + or zoo object of annualized Risk Free Rate} + + \item{q}{Number of autocorrelated lag periods. 
Taken as 3 + (Default)} + + \item{\dots}{any other pass thru parameters} +} +\description{ + Although the Sharpe ratio has become part of the canon of + modern financial analysis, its applications typically do + not account for the fact that it is an estimated + quantity, subject to estimation errors that can be + substantial in some cases. + + Many studies have documented various violations of the + assumption of IID returns for financial securities. + + Under the assumption of stationarity,a version of the + Central Limit Theorem can still be applied to the + estimator . +} +\details{ + The relationship between SR and SR(q) is somewhat more + involved for non- IID returns because the variance of + Rt(q) is not just the sum of the variances of component + returns but also includes all the covariances. + Specifically, under the assumption that returns \eqn{R_t} + are stationary, \deqn{ Var[(R_t)] = \sum \sum + Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum + (q-k)\rho(k) } Where \eqn{ \rho(k) = + Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order + autocorrelation coefficient of the series of returns.This + yields the following relationship between SR and SR(q): + and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where + : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 + \sum(q-k)\rho(k)] } Where, k belongs to 0 to q-1 SR(q) : + Estimated Lo Sharpe Ratio SR : Theoretical William Sharpe + Ratio +} +\examples{ +data(edhec) +LoSharpe(edhec,0,3) +} +\author{ + Shubhankit Mohan +} +\references{ + Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, + AIMR. 
\code{\link[stats]{}} \cr + \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} + + Andrew Lo,\emph{Sharpe Ratio may be Overstated} + \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{non-iid} +\keyword{ts} + Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd 2013-09-05 19:33:22 UTC (rev 3003) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd 2013-09-05 21:29:26 UTC (rev 3004) @@ -44,23 +44,27 @@ yields the following relationship between SR and SR(q): and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 - \sum(q-k)\rho(k)] } Where k belongs to 0 to q-1 + \sum(q-k)\rho(k)] } Where k belongs to 0 to q-1 Under the + assumption of assumption of asymptotic variance of SR(q), + the standard error for the Sharpe Ratio Esitmator can be + computed as: \deqn{SE(SR(q)) = \sqrt((1+SR^2/2)/T)} SR(q) + : Estimated Lo Sharpe Ratio SR : Theoretical William + Sharpe Ratio } \examples{ data(edhec) se.LoSharpe(edhec,0,3) } \author{ - Brian G. Peterson, Peter Carl, Shubhankit Mohan + Shubhankit Mohan } \references{ - Getmansky, Mila, Lo, Andrew W. and Makarov, Igor,\emph{ - An Econometric Model of Serial Correlation and - Illiquidity in Hedge Fund Returns} (March 1, 2003). MIT - Sloan Working Paper No. 4288-03; MIT Laboratory for - Financial Engineering Working Paper No. LFE-1041A-03; - EFMA 2003 Helsinki Meetings. - \url{http://ssrn.com/abstract=384700} + Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, + AIMR. 
\code{\link[stats]{}} \cr + \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} + + Andrew Lo,\emph{Sharpe Ratio may be Overstated} + \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} } \keyword{distribution} \keyword{models} Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd 2013-09-05 21:29:26 UTC (rev 3004) @@ -0,0 +1,86 @@ +\name{table.Sharpe} +\alias{table.Sharpe} +\title{Sharpe Ratio Statistics Summary} +\usage{ + table.Sharpe(Ra, Rf = 0, q = 3, ...) +} +\arguments{ + \item{Ra}{an xts, vector, matrix, data frame, timeSeries + or zoo object of daily asset returns} + + \item{Rf}{an xts, vector, matrix, data frame, timeSeries + or zoo object of annualized Risk Free Rate} + + \item{q}{Number of autocorrelated lag periods. Taken as 3 + (Default)} + + \item{\dots}{any other pass thru parameters} +} +\description{ + The Sharpe ratio is simply the return per unit of risk + (represented by variability). In the classic case, the + unit of risk is the standard deviation of the returns. + + \deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} + + William Sharpe now recommends + \code{\link{InformationRatio}} preferentially to the + original Sharpe Ratio. + + The higher the Sharpe ratio, the better the combined + performance of "risk" and return. + + As noted, the traditional Sharpe Ratio is a risk-adjusted + measure of return that uses standard deviation to + represent risk. Although the Sharpe ratio has become part + of the canon of modern financial analysis, its + applications typically do not account for the fact that + it is an estimated quantity, subject to estimation errors + that can be substantial in some cases. 
+ + Many studies have documented various violations of the + assumption of IID returns for financial securities. + + Under the assumption of stationarity,a version of the + Central Limit Theorem can still be applied to the + estimator . +} +\details{ + The relationship between SR and SR(q) is somewhat more + involved for non- IID returns because the variance of + Rt(q) is not just the sum of the variances of component + returns but also includes all the covariances. + Specifically, under the assumption that returns \eqn{R_t} + are stationary, \deqn{ Var[(R_t)] = \sum \sum + Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum + (q-k)\rho(k) } Where \eqn{ \rho(k) = + Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order + autocorrelation coefficient of the series of returns.This + yields the following relationship between SR and SR(q): + and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where + : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 + \sum(q-k)\rho(k)] } Where, k belongs to 0 to q-1 SR(q) : + Estimated Lo Sharpe Ratio SR : Theoretical William Sharpe + Ratio +} +\examples{ +data(edhec) +table.Sharpe(edhec,0,3) +} +\author{ + Shubhankit Mohan +} +\references{ + Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, + AIMR. \code{\link[stats]{}} \cr + \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} + + Andrew Lo,\emph{Sharpe Ratio may be Overstated} + \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{non-iid} +\keyword{ts} + From noreply at r-forge.r-project.org Thu Sep 5 23:31:44 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 5 Sep 2013 23:31:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3005 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code: . 
Data Tests Message-ID: <20130905213144.EF85B180484@r-forge.r-project.org> Author: shubhanm Date: 2013-09-05 23:31:44 +0200 (Thu, 05 Sep 2013) New Revision: 3005 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/inst/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Data/man/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Cross Sectional Data.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/HAC Data.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Tests.R pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Time Series Data.R Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/R Tests/ Log: Change of Folder Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Cross Sectional Data.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Cross Sectional Data.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Cross Sectional Data.R 2013-09-05 21:31:44 UTC (rev 3005) @@ -0,0 +1,79 @@ +library("sandwich") +library("lmtest") +library("strucchange") +data("PublicSchools") +ps <- na.omit(PublicSchools) +ps$Income <- ps$Income * 0.0001 +fm.ps <- lm(Expenditure ~ Income + I(Income^3), data = ps) +sqrt(diag(vcov(fm.ps))) +sqrt(diag(vcovHC(fm.ps, type = "const"))) +sqrt(diag(vcovHC(fm.ps, type = "HC0"))) +sqrt(diag(vcovHC(fm.ps, type = "HC3"))) +sqrt(diag(vcovHC(fm.ps, type = "HC4"))) +coeftest(fm.ps, df = Inf, vcov = vcovHC(fm.ps, type = "HC0")) +coeftest(fm.ps, df = Inf, vcov = vcovHC(fm.ps, type = "HC4")) +plot(Expenditure ~ Income, data = ps, + xlab = "per capita income", + ylab = "per capita spending on public schools") +inc <- seq(0.5, 1.2, by = 0.001) +lines(inc, predict(fm.ps, data.frame(Income = inc)), col = 4, lty = 2) +fm.ps2 <- lm(Expenditure ~ Income, data = ps) +abline(fm.ps2, col = 
4) +text(ps[2,2], ps[2,1], rownames(ps)[2], pos = 2) +## Willam H. Greene, Econometric Analysis, 2nd Ed. +## Chapter 14 +## load data set, p. 385, Table 14.1 +data(PublicSchools) + +## omit NA in Wisconsin and scale income +ps <- na.omit(PublicSchools) +ps$Income <- ps$Income * 0.0001 + +## fit quadratic regression, p. 385, Table 14.2 +fmq <- lm(Expenditure ~ Income + I(Income^2), data = ps) +summary(fmq) + +## compare standard and HC0 standard errors +## p. 391, Table 14.3 +library(sandwich) +coef(fmq) +sqrt(diag(vcovHC(fmq, type = "const"))) +sqrt(diag(vcovHC(fmq, type = "HC0"))) + +if(require(lmtest)) { + ## compare t ratio + coeftest(fmq, vcov = vcovHC(fmq, type = "HC0")) + + ## White test, p. 393, Example 14.5 + wt <- lm(residuals(fmq)^2 ~ poly(Income, 4), data = ps) + wt.stat <- summary(wt)$r.squared * nrow(ps) + c(wt.stat, pchisq(wt.stat, df = 3, lower = FALSE)) + + ## Bresch-Pagan test, p. 395, Example 14.7 + bptest(fmq, studentize = FALSE) + bptest(fmq) + + ## Francisco Cribari-Neto, Asymptotic Inference, CSDA 45 + ## quasi z-tests, p. 229, Table 8 + ## with Alaska + coeftest(fmq, df = Inf)[3,4] + coeftest(fmq, df = Inf, vcov = vcovHC(fmq, type = "HC0"))[3,4] + coeftest(fmq, df = Inf, vcov = vcovHC(fmq, type = "HC3"))[3,4] + coeftest(fmq, df = Inf, vcov = vcovHC(fmq, type = "HC4"))[3,4] + ## without Alaska (observation 2) + fmq1 <- lm(Expenditure ~ Income + I(Income^2), data = ps[-2,]) + coeftest(fmq1, df = Inf)[3,4] + coeftest(fmq1, df = Inf, vcov = vcovHC(fmq1, type = "HC0"))[3,4] + coeftest(fmq1, df = Inf, vcov = vcovHC(fmq1, type = "HC3"))[3,4] + coeftest(fmq1, df = Inf, vcov = vcovHC(fmq1, type = "HC4"))[3,4] +} + +## visualization, p. 
230, Figure 1 +plot(Expenditure ~ Income, data = ps, + xlab = "per capita income", + ylab = "per capita spending on public schools") +inc <- seq(0.5, 1.2, by = 0.001) +lines(inc, predict(fmq, data.frame(Income = inc)), col = 4) +fml <- lm(Expenditure ~ Income, data = ps) +abline(fml) +text(ps[2,2], ps[2,1], rownames(ps)[2], pos = 2) \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/HAC Data.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/HAC Data.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/HAC Data.R 2013-09-05 21:31:44 UTC (rev 3005) @@ -0,0 +1,17 @@ +data("RealInt") +#OLS-based CUSUM test with quadratic spectral kernel HAC estimate: + ocus <- gefp(RealInt ~ 1, fit = lm, vcov = kernHAC) +plot(ocus, aggregate = FALSE) +sctest(ocus) +#supF test with quadratic spectral kernel HAC estimate: + fs <- Fstats(RealInt ~ 1, vcov = kernHAC) +plot(fs) +sctest(fs) +#Breakpoint estimation and con?dence intervals with quadratic spectral kernel HAC estimate: + bp <- breakpoints(RealInt ~ 1) +confint(bp, vcov = kernHAC) +plot(bp) +#Visualization: + plot(RealInt, ylab = "Real interest rate") +lines(ts(fitted(bp), start = start(RealInt), freq = 4), col = 4) +lines(confint(bp, vcov = kernHAC)) \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Tests.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Tests.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Tests.R 2013-09-05 21:31:44 UTC (rev 3005) @@ -0,0 +1,21 @@ +fpe <- read.table("http://data.princeton.edu/wws509/datasets/effort.dat") +attach(fpe) +lmfit = lm( change ~ setting + effort ) +sandwich(lmfit) +Fr <- c(68,42,42,30, 37,52,24,43, + 66,50,33,23, 47,55,23,47, + 63,53,29,27, 57,49,19,29) + +Temp <- 
gl(2, 2, 24, labels = c("Low", "High")) +Soft <- gl(3, 8, 24, labels = c("Hard","Medium","Soft")) +M.user <- gl(2, 4, 24, labels = c("N", "Y")) +Brand <- gl(2, 1, 24, labels = c("X", "M")) + +detg <- data.frame(Fr,Temp, Soft,M.user, Brand) +detg.m0 <- glm(Fr ~ M.user*Temp*Soft + Brand, family = poisson, data = detg) +summary(detg.m0) + +detg.mod <- glm(terms(Fr ~ M.user*Temp*Soft + Brand*M.user*Temp, + keep.order = TRUE), + family = poisson, data = detg) +sandwich(detg.mod) \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Time Series Data.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Time Series Data.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/Code/Tests/Time Series Data.R 2013-09-05 21:31:44 UTC (rev 3005) @@ -0,0 +1,78 @@ +## Willam H. Greene, Econometric Analysis, 2nd Ed. +## Chapter 15 +## load data set, p. 411, Table 15.1 +data(Investment) + +## fit linear model, p. 412, Table 15.2 +fm <- lm(RealInv ~ RealGNP + RealInt, data = Investment) +summary(fm) + +## visualize residuals, p. 412, Figure 15.1 +plot(ts(residuals(fm), start = 1964), + type = "b", pch = 19, ylim = c(-35, 35), ylab = "Residuals") +sigma <- sqrt(sum(residuals(fm)^2)/fm$df.residual) ## maybe used df = 26 instead of 16 ?? +abline(h = c(-2, 0, 2) * sigma, lty = 2) + +if(require(lmtest)) { + ## Newey-West covariances, Example 15.3 + coeftest(fm, vcov = NeweyWest(fm, lag = 4)) + ## Note, that the following is equivalent: + coeftest(fm, vcov = kernHAC(fm, kernel = "Bartlett", bw = 5, prewhite = FALSE, adjust = FALSE)) + + ## Durbin-Watson test, p. 424, Example 15.4 + dwtest(fm) + + ## Breusch-Godfrey test, p. 
427, Example 15.6 + bgtest(fm, order = 4) +} + +## visualize fitted series +plot(Investment[, "RealInv"], type = "b", pch = 19, ylab = "Real investment") +lines(ts(fitted(fm), start = 1964), col = 4) + +## 3-d visualization of fitted model +if(require(scatterplot3d)) { + s3d <- scatterplot3d(Investment[,c(5,7,6)], + type = "b", angle = 65, scale.y = 1, pch = 16) + s3d$plane3d(fm, lty.box = "solid", col = 4) +} +## fit investment equation +data(Investment) +fm <- lm(RealInv ~ RealGNP + RealInt, data = Investment) + +## Newey & West (1994) compute this type of estimator +NeweyWest(fm) + +## The Newey & West (1987) estimator requires specification +## of the lag and suppression of prewhitening +NeweyWest(fm, lag = 4, prewhite = FALSE) + +## bwNeweyWest() can also be passed to kernHAC(), e.g. +## for the quadratic spectral kernel +kernHAC(fm, bw = bwNeweyWest) + +curve(kweights(x, kernel = "Quadratic", normalize = TRUE), + from = 0, to = 3.2, xlab = "x", ylab = "k(x)") +curve(kweights(x, kernel = "Bartlett", normalize = TRUE), + from = 0, to = 3.2, col = 2, add = TRUE) +curve(kweights(x, kernel = "Parzen", normalize = TRUE), + from = 0, to = 3.2, col = 3, add = TRUE) +curve(kweights(x, kernel = "Tukey", normalize = TRUE), + from = 0, to = 3.2, col = 4, add = TRUE) +curve(kweights(x, kernel = "Truncated", normalize = TRUE), + from = 0, to = 3.2, col = 5, add = TRUE) + +## fit investment equation +data(Investment) +fm <- lm(RealInv ~ RealGNP + RealInt, data = Investment) + +## compute quadratic spectral kernel HAC estimator +kernHAC(fm) +kernHAC(fm, verbose = TRUE) + +## use Parzen kernel instead, VAR(2) prewhitening, no finite sample +## adjustment and Newey & West (1994) bandwidth selection +kernHAC(fm, kernel = "Parzen", prewhite = 2, adjust = FALSE, + bw = bwNeweyWest, verbose = TRUE) +## compare with estimate under assumption of spheric errors +vcov(fm) \ No newline at end of file From noreply at r-forge.r-project.org Fri Sep 6 02:00:33 2013 From: noreply at 
r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 6 Sep 2013 02:00:33 +0200 (CEST) Subject: [Returnanalytics-commits] r3006 - in pkg/PerformanceAnalytics/sandbox/pulkit: R week1/code Message-ID: <20130906000034.026E5183E14@r-forge.r-project.org> Author: pulkit Date: 2013-09-06 02:00:33 +0200 (Fri, 06 Sep 2013) New Revision: 3006 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R pkg/PerformanceAnalytics/sandbox/pulkit/R/PSRopt.R pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R pkg/PerformanceAnalytics/sandbox/pulkit/week1/code/PSROpt.py Log: change in initial weights PSR optimization Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R 2013-09-05 21:31:44 UTC (rev 3005) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R 2013-09-06 00:00:33 UTC (rev 3006) @@ -107,12 +107,12 @@ refSR = refSR[-index] sk = sk[-index] kr = kr[-index] + columnnames = columnnames[-index] warning(paste("The Reference Sharpe Ratio greater than the Observed Sharpe Ratio for case",columnnames[index],"\n")) } result = 1 + (1 - sk*sr + ((kr-1)/4)*sr^2)*(qnorm(p)/(sr-refSR))^2 - columnnames = columnnames[-index] if(!is.null(dim(result))){ - colnames(result) = paste(columnnames,"(SR >",refSR,")") + colnames(result) = paste(columnnames,"(SR >",round(refSR,2),")") rownames(result) = paste("Probabilistic Sharpe Ratio(p=",round(p*100,1),"%):") } return(result) Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/PSRopt.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/PSRopt.R 2013-09-05 21:31:44 UTC (rev 3005) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/PSRopt.R 2013-09-06 00:00:33 UTC (rev 3006) @@ -60,7 +60,7 @@ } z = 0 iter = 0 - w = rep(1,columns) + w = rep(1,columns)/columns d1z = 0 #Optimization Function optimize<-function(){ Modified: 
pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R 2013-09-05 21:31:44 UTC (rev 3005) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R 2013-09-06 00:00:33 UTC (rev 3006) @@ -106,13 +106,14 @@ refSR = refSR[-index] sk = sk[-index] kr = kr[-index] + columnnames = columnnames[-index] warning(paste("The Reference Sharpe Ratio greater than the Observed Sharpe Ratio for case",columnnames[index],"\n")) } - result = pnorm(((sr - refSR)*(n-1)^(0.5))/(1-sr*sk+sr^2*(kr-1)/4)^(0.5)) - columnnames = columnnames[-index] + result = pnorm(((sr - refSR)*((n-1)^(0.5)))/(1-sr*sk+(sr^2)*(kr-1)/4)^(0.5)) + if(!is.null(dim(result))){ - colnames(result) = paste(columnnames,"(SR >",refSR,")") + colnames(result) = paste(columnnames,"(SR >",round(refSR,2),")") rownames(result) = paste("Probabilistic Sharpe Ratio(p=",round(p*100,1),"%):") } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/week1/code/PSROpt.py =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/week1/code/PSROpt.py 2013-09-05 21:31:44 UTC (rev 3005) +++ pkg/PerformanceAnalytics/sandbox/pulkit/week1/code/PSROpt.py 2013-09-06 00:00:33 UTC (rev 3006) @@ -134,12 +134,12 @@ def main(): #1) Inputs (path to csv file with returns series) path='data.csv' - maxIter=1000 # Maximum number of iterations + maxIter=10000 # Maximum number of iterations delta=.005 # Delta Z (attempted gain per interation) #2) Load data, set seed series=np.genfromtxt(path,delimiter=',') # load as numpy array - seed=np.ones((series.shape[1],1)) # initialize seed + seed=np.ones((series.shape[1],1))/series.shape[1] # initialize seed bounds=[(0,1) for i in seed] # min and max boundary per weight #3) Create class and solve From noreply at r-forge.r-project.org Fri Sep 6 09:15:20 2013 From: noreply at r-forge.r-project.org (noreply at 
r-forge.r-project.org) Date: Fri, 6 Sep 2013 09:15:20 +0200 (CEST) Subject: [Returnanalytics-commits] r3007 - in pkg/Meucci: R demo man Message-ID: <20130906071521.0D38218444E@r-forge.r-project.org> Author: xavierv Date: 2013-09-06 09:15:20 +0200 (Fri, 06 Sep 2013) New Revision: 3007 Added: pkg/Meucci/demo/00Index Modified: pkg/Meucci/R/FitMultivariateGarch.R pkg/Meucci/R/InterExtrapolate.R pkg/Meucci/R/PerformIidAnalysis.R pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R pkg/Meucci/man/InterExtrapolate.Rd pkg/Meucci/man/PerformIidAnalysis.Rd pkg/Meucci/man/PlotVolVsCompositionEfficientFrontier.Rd pkg/Meucci/man/garch1f4.Rd pkg/Meucci/man/garch2f8.Rd Log: -Function documentation errors fixed Modified: pkg/Meucci/R/FitMultivariateGarch.R =================================================================== --- pkg/Meucci/R/FitMultivariateGarch.R 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/R/FitMultivariateGarch.R 2013-09-06 07:15:20 UTC (rev 3007) @@ -113,6 +113,8 @@ #' Fit a GARCH(1,1) model with student-t errors #' #' @param x : [vector] (T x 1) data generated by a GARCH(1,1) process +#' @param eps : [scalar] used in enforcing a_ii + b_ii <= 1 - eps; the default value is zero +#' @param df : [scalar] degree of freedom for the t-distribution; the default value is 500 to make it, basically, normal #' #' @return q : [vector] (4 x 1) parameters of the GARCH(1,1) process #' @return qerr : [vector] (4 x 1) standard error of parameter estimates @@ -364,7 +366,18 @@ #' Off-diagonal parameter estimation in bivariate GARCH(1,1) when diagonal parameters are given. 
#' -#' @param x : [vector] (T x 1) data generated by a GARCH(1,1) process +#' @param y : [vector] (T x 1) data generated by a GARCH(1,1) process +#' @param c1 : [scalar] diagonal parameter of the GARCH(1,1) process taken from matrix C +#' @param a1 : [scalar] diagonal parameter of the GARCH(1,1) process taken from matrix A +#' @param b1 : [scalar] diagonal parameter of the GARCH(1,1) process taken from matrix B +#' @param y1 : [vector] (T x 1) data generated by a GARCH(1,1) process +#' @param h1 : [vector] (T x 1) data generated by a GARCH(1,1) process +#' @param c2 : [scalar] diagonal parameter of the GARCH(1,1) process taken from matrix C +#' @param a2 : [scalar] diagonal parameter of the GARCH(1,1) process taken from matrix A +#' @param b2 : [scalar] diagonal parameter of the GARCH(1,1) process taken from matrix B +#' @param y2 : [vector] (T x 1) data generated by a GARCH(1,1) process +#' @param h2 : [vector] (T x 1) generated by a GARCH(1,1) process +#' @param df : [scalar] degree of freedom for the t-distribution; the default value is 500 to make it, basically, normal #' #' @return q : [vector] (4 x 1) parameters of the GARCH(1,1) process #' @return qerr : [vector] (4 x 1) standard error of parameter estimates Modified: pkg/Meucci/R/InterExtrapolate.R =================================================================== --- pkg/Meucci/R/InterExtrapolate.R 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/R/InterExtrapolate.R 2013-09-06 07:15:20 UTC (rev 3007) @@ -21,16 +21,6 @@ #' Vpred = interpne(V,Xi,nodelist,method) #' Extrapolating long distances outside the support of V is rarely advisable. 
#' -#' @examples -#' -#' [x1,x2] = meshgrid(0:.2:1); -#' z = exp(x1+x2); -#' Xi = rand(100,2)*2-.5; -#' Zi = interpne(z,Xi,{0:.2:1, 0:.2:1},'linear'); -#' surf(0:.2:1,0:.2:1,z) -#' hold on -#' plot3(Xi(:,1),Xi(:,2),Zi,'ro') -#' #' @references #' \url{http://symmys.com/node/170} #' See Meucci's script for "InterExtrapolate.R" @@ -38,7 +28,18 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -InterExtrapolate = function( V, Xi, nodelist, method, ...) +# examples +# +# [x1,x2] = meshgrid(0:.2:1); +# z = exp(x1+x2); +# Xi = rand(100,2)*2-.5; +# Zi = interpne(z,Xi,{0:.2:1, 0:.2:1},'linear'); +# surf(0:.2:1,0:.2:1,z) +# hold on +# plot3(Xi(:,1),Xi(:,2),Zi,'ro') +# + +InterExtrapolate = function( V, Xi, nodelist, method ) { # get some sizes Modified: pkg/Meucci/R/PerformIidAnalysis.R =================================================================== --- pkg/Meucci/R/PerformIidAnalysis.R 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/R/PerformIidAnalysis.R 2013-09-06 07:15:20 UTC (rev 3007) @@ -3,7 +3,7 @@ #' #' @param Dates : [vector] (T x 1) dates #' @param Data : [matrix] (T x N) data -#' @param Starting_Prices : [vector] (N x 1) +#' @param Str : [string] title for the plot #' #' @note it checks the evolution over time # it checks that the variables are identically distributed by looking at the histogram of two subsamples Modified: pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R =================================================================== --- pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R 2013-09-06 07:15:20 UTC (rev 3007) @@ -1,8 +1,8 @@ #' Plot the efficient frontier in the plane of portfolio weights versus standard deviation, #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. 
#' -#' @param Portfolios: [matrix] (M x N) of portfolios weights -#' @param vol : [vector] (M x 1) of volatilities +#' @param Portfolios : [matrix] (M x N) of portfolios weights +#' @param vol : [vector] (M x 1) of volatilities #' #' @references #' \url{http://symmys.com/node/170} Added: pkg/Meucci/demo/00Index =================================================================== --- pkg/Meucci/demo/00Index (rev 0) +++ pkg/Meucci/demo/00Index 2013-09-06 07:15:20 UTC (rev 3007) @@ -0,0 +1,20 @@ +AnalyticalvsNumerical This example script compares the numerical and the analytical solution of entropy-pooling +ButterflyTrading This example script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci +DetectOutliersviaMVE This example script detects outliers in two-asset and multi-asset case +FullyFlexibleBayesNets This case study uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management +HermiteGrid_CaseStudy This script estimates the prior of a hedge fund return and processes extreme views on CVaR according to Entropy Pooling +HermiteGrid_CVaR_Recursion This script illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling +HermiteGrid_demo This script compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views +InvariantProjection This script projects summary statistics to arbitrary horizons under i.i.d. 
assumption +logToArithmeticCovariance This example script generates arithmetric returns and arithmetric covariance matrix given a distribution of log returns +Prior2Posterior This example script compares the numerical and the analytical solution of entropy-pooling +RankingInformation This script performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci +RobustBayesianAllocation This script replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M +S_plotGaussHermite This example script displays mesh points based on Gaussian-Hermite quadrature +S_SnPCaseStudy This script replicates the example from Meucci's MATLAB script S_SnPCaseStudy.M +S_ToyExample This toy example illustrates the use of Entropy Pooling to compute Fully Flexible Bayesian networks +S_FitProjectRates This script fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution +S_CheckDiagonalization This script verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the transition matrix of an OU process +S_CovarianceEvolution This script represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid +S_DeterministicEvolution This script animates the evolution of the determinstic component of an OU process +MeanDiversificationFrontier This script computes the mean-diversification efficient frontier \ No newline at end of file Modified: pkg/Meucci/man/InterExtrapolate.Rd =================================================================== --- pkg/Meucci/man/InterExtrapolate.Rd 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/man/InterExtrapolate.Rd 2013-09-06 07:15:20 UTC (rev 3007) @@ -2,7 +2,7 @@ \alias{InterExtrapolate} \title{Interpolate and extrapolate using n-linear interpolation (tensor product linear).} \usage{ - InterExtrapolate(V, Xi, nodelist, method, ...) 
+ InterExtrapolate(V, Xi, nodelist, method) } \arguments{ \item{V}{: [array] p-dimensional array to be @@ -34,15 +34,6 @@ interpne(V,Xi,nodelist,method) Extrapolating long distances outside the support of V is rarely advisable. } -\examples{ -[x1,x2] = meshgrid(0:.2:1); - z = exp(x1+x2); - Xi = rand(100,2)*2-.5; - Zi = interpne(z,Xi,{0:.2:1, 0:.2:1},'linear'); - surf(0:.2:1,0:.2:1,z) - hold on - plot3(Xi(:,1),Xi(:,2),Zi,'ro') -} \author{ Xavier Valls \email{flamejat at gmail.com} } Modified: pkg/Meucci/man/PerformIidAnalysis.Rd =================================================================== --- pkg/Meucci/man/PerformIidAnalysis.Rd 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/man/PerformIidAnalysis.Rd 2013-09-06 07:15:20 UTC (rev 3007) @@ -3,14 +3,14 @@ \title{This function performs simple invariance (i.i.d.) tests on a time series, as described in A. Meucci "Risk and Asset Allocation", Springer, 2005} \usage{ - PerformIidAnalysis(Dates = dim(Data, 1), Data, Str = "") + PerformIidAnalysis(Dates = dim(Data)[1], Data, Str = "") } \arguments{ \item{Dates}{: [vector] (T x 1) dates} \item{Data}{: [matrix] (T x N) data} - \item{Starting_Prices}{: [vector] (N x 1)} + \item{Str}{: [string] title for the plot} } \description{ This function performs simple invariance (i.i.d.) 
tests Modified: pkg/Meucci/man/PlotVolVsCompositionEfficientFrontier.Rd =================================================================== --- pkg/Meucci/man/PlotVolVsCompositionEfficientFrontier.Rd 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/man/PlotVolVsCompositionEfficientFrontier.Rd 2013-09-06 07:15:20 UTC (rev 3007) @@ -6,7 +6,7 @@ PlotVolVsCompositionEfficientFrontier(Portfolios, vol) } \arguments{ - \item{Portfolios:}{[matrix] (M x N) of portfolios + \item{Portfolios}{: [matrix] (M x N) of portfolios weights} \item{vol}{: [vector] (M x 1) of volatilities} Modified: pkg/Meucci/man/garch1f4.Rd =================================================================== --- pkg/Meucci/man/garch1f4.Rd 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/man/garch1f4.Rd 2013-09-06 07:15:20 UTC (rev 3007) @@ -7,6 +7,13 @@ \arguments{ \item{x}{: [vector] (T x 1) data generated by a GARCH(1,1) process} + + \item{eps}{: [scalar] used in enforcing a_ii + b_ii <= 1 + - eps; the default value is zero} + + \item{df}{: [scalar] degree of freedom for the + t-distribution; the default value is 500 to make it, + basically, normal} } \value{ q : [vector] (4 x 1) parameters of the GARCH(1,1) process Modified: pkg/Meucci/man/garch2f8.Rd =================================================================== --- pkg/Meucci/man/garch2f8.Rd 2013-09-06 00:00:33 UTC (rev 3006) +++ pkg/Meucci/man/garch2f8.Rd 2013-09-06 07:15:20 UTC (rev 3007) @@ -5,8 +5,42 @@ garch2f8(y, c1, a1, b1, y1, h1, c2, a2, b2, y2, h2, df) } \arguments{ - \item{x}{: [vector] (T x 1) data generated by a + \item{y}{: [vector] (T x 1) data generated by a GARCH(1,1) process} + + \item{c1}{: [scalar] diagonal parameter of the GARCH(1,1) + process taken from matrix C} + + \item{a1}{: [scalar] diagonal parameter of the GARCH(1,1) + process taken from matrix A} + + \item{b1}{: [scalar] diagonal parameter of the GARCH(1,1) + process taken from matrix B} + + \item{y1}{: [vector] (T x 1) data generated by a + GARCH(1,1) process} 
+ + \item{h1}{: [vector] (T x 1) data generated by a + GARCH(1,1) process} + + \item{c2}{: [scalar] diagonal parameter of the GARCH(1,1) + process taken from matrix C} + + \item{a2}{: [scalar] diagonal parameter of the GARCH(1,1) + process taken from matrix A} + + \item{b2}{: [scalar] diagonal parameter of the GARCH(1,1) + process taken from matrix B} + + \item{y2}{: [vector] (T x 1) data generated by a + GARCH(1,1) process} + + \item{h2}{: [vector] (T x 1) generated by a GARCH(1,1) + process} + + \item{df}{: [scalar] degree of freedom for the + t-distribution; the default value is 500 to make it, + basically, normal} } \value{ q : [vector] (4 x 1) parameters of the GARCH(1,1) process From noreply at r-forge.r-project.org Fri Sep 6 10:03:45 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 6 Sep 2013 10:03:45 +0200 (CEST) Subject: [Returnanalytics-commits] r3008 - in pkg/Meucci: . R data demo Message-ID: <20130906080345.670031851D9@r-forge.r-project.org> Author: xavierv Date: 2013-09-06 10:03:44 +0200 (Fri, 06 Sep 2013) New Revision: 3008 Added: pkg/Meucci/data/00Index Modified: pkg/Meucci/DESCRIPTION pkg/Meucci/R/CentralAndStandardizedStatistics.R pkg/Meucci/R/EntropyProg.R pkg/Meucci/R/FitMultivariateGarch.R pkg/Meucci/R/RankingInformation.R pkg/Meucci/demo/00Index Log: - minor error fixing Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2013-09-06 07:15:20 UTC (rev 3007) +++ pkg/Meucci/DESCRIPTION 2013-09-06 08:03:44 UTC (rev 3008) @@ -50,7 +50,8 @@ scatterplot3d, signal, fExtremes, - QZ + QZ, + PerformanceAnalytics License: GPL URL: http://r-forge.r-project.org/projects/returnanalytics/ Copyright: (c) 2012 Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R =================================================================== --- pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-06 07:15:20 UTC (rev 3007) +++ 
pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-06 08:03:44 UTC (rev 3008) @@ -16,6 +16,7 @@ CentralAndStandardizedStatistics = function( X, N ) { + if(!require("PerformanceAnalytics")) stop("PerformanceAnalytics package required for this script"); # compute central moments mu = matrix( 0, 1, N); mu[ 1 ] = mean(X); Modified: pkg/Meucci/R/EntropyProg.R =================================================================== --- pkg/Meucci/R/EntropyProg.R 2013-09-06 07:15:20 UTC (rev 3007) +++ pkg/Meucci/R/EntropyProg.R 2013-09-06 08:03:44 UTC (rev 3008) @@ -200,7 +200,7 @@ { if ( length( match.call() ) < 3 ) { - J = size( X , 1 ) + J = dim( X )[ 1 ] nBins = round( 10 * log(J) ) } Modified: pkg/Meucci/R/FitMultivariateGarch.R =================================================================== --- pkg/Meucci/R/FitMultivariateGarch.R 2013-09-06 07:15:20 UTC (rev 3007) +++ pkg/Meucci/R/FitMultivariateGarch.R 2013-09-06 08:03:44 UTC (rev 3008) @@ -691,6 +691,7 @@ minfro = function( A ) { + if(!require("Matrix")) stop("Matrix package required for this script"); if( any( diag( A ) < 0) ) { stop("Diagonal Elements Must Be Non-Negative!"); @@ -726,7 +727,7 @@ b = a - rho * m; # Newton's step x = newton( M, i, b, m, aii, n, rho ); - P = sparse( diag( 1, n ) ); + P = as( diag( 1, n ), "sparseMatrix" ); P[ i, 1:n ] = t(x); # update Mtest = P %*% M %*% t(P); @@ -740,7 +741,7 @@ } } - normj[ j+1 ] = oldnorm; ##ok + normj[ j+1 ] = oldnorm; incj[ j ] = oldnormj - oldnorm; oldnormj = oldnorm; Modified: pkg/Meucci/R/RankingInformation.R =================================================================== --- pkg/Meucci/R/RankingInformation.R 2013-09-06 07:15:20 UTC (rev 3007) +++ pkg/Meucci/R/RankingInformation.R 2013-09-06 08:03:44 UTC (rev 3008) @@ -13,7 +13,7 @@ data = as.data.frame( weightsMatrix ) data$aspect = 1:nrow(data) data2 = reshape2:::melt( data , id.vars = "aspect" ) - p <- ggplot(data2, aes(x=factor(aspect), y = value, fill=factor(variable))) + geom_bar() #+ opts( 
title = expression( "Efficient Frontier Weights" )) + p <- ggplot(data2, aes( x = factor(aspect), y = value, fill = factor( variable ) ) ) + geom_bar() #+ opts( title = expression( "Efficient Frontier Weights" )) return( p ) } @@ -25,6 +25,7 @@ #' @param Upper a vector of indexes indicating which column is lower than the corresponding column number in Upper #' @export EntropyProg # @example ViewRanking( X , p , Lower = c(3,4) , Upper = c(4,5) ) # two inequality views: asset 3 < asset 4 returns, and asset 4 < asset 5 returns + ViewRanking = function( X , p , Lower , Upper ) { library( matlab ) Added: pkg/Meucci/data/00Index =================================================================== --- pkg/Meucci/data/00Index (rev 0) +++ pkg/Meucci/data/00Index 2013-09-06 08:03:44 UTC (rev 3008) @@ -0,0 +1,10 @@ +butterflyAnalytics X p FactorNames +FactorDistributions Butterflies +ghq1000 ghqx +MeucciFreaqEst DY Data Dates X p Names +MeucciTweakTest A_ Aeq_ b_ beq_ db_ g_ lb_ ub_ +pseudodata data +ReturnsDistribution P X +SectorsSnP500 DP P +MeanDiversificationFrontier S Mu w_b +DB_SwapParRates Rates Dates \ No newline at end of file Modified: pkg/Meucci/demo/00Index =================================================================== --- pkg/Meucci/demo/00Index 2013-09-06 07:15:20 UTC (rev 3007) +++ pkg/Meucci/demo/00Index 2013-09-06 08:03:44 UTC (rev 3008) @@ -1,7 +1,7 @@ AnalyticalvsNumerical This example script compares the numerical and the analytical solution of entropy-pooling ButterflyTrading This example script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci DetectOutliersviaMVE This example script detects outliers in two-asset and multi-asset case -FullyFlexibleBayesNets This case study uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management +FullyFlexibleBayesNets This case study uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management HermiteGrid_CaseStudy This 
script estimates the prior of a hedge fund return and processes extreme views on CVaR according to Entropy Pooling HermiteGrid_CVaR_Recursion This script illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling HermiteGrid_demo This script compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views @@ -17,4 +17,4 @@ S_CheckDiagonalization This script verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the transition matrix of an OU process S_CovarianceEvolution This script represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid S_DeterministicEvolution This script animates the evolution of the determinstic component of an OU process -MeanDiversificationFrontier This script computes the mean-diversification efficient frontier \ No newline at end of file +MeanDiversificationFrontier This script computes the mean-diversification efficient frontier From noreply at r-forge.r-project.org Fri Sep 6 10:34:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 6 Sep 2013 10:34:13 +0200 (CEST) Subject: [Returnanalytics-commits] r3009 - pkg/Meucci/demo Message-ID: <20130906083413.1856618444E@r-forge.r-project.org> Author: xavierv Date: 2013-09-06 10:34:12 +0200 (Fri, 06 Sep 2013) New Revision: 3009 Modified: pkg/Meucci/demo/00Index pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R pkg/Meucci/demo/S_CornishFisher.R Log: - updated Index for demo scripts beggining up to S_D Modified: pkg/Meucci/demo/00Index =================================================================== --- pkg/Meucci/demo/00Index 2013-09-06 08:03:44 UTC (rev 3008) +++ pkg/Meucci/demo/00Index 2013-09-06 08:34:12 UTC (rev 3009) @@ -1,20 +1,91 @@ -AnalyticalvsNumerical This example script compares the numerical and the analytical solution of entropy-pooling -ButterflyTrading This example 
script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci -DetectOutliersviaMVE This example script detects outliers in two-asset and multi-asset case -FullyFlexibleBayesNets This case study uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management -HermiteGrid_CaseStudy This script estimates the prior of a hedge fund return and processes extreme views on CVaR according to Entropy Pooling -HermiteGrid_CVaR_Recursion This script illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling -HermiteGrid_demo This script compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views -InvariantProjection This script projects summary statistics to arbitrary horizons under i.i.d. assumption -logToArithmeticCovariance This example script generates arithmetric returns and arithmetric covariance matrix given a distribution of log returns -Prior2Posterior This example script compares the numerical and the analytical solution of entropy-pooling -RankingInformation This script performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci -RobustBayesianAllocation This script replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M +AnalyticalvsNumerical This example script compares the numerical and the analytical solution of entropy-pooling +ButterflyTrading This example script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci +DetectOutliersviaMVE This example script detects outliers in two-asset and multi-asset case +FullyFlexibleBayesNets This case study uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management +FullFlexProbs This script uses Entropy Pooling to compute Fully Flexible Probabilities for historical scenarios +FullyIntegratedLiquidityAndMarketRisk This script computes the liquidity-risk and funding-risk 
adjusted P&L distribution +HermiteGrid_CaseStudy This script estimates the prior of a hedge fund return and processes extreme views on CVaR according to Entropy Pooling +HermiteGrid_CVaR_Recursion This script illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling +HermiteGrid_demo This script compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views +InvariantProjection This script projects summary statistics to arbitrary horizons under i.i.d. assumption +logToArithmeticCovariance This example script generates arithmetric returns and arithmetric covariance matrix given a distribution of log returns +Prior2Posterior This example script compares the numerical and the analytical solution of entropy-pooling +RankingInformation This script performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci +RobustBayesianAllocation This script replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M +S_AnalyzeLognormalCorrelation This script considers a bivariate lognormal market and display the correlation and the condition number of the covariance matrix +S_AnalyzeNormalCorrelation This script considers a bivariate normal market and display the correlation and the condition number of the covariance matrix +S_AnalyzeNormalInverseWishart This script familiarizes the users with multivariate Bayesian estimation. 
+S_AutocorrelatedProcess This script simulates a Ornstein-Uhlenbeck AR(1) process +S_BivariateSample This script generates draws from a bivariate distribution with different marginals +S_BlackLittermanBasic This script describes to basic market-based Black-Litterman approach +S_BondProjectionPricingNormal This script projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon +S_BondProjectionPricingStudentT This script projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon (Student's T assumption) +S_BuyNHold This script illustrates the buy & hold dynamic strategy +S_CPPI This script illustrates the CPPI (constant proportion portfolio insurance) dynamic strategy +S_CallsProjectionPricing This script projects the distribution of the market invariants for the derivatives market and computes the distribution of prices at the investment horizon +S_CheckDiagonalization This script verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the transition matrix of an OU process +S_CornishFisher This script compares the Cornish-Fisher estimate of the VaR with the true analytical VaR under the lognormal assumptions +S_CorrelationPriorUniform This script shows how a jointly uniform prior on the correlations implies that the marginal distribution of each correlation is peaked around zero +S_CovarianceEvolution This script represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid +S_CrossSectionConstrainedIndustries This script fits a cross-sectional linear factor model creating industry factors, where the industry factors are constrained to be uncorrelated with the market +S_CrossSectionIndustries This script fits a cross-sectional linear factor model creating industry factors +S_DerivativesInvariants +S_DisplayLognormalCopulaPdf +S_DisplayNormalCopulaCdf 
+S_DisplayNormalCopulaPdf +S_DisplayStudentTCopulaPdf +S_ESContributionFactors +S_ESContributionsStudentT +S_EigenvalueDispersion +S_EllipticalNDim +S_EquitiesInvariance +S_EquitiesInvariants +S_EquityProjectionPricing +S_EstimateExpectedValueEvaluation +S_EstimateMomentsComboEvaluation +S_EstimateQuantileEvaluation +S_Estimator +S_EvaluationGeneric +S_ExactMeanAndCovariance +S_ExpectationMaximizationHighYield +S_ExtremeValueTheory +S_FactorAnalysisNotOk +S_FactorResidualCorrelation +S_FitSwapToStudentT +S_FixedIncomeInvariants +S_FullCodependence +S_FxCopulaMarginal +S_GenerateMixtureSample +S_HedgeOptions +S_HorizonEffect +S_InvestorsObjective +S_JumpDiffusionMerton +S_LinVsLogReturn +S_LognormalSample +S_MarkovChainMonteCarlo +S_MaxMinVariance +S_MaximumLikelihood +S_MeanVarianceBenchmark +S_MeanVarianceCalls +S_MeanVarianceHorizon +S_MeanVarianceOptimization +S_MultiVarSqrRootRule +S_NonAnalytical +S_NormalSample +S_OrderStatisticsPdfLognormal +S_OrderStatisticsPdfStudentT +S_PasturMarchenko +S_ProjectNPriceMvGarch +S_ProjectSummaryStatistics +S_PureResidualBonds +S_ResidualAnalysisTheory +S_SelectionHeuristics +S_SemiCircular +S_ShrinkageEstimators S_plotGaussHermite This example script displays mesh points based on Gaussian-Hermite quadrature -S_SnPCaseStudy This script replicates the example from Meucci's MATLAB script S_SnPCaseStudy.M +S_SnPCaseStudy This script replicates the example from Meucci's MATLAB scriptS_SnPCaseStudy.M S_ToyExample This toy example illustrates the use of Entropy Pooling to compute Fully Flexible Bayesian networks S_FitProjectRates This script fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution -S_CheckDiagonalization This script verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the transition matrix of an OU process -S_CovarianceEvolution This script represents the evolution of the covariance of an OU 
process in terms of the dispersion ellipsoid + S_DeterministicEvolution This script animates the evolution of the determinstic component of an OU process MeanDiversificationFrontier This script computes the mean-diversification efficient frontier Modified: pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R =================================================================== --- pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R 2013-09-06 08:03:44 UTC (rev 3008) +++ pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R 2013-09-06 08:34:12 UTC (rev 3009) @@ -1,4 +1,4 @@ -#'This his script computes the liquidity-risk and funding-risk adjusted P&L distribution, as described in +#' This script computes the liquidity-risk and funding-risk adjusted P&L distribution, as described in #' A. Meucci, "A Fully Integrated Liquidity and Market Risk Model", Financial Analyst Journal, 68, 6, 35-47 (2012) #' #' @references Modified: pkg/Meucci/demo/S_CornishFisher.R =================================================================== --- pkg/Meucci/demo/S_CornishFisher.R 2013-09-06 08:03:44 UTC (rev 3008) +++ pkg/Meucci/demo/S_CornishFisher.R 2013-09-06 08:34:12 UTC (rev 3009) @@ -1,4 +1,4 @@ -#'This script compares the Cornish-Fisher estiamte of the VaR with the true analytical VaR under the lognormal +#'This script compares the Cornish-Fisher estimate of the VaR with the true analytical VaR under the lognormal #'assumptions as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 5. 
#' #' @references From noreply at r-forge.r-project.org Fri Sep 6 11:18:55 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 6 Sep 2013 11:18:55 +0200 (CEST) Subject: [Returnanalytics-commits] r3010 - pkg/Meucci/demo Message-ID: <20130906091855.80ECF185613@r-forge.r-project.org> Author: xavierv Date: 2013-09-06 11:18:55 +0200 (Fri, 06 Sep 2013) New Revision: 3010 Modified: pkg/Meucci/demo/00Index pkg/Meucci/demo/S_EigenvalueDispersion.R pkg/Meucci/demo/S_EllipticalNDim.R pkg/Meucci/demo/S_EquityProjectionPricing.R pkg/Meucci/demo/S_FullCodependence.R pkg/Meucci/demo/S_FxCopulaMarginal.R pkg/Meucci/demo/S_InvestorsObjective.R pkg/Meucci/demo/S_ShrinkageEstimators.R pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R Log: - finished Index for demo scripts Modified: pkg/Meucci/demo/00Index =================================================================== --- pkg/Meucci/demo/00Index 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/00Index 2013-09-06 09:18:55 UTC (rev 3010) @@ -8,14 +8,14 @@ HermiteGrid_CVaR_Recursion This script illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling HermiteGrid_demo This script compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views InvariantProjection This script projects summary statistics to arbitrary horizons under i.i.d. 
assumption -logToArithmeticCovariance This example script generates arithmetric returns and arithmetric covariance matrix given a distribution of log returns +MeanDiversificationFrontier This script computes the mean-diversification efficient frontier Prior2Posterior This example script compares the numerical and the analytical solution of entropy-pooling RankingInformation This script performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci RobustBayesianAllocation This script replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M S_AnalyzeLognormalCorrelation This script considers a bivariate lognormal market and display the correlation and the condition number of the covariance matrix S_AnalyzeNormalCorrelation This script considers a bivariate normal market and display the correlation and the condition number of the covariance matrix S_AnalyzeNormalInverseWishart This script familiarizes the users with multivariate Bayesian estimation. -S_AutocorrelatedProcess This script simulates a Ornstein-Uhlenbeck AR(1) process +S_AutocorrelatedProcess This script simulates a Ornstein-Uhlenbeck AR(1) process S_BivariateSample This script generates draws from a bivariate distribution with different marginals S_BlackLittermanBasic This script describes to basic market-based Black-Litterman approach S_BondProjectionPricingNormal This script projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon @@ -29,63 +29,79 @@ S_CovarianceEvolution This script represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid S_CrossSectionConstrainedIndustries This script fits a cross-sectional linear factor model creating industry factors, where the industry factors are constrained to be uncorrelated with the market S_CrossSectionIndustries This script fits a cross-sectional linear factor model creating industry factors 
-S_DerivativesInvariants -S_DisplayLognormalCopulaPdf -S_DisplayNormalCopulaCdf -S_DisplayNormalCopulaPdf -S_DisplayStudentTCopulaPdf -S_ESContributionFactors -S_ESContributionsStudentT -S_EigenvalueDispersion -S_EllipticalNDim -S_EquitiesInvariance -S_EquitiesInvariants -S_EquityProjectionPricing -S_EstimateExpectedValueEvaluation -S_EstimateMomentsComboEvaluation -S_EstimateQuantileEvaluation -S_Estimator -S_EvaluationGeneric -S_ExactMeanAndCovariance -S_ExpectationMaximizationHighYield -S_ExtremeValueTheory -S_FactorAnalysisNotOk -S_FactorResidualCorrelation -S_FitSwapToStudentT -S_FixedIncomeInvariants -S_FullCodependence -S_FxCopulaMarginal -S_GenerateMixtureSample -S_HedgeOptions -S_HorizonEffect -S_InvestorsObjective -S_JumpDiffusionMerton -S_LinVsLogReturn -S_LognormalSample -S_MarkovChainMonteCarlo -S_MaxMinVariance -S_MaximumLikelihood -S_MeanVarianceBenchmark -S_MeanVarianceCalls -S_MeanVarianceHorizon -S_MeanVarianceOptimization -S_MultiVarSqrRootRule -S_NonAnalytical -S_NormalSample -S_OrderStatisticsPdfLognormal -S_OrderStatisticsPdfStudentT -S_PasturMarchenko -S_ProjectNPriceMvGarch -S_ProjectSummaryStatistics -S_PureResidualBonds -S_ResidualAnalysisTheory -S_SelectionHeuristics -S_SemiCircular -S_ShrinkageEstimators +S_DerivativesInvariants This script performs the quest for invariance in the derivatives market +S_DeterministicEvolution This script animates the evolution of the deterministic component of an OU process +S_DisplayLognormalCopulaPdf This script displays the pdf of the copula of a lognormal distribution +S_DisplayNormalCopulaCdf This script displays the cdf of the copula of a normal distribution +S_DisplayNormalCopulaPdf This script displays the pdf of the copula of a normal distribution +S_DisplayStudentTCopulaPdf This script displays the pdf of the copula of a Student t distribution +S_ESContributionFactors This script computes the expected shortfall and the contributions to ES from each factor in simulations +S_ESContributionsStudentT
This script computes the expected shortfall and the contributions to ES from each security +S_EigenvalueDispersion This script displays the sample eigenvalues dispersion phenomenon +S_EllipticalNDim This script decomposes the N-variate normal distribution into its radial and uniform components to generate an elliptical distribution +S_EquitiesInvariants This file performs the quest for invariance in the stock market +S_EquityProjectionPricing This script projects the distribution of the market invariants for the stock market from the estimation interval (normal assumption) to the investment horizon. Then it computes the distribution of prices at the investment horizon analytically. +S_EstimateExpectedValueEvaluation This script familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency +S_EstimateMomentsComboEvaluation This script familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency +S_EstimateQuantileEvaluation This script familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency +S_Estimator This script familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency +S_EvaluationGeneric This script determines the optimal allocation +S_ExactMeanAndCovariance Generate draws from a multivariate normal with matching mean and covariance +S_ExpectationMaximizationHighYield This script implements the Expectation-Maximization (EM) algorithm, which estimates the parameters of a multivariate normal distribution when some observations are randomly missing +S_ExtremeValueTheory This script computes the quantile (VaR) analytically, in simulations and using the extreme value theory approximation +S_FactorAnalysisNotOk This script illustrates the hidden factor analysis puzzle +S_FactorResidualCorrelation This script illustrates exogenous loadings and endogenous factors the
true analytical VaR under the lognormal assumptions from the estimation interval to the investment horizon +S_FitProjectRates This script fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution +S_FitSwapToStudentT This script demonstrates the recursive ML estimation of the location and scatter parameters of a multivariate Student t distribution +S_FixedIncomeInvariants This file performs the quest for invariance in the fixed income market +S_FullCodependence This script illustrates the concept of co-dependence +S_FxCopulaMarginal This script displays the empirical copula of a set of market variables +S_GenerateMixtureSample This script generates draws from a univariate mixture +S_HedgeOptions This script compares hedging based on Black-Scholes deltas with Factors on Demand hedging +S_HorizonEffect This script studies horizon effect on explicit factors / implicit loadings linear model +S_InvestorsObjective This script familiarizes the users with the objectives of different investors in a highly non-normal bi-variate market of securities +S_JumpDiffusionMerton This script simulates a jump-diffusion process +S_LinVsLogReturn This script projects a distribution in the future according to the i.i.d.-implied square-root rule +S_LognormalSample This script simulates univariate lognormal variables +S_MarkovChainMonteCarlo This script illustrates the Metropolis-Hastings algorithm +S_MaxMinVariance This script displays location-dispersion ellipsoid and statistic +S_MaximumLikelihood This script performs ML under a non-standard parametric set of distributions +S_MeanVarianceBenchmark This script projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon.
Then it computes the distribution of prices at the investment horizon and translates this distribution into the returns distribution +S_MeanVarianceCalls This script computes the mean-variance frontier of a set of options +S_MeanVarianceHorizon This script projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance optimization in terms of returns and relative portfolio weights. +S_MeanVarianceOptimization This script projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance optimization. +S_MultiVarSqrRootRule This script illustrates the multivariate square root rule-of-thumb +S_NonAnalytical This script generates draws for the sum of random variables +S_NormalSample This script simulates univariate normal variables +S_OrderStatisticsPdfLognormal This script shows that the pdf of the r-th order statistics of a lognormal random variable +S_OrderStatisticsPdfStudentT This script shows that the pdf of the r-th order statistics of a Student t random variable +S_PasturMarchenko This script illustrates the Marchenko-Pastur limit of random matrix theory +S_ProjectNPriceMvGarch This script fits a multivariate GARCH model and projects the distribution of the compounded returns from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon.
+S_ProjectSummaryStatistics This script projects summary statistics to arbitrary horizons +S_PureResidualBonds This script models the joint distribution of the yet-to-be realized key rates of the government curve +S_ResidualAnalysisTheory This script performs the analysis of residuals +S_SelectionHeuristics Compute the r-square of selected factors +S_SemiCircular This script illustrates the semi-circular law of random matrix theory +S_ShrinkageEstimators This script computes the multivariate shrinkage estimators of location and scatter under the normal assumption +S_SnPCaseStudy This script replicates the example from Meucci's MATLAB script S_SnPCaseStudy.M +S_StatArbSwaps This script searches for cointegrated stat-arb strategies among swap contracts +S_StudentTSample This script simulates univariate Student-t variables +S_SwapPca2Dim This script performs the principal component analysis of a simplified two-point swap curve +S_TStatApprox Simulate invariants for the regression model +S_TimeSeriesConstrainedIndustries This script fits a time-series linear factor computing the industry factors loadings, where the loadings are bounded and constrained to yield unit exposure +S_TimeSeriesIndustries This script fits a time-series linear factor computing the industry factors loadings +S_TimeSeriesVsCrossSectionIndustries This script computes the correlation between explicit, time-series industry factor returns and implicit, cross-section industry factor returns +S_Toeplitz This script shows that the eigenvectors of a Toeplitz matrix have a Fourier basis structure under t-distribution assumptions +S_UtilityMax This script illustrates the constant weight dynamic strategy that maximizes power utility +S_VaRContributionsUniform This script computes the VaR and the contributions to VaR from each security analytically and in simulations +S_VolatilityClustering This file generates paths for a volatility clustering +S_Wishart This script generates a sample from the 2x2 Wishart
distribution +S_WishartCorrelation This script computes the correlation of the first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs +S_WishartLocationDispersion This script computes the location-dispersion ellipsoid of the normalized first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs +S_ToyExample This toy example illustrates the use of Entropy Pooling to compute Fully Flexible +logToArithmeticCovariance This example script generates arithmetric returns and arithmetric covariance matrix given a distribution of log returns S_plotGaussHermite This example script displays mesh points based on Gaussian-Hermite quadrature -S_SnPCaseStudy This script replicates the example from Meucci's MATLAB scriptS_SnPCaseStudy.M -S_ToyExample This toy example illustrates the use of Entropy Pooling to compute Fully Flexible Bayesian networks -S_FitProjectRates This script fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution + Bayesian networks -S_DeterministicEvolution This script animates the evolution of the determinstic component of an OU process -MeanDiversificationFrontier This script computes the mean-diversification efficient frontier + + Modified: pkg/Meucci/demo/S_EigenvalueDispersion.R =================================================================== --- pkg/Meucci/demo/S_EigenvalueDispersion.R 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/S_EigenvalueDispersion.R 2013-09-06 09:18:55 UTC (rev 3010) @@ -1,7 +1,5 @@ -library(mvtnorm) - -#'This script displays the sample eigenvalues dispersion phenomenon, as described in A. Meucci, -#'"Risk and Asset Allocation", Springer, 2005, Chapter 4. +#' This script displays the sample eigenvalues dispersion phenomenon, as described in A. Meucci, +#' "Risk and Asset Allocation", Springer, 2005, Chapter 4. 
#' #' @references #' \url{http://symmys.com/node/170} Modified: pkg/Meucci/demo/S_EllipticalNDim.R =================================================================== --- pkg/Meucci/demo/S_EllipticalNDim.R 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/S_EllipticalNDim.R 2013-09-06 09:18:55 UTC (rev 3010) @@ -1,4 +1,4 @@ -#'This script decomposes the N-variate normal distribution into its radial and uniform components +#' This script decomposes the N-variate normal distribution into its radial and uniform components #' then it uses the uniform component to generate an elliptical distribution with location parameter #' Mu and dispersion parameter Sigma, as described in A. Meucci, "Risk and Asset Allocation", #' Springer, 2005, Chapter 2. Modified: pkg/Meucci/demo/S_EquityProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_EquityProjectionPricing.R 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/S_EquityProjectionPricing.R 2013-09-06 09:18:55 UTC (rev 3010) @@ -1,8 +1,8 @@ #' This script projects the distribution of the market invariants for the stock market (i.e. the compounded returns) #' from the estimation interval (normal assumption) to the investment horizon. Then it computes the distribution of prices -#' at the investment horizon analytically, by full Monte Carlo, and by delta/duration approximationThis file performs -#' the quest for invariance in the stock market, as described in A. Meucci "Risk and Asset Allocation", Springer, 2005, +#' at the investment horizon analytically, by full Monte Carlo, and by delta/duration approximation. +#' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005, #' chapter 3. 
#' #' @references Modified: pkg/Meucci/demo/S_FullCodependence.R =================================================================== --- pkg/Meucci/demo/S_FullCodependence.R 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/S_FullCodependence.R 2013-09-06 09:18:55 UTC (rev 3010) @@ -1,4 +1,4 @@ -#' This script illustrate the concept of co-dependence, as described +#' This script illustrates the concept of co-dependence, as described #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references Modified: pkg/Meucci/demo/S_FxCopulaMarginal.R =================================================================== --- pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-06 09:18:55 UTC (rev 3010) @@ -1,4 +1,4 @@ -#'This script display the empirical copula of a set of market variables, as described +#' This script displays the empirical copula of a set of market variables, as described #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references Modified: pkg/Meucci/demo/S_InvestorsObjective.R =================================================================== --- pkg/Meucci/demo/S_InvestorsObjective.R 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/S_InvestorsObjective.R 2013-09-06 09:18:55 UTC (rev 3010) @@ -1,4 +1,3 @@ -library(mvtnorm); #' This script familiarizes the users with the objectives of different investors in a highly #' non-normal bi-variate market of securities, as described in A. Meucci,"Risk and Asset #' Allocation",Springer, 2005, Chapter 5. 
Modified: pkg/Meucci/demo/S_ShrinkageEstimators.R =================================================================== --- pkg/Meucci/demo/S_ShrinkageEstimators.R 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/S_ShrinkageEstimators.R 2013-09-06 09:18:55 UTC (rev 3010) @@ -1,5 +1,3 @@ -library(mvtnorm); - #' This script computes the multivariate shrinkage estimators of location and scatter under the normal assumption, #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' Modified: pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 2013-09-06 08:34:12 UTC (rev 3009) +++ pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 2013-09-06 09:18:55 UTC (rev 3010) @@ -1,5 +1,3 @@ - -################################################################################################################## #' This script computes the correlation between explicit, time-series industry factor returns and implicit, #' cross-section industry factor returns, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, #' Chapter 3. 
From noreply at r-forge.r-project.org Fri Sep 6 12:43:49 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 6 Sep 2013 12:43:49 +0200 (CEST) Subject: [Returnanalytics-commits] r3011 - in pkg/Meucci: R data demo Message-ID: <20130906104349.29FD9184BE6@r-forge.r-project.org> Author: xavierv Date: 2013-09-06 12:43:48 +0200 (Fri, 06 Sep 2013) New Revision: 3011 Added: pkg/Meucci/data/bondAttribution.rda pkg/Meucci/data/db.rda pkg/Meucci/data/dbFFP.rda pkg/Meucci/data/derivatives.rda pkg/Meucci/data/equities.rda pkg/Meucci/data/fILMR.rda pkg/Meucci/data/fixedIncome.rda pkg/Meucci/data/highYieldIndices.rda pkg/Meucci/data/implVol.rda pkg/Meucci/data/linearModel.rda pkg/Meucci/data/sectorsTS.rda pkg/Meucci/data/securitiesIndustryClassification.rda pkg/Meucci/data/securitiesTS.rda pkg/Meucci/data/stockSeries.rda pkg/Meucci/data/swap2y4y.rda pkg/Meucci/data/swapParRates.rda pkg/Meucci/data/swaps.rda pkg/Meucci/data/timeSeries.rda pkg/Meucci/data/usSwapRates.rda Modified: pkg/Meucci/R/InterExtrapolate.R pkg/Meucci/demo/FullFlexProbs.R pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R pkg/Meucci/demo/S_BlackLittermanBasic.R pkg/Meucci/demo/S_CallsProjectionPricing.R pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R pkg/Meucci/demo/S_CrossSectionIndustries.R pkg/Meucci/demo/S_EquitiesInvariants.R pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R pkg/Meucci/demo/S_FitSwapToStudentT.R pkg/Meucci/demo/S_FixedIncomeInvariants.R pkg/Meucci/demo/S_FxCopulaMarginal.R pkg/Meucci/demo/S_HedgeOptions.R pkg/Meucci/demo/S_HorizonEffect.R pkg/Meucci/demo/S_MaximumLikelihood.R pkg/Meucci/demo/S_MeanVarianceBenchmark.R pkg/Meucci/demo/S_MeanVarianceCalls.R pkg/Meucci/demo/S_MeanVarianceHorizon.R pkg/Meucci/demo/S_MeanVarianceOptimization.R pkg/Meucci/demo/S_MultiVarSqrRootRule.R pkg/Meucci/demo/S_ProjectNPriceMvGarch.R pkg/Meucci/demo/S_PureResidualBonds.R pkg/Meucci/demo/S_StatArbSwaps.R pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R 
pkg/Meucci/demo/S_TimeSeriesIndustries.R pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R Log: - fixed wrog extension for data files Modified: pkg/Meucci/R/InterExtrapolate.R =================================================================== --- pkg/Meucci/R/InterExtrapolate.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/R/InterExtrapolate.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -28,15 +28,14 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -# examples +# examples (MATLAB) # # [x1,x2] = meshgrid(0:.2:1); # z = exp(x1+x2); # Xi = rand(100,2)*2-.5; # Zi = interpne(z,Xi,{0:.2:1, 0:.2:1},'linear'); # surf(0:.2:1,0:.2:1,z) -# hold on -# plot3(Xi(:,1),Xi(:,2),Zi,'ro') +# plot3( Xi(:,1),Xi(:,2),Zi,'ro') # InterExtrapolate = function( V, Xi, nodelist, method ) Added: pkg/Meucci/data/bondAttribution.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/bondAttribution.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/db.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/db.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/dbFFP.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/dbFFP.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/derivatives.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/derivatives.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/equities.rda 
=================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/equities.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/fILMR.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/fILMR.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/fixedIncome.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/fixedIncome.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/highYieldIndices.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/highYieldIndices.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/implVol.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/implVol.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/linearModel.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/linearModel.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/sectorsTS.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/sectorsTS.rda ___________________________________________________________________ Added: 
svn:mime-type + application/octet-stream Added: pkg/Meucci/data/securitiesIndustryClassification.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/securitiesIndustryClassification.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/securitiesTS.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/securitiesTS.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/stockSeries.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/stockSeries.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/swap2y4y.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/swap2y4y.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/swapParRates.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/swapParRates.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/swaps.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/swaps.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/timeSeries.rda =================================================================== (Binary files differ) Property 
changes on: pkg/Meucci/data/timeSeries.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/usSwapRates.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/usSwapRates.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/Meucci/demo/FullFlexProbs.R =================================================================== --- pkg/Meucci/demo/FullFlexProbs.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/FullFlexProbs.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -47,7 +47,7 @@ # risk drivers scenarios ########################################################################### -load( "../data/dbFFP.Rda" ) +load( "../data/dbFFP.rda" ) Infl = dbFFP$Data[ , length( dbFFP$Names ) ]; Vix = dbFFP$Data[ , length( dbFFP$Names ) - 1 ]; Modified: pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R =================================================================== --- pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -25,7 +25,7 @@ # load fILMR$Daily_Prices: closing prices # fILMR$Daily_Volumes_Shares: daily volumes # fILMR$Daily_Liq: Morgan Stanley liquidity index -load("../data/fILMR.Rda") +load("../data/fILMR.rda") # Prices and returns #Daily_Prices = Daily_Prices(:,Selectstock); Modified: pkg/Meucci/demo/S_BlackLittermanBasic.R =================================================================== --- pkg/Meucci/demo/S_BlackLittermanBasic.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_BlackLittermanBasic.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -12,7 +12,7 @@ ################################################################################################################## ### Load 
inputs -load("../data/covNRets.Rda"); +load("../data/covNRets.rda"); ################################################################################################################## ### Compute efficient frontier Modified: pkg/Meucci/demo/S_CallsProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -13,7 +13,7 @@ # load 'spot' for underlying and current vol surface, given by # 'impVol' for different 'days2Maturity' and 'moneyness' (K/S) -load("../data/implVol.Rda"); +load("../data/implVol.rda"); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -10,10 +10,10 @@ ################################################################################################################## ### Loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.Rda"); +load("../data/securitiesTS.rda"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/securitiesIndustryClassification.Rda"); +load("../data/securitiesIndustryClassification.rda"); Securities_IndustryClassification = securitiesIndustryClassification$data; ################################################################################################################## ### Linear returns for stocks Modified: pkg/Meucci/demo/S_CrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-06 09:18:55 
UTC (rev 3010) +++ pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -10,10 +10,10 @@ ################################################################################################################## ### Load data # loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.Rda"); +load("../data/securitiesTS.rda"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/securitiesIndustryClassification.Rda"); +load("../data/securitiesIndustryClassification.rda"); Securities_IndustryClassification = securitiesIndustryClassification$data; ################################################################################################################## Modified: pkg/Meucci/demo/S_EquitiesInvariants.R =================================================================== --- pkg/Meucci/demo/S_EquitiesInvariants.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_EquitiesInvariants.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -11,7 +11,7 @@ ################################################################################################################## ### Load daily stock prices from the utility sector in the S&P 500 -load("../data/equities.Rda"); +load("../data/equities.rda"); ################################################################################################################## ### Pick one stock from database Modified: pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R =================================================================== --- pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -10,7 +10,7 @@ ################################################################################################################## ### Load data -load("../data/highYieldIndices.Rda"); +load("../data/highYieldIndices.rda"); 
################################################################################################################## ### Compute invariants and set NaN for large values Modified: pkg/Meucci/demo/S_FitSwapToStudentT.R =================================================================== --- pkg/Meucci/demo/S_FitSwapToStudentT.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_FitSwapToStudentT.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -11,7 +11,7 @@ ################################################################################################################## ### Load data -load( "../data/usSwapRates.Rda" ); +load( "../data/usSwapRates.rda" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_FixedIncomeInvariants.R =================================================================== --- pkg/Meucci/demo/S_FixedIncomeInvariants.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_FixedIncomeInvariants.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -9,7 +9,7 @@ ################################################################################################################## ### Load government yield curve and bond yield data for different dates -load("../data/fixedIncome.Rda"); +load("../data/fixedIncome.rda"); ################################################################################################################## ### Pick time-to-maturity for one point on the yield curve Modified: pkg/Meucci/demo/S_FxCopulaMarginal.R =================================================================== --- pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -11,7 +11,7 @@ ### Load data and select the pair to display library(pracma) -load( "../data/fX.Rda" ) +load( "../data/fX.rda" ) Display = c( 1, 2 ); # 1 = Spot USD/EUR; 2 = Spot USD/GBP; 3 = Spot USD/JPY; Modified: 
pkg/Meucci/demo/S_HedgeOptions.R =================================================================== --- pkg/Meucci/demo/S_HedgeOptions.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_HedgeOptions.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -9,7 +9,7 @@ ################################################################################################################## ### Load data -load( "../data/implVol.Rda" ); +load( "../data/implVol.rda" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_HorizonEffect.R =================================================================== --- pkg/Meucci/demo/S_HorizonEffect.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_HorizonEffect.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -17,7 +17,7 @@ ################################################################################################################## # Load parameters of the model: D, muX, sigmaF, sigmaEps -load( "../data/linearModel.Rda" ); +load( "../data/linearModel.rda" ); # Specify range of investment horizon, weeks tauRangeWeeks = 1:52; Modified: pkg/Meucci/demo/S_MaximumLikelihood.R =================================================================== --- pkg/Meucci/demo/S_MaximumLikelihood.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_MaximumLikelihood.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -51,7 +51,7 @@ ########################################################################################################## ### Load data -load( "../data/timeSeries.Rda"); +load( "../data/timeSeries.rda"); ########################################################################################################## ### inputs Modified: pkg/Meucci/demo/S_MeanVarianceBenchmark.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceBenchmark.R 2013-09-06 09:18:55 UTC (rev 3010) +++ 
pkg/Meucci/demo/S_MeanVarianceBenchmark.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -13,7 +13,7 @@ ################################################################################################################## ### Load data -load("../data/stockSeries.Rda"); +load("../data/stockSeries.rda"); ################################################################################################################### ### Inputs Modified: pkg/Meucci/demo/S_MeanVarianceCalls.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceCalls.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_MeanVarianceCalls.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -9,7 +9,7 @@ ################################################################################################################## ### Load dat -load("../data/db.Rda" ); +load("../data/db.rda" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_MeanVarianceHorizon.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceHorizon.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_MeanVarianceHorizon.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -12,7 +12,7 @@ ################################################################################################################## ### Load data -load("../data/stockSeries.Rda"); +load("../data/stockSeries.rda"); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_MeanVarianceOptimization.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceOptimization.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_MeanVarianceOptimization.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -13,7 +13,7 @@ 
################################################################################################################## ### Load data -load( "../data/stockSeries.Rda" ); +load( "../data/stockSeries.rda" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_MultiVarSqrRootRule.R =================================================================== --- pkg/Meucci/demo/S_MultiVarSqrRootRule.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_MultiVarSqrRootRule.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -9,7 +9,7 @@ ################################################################################################################## ### Load data -load("../data/swaps.Rda"); +load("../data/swaps.rda"); ################################################################################################################## ### Aggregation steps in days Modified: pkg/Meucci/demo/S_ProjectNPriceMvGarch.R =================================================================== --- pkg/Meucci/demo/S_ProjectNPriceMvGarch.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_ProjectNPriceMvGarch.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -13,7 +13,7 @@ ################################################################################################################## ### Load data -load( "../data/equities.Rda" ); +load( "../data/equities.rda" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_PureResidualBonds.R =================================================================== --- pkg/Meucci/demo/S_PureResidualBonds.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_PureResidualBonds.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -10,7 +10,7 @@ ################################################################################################################## ### Load data 
-load("../data/bondAttribution.Rda"); +load("../data/bondAttribution.rda"); ################################################################################################################## Modified: pkg/Meucci/demo/S_StatArbSwaps.R =================================================================== --- pkg/Meucci/demo/S_StatArbSwaps.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_StatArbSwaps.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -11,7 +11,7 @@ ################################################################################################################## ### Load data -load("../data/swapParRates.Rda"); +load("../data/swapParRates.rda"); ################################################################################################################## ### Estimate covariance and PCA decomposition Modified: pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -10,10 +10,10 @@ ################################################################################################################## ### Loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.Rda"); +load("../data/securitiesTS.rda"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/sectorsTS.Rda"); +load("../data/sectorsTS.rda"); Data_Sectors = sectorsTS$data[ , -(1:2) ]; #1st column is date, 2nd column is SPX ################################################################################################################## Modified: pkg/Meucci/demo/S_TimeSeriesIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesIndustries.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_TimeSeriesIndustries.R 2013-09-06 10:43:48 UTC 
(rev 3011) @@ -9,10 +9,10 @@ ################################################################################################################## ### Loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.Rda"); +load("../data/securitiesTS.rda"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/sectorsTS.Rda"); +load("../data/sectorsTS.rda"); Data_Sectors = sectorsTS$data[ , -(1:2) ]; #1st column is for date, 2nd column is SPX index ################################################################################################################## Modified: pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 2013-09-06 09:18:55 UTC (rev 3010) +++ pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 2013-09-06 10:43:48 UTC (rev 3011) @@ -10,13 +10,13 @@ ################################################################################################################## ### Load data # loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.Rda"); +load("../data/securitiesTS.rda"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/sectorsTS.Rda"); +load("../data/sectorsTS.rda"); Data_Sectors = sectorsTS$data[ , -(1:2) ]; -load("../data/securitiesIndustryClassification.Rda"); +load("../data/securitiesIndustryClassification.rda"); Securities_IndustryClassification = securitiesIndustryClassification$data; ################################################################################################################## From noreply at r-forge.r-project.org Fri Sep 6 16:51:42 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 6 Sep 2013 16:51:42 +0200 (CEST) Subject: [Returnanalytics-commits] r3012 - pkg/PortfolioAnalytics/R Message-ID: <20130906145142.2777D1859F5@r-forge.r-project.org> 
Author: rossbennett34 Date: 2013-09-06 16:51:41 +0200 (Fri, 06 Sep 2013) New Revision: 3012 Modified: pkg/PortfolioAnalytics/R/constrained_objective.R Log: Adding weight concentration penalty to constrained_objective Modified: pkg/PortfolioAnalytics/R/constrained_objective.R =================================================================== --- pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-06 10:43:48 UTC (rev 3011) +++ pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-06 14:51:41 UTC (rev 3012) @@ -680,6 +680,22 @@ } } # end handling of risk_budget objective + if(inherits(objective, "weight_concentration_objective")){ + # If the user does not pass in conc_groups, the output of HHI will be a scalar + if((length(objective$conc_aversion) == 1) & is.null(objective$conc_groups)){ + # treat conc_aversion as a multiplier + out <- out + penalty * objective$conc_aversion * tmp_measure + } + # If the user passes in conc_groups, the output of HHI will be a list + # The second element of the list will be the group HHI + if(length(objective$conc_aversion > 1) & !is.null(objective$conc_groups)){ + if(length(objective$conc_aversion) == length(tmp_measure[[2]])){ + # treat the conc_aversion vector as a multiplier per group hhi + out <- out + penalty * sum(objective$conc_aversion * tmp_measure[[2]]) + } + } + } # weight concentration objective + } # end enabled check } # end loop over objectives } # end objectives processing From noreply at r-forge.r-project.org Fri Sep 6 21:06:55 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 6 Sep 2013 21:06:55 +0200 (CEST) Subject: [Returnanalytics-commits] r3013 - in pkg/Meucci: . 
R data demo man Message-ID: <20130906190655.80699185E02@r-forge.r-project.org> Author: xavierv Date: 2013-09-06 21:06:55 +0200 (Fri, 06 Sep 2013) New Revision: 3013 Added: pkg/Meucci/R/data.R pkg/Meucci/data/covNRets.rda pkg/Meucci/data/fX.rda pkg/Meucci/man/Equities.Rd pkg/Meucci/man/StockSeries.Rd pkg/Meucci/man/TimeSeries.Rd pkg/Meucci/man/UsSwapRates.Rd pkg/Meucci/man/bondAttribution.Rd pkg/Meucci/man/covNRets.Rd pkg/Meucci/man/db.Rd pkg/Meucci/man/dbFFP.Rd pkg/Meucci/man/db_FX.Rd pkg/Meucci/man/derivatives.Rd pkg/Meucci/man/fILMR.Rd pkg/Meucci/man/fixedIncome.Rd pkg/Meucci/man/highYieldIndices.Rd pkg/Meucci/man/implVol.Rd pkg/Meucci/man/linearModel.Rd pkg/Meucci/man/sectorsTS.Rd pkg/Meucci/man/securitiesIndustryClassification.Rd pkg/Meucci/man/securitiesTS.Rd pkg/Meucci/man/swap2y4y.Rd pkg/Meucci/man/swapParRates.Rd pkg/Meucci/man/swaps.Rd Modified: pkg/Meucci/DESCRIPTION pkg/Meucci/R/CentralAndStandardizedStatistics.R pkg/Meucci/demo/S_BondProjectionPricingNormal.R pkg/Meucci/demo/S_DerivativesInvariants.R pkg/Meucci/demo/S_FxCopulaMarginal.R Log: - fixed dataset documentation error for books datasets Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2013-09-06 14:51:41 UTC (rev 3012) +++ pkg/Meucci/DESCRIPTION 2013-09-06 19:06:55 UTC (rev 3013) @@ -104,3 +104,4 @@ 'DoubleDecay.R' 'Fit2Moms.R' 'LeastInfoKernel.R' + 'data.R' Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R =================================================================== --- pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-06 14:51:41 UTC (rev 3012) +++ pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-06 19:06:55 UTC (rev 3013) @@ -16,13 +16,13 @@ CentralAndStandardizedStatistics = function( X, N ) { - if(!require("PerformanceAnalytics")) stop("PerformanceAnalytics package required for this script"); + if( !require("PerformanceAnalytics") ) stop("PerformanceAnalytics package required for this 
script"); # compute central moments mu = matrix( 0, 1, N); mu[ 1 ] = mean(X); for( n in 2 : N ) { - mu[ n ] = centeredmoment(X, n); + mu[ n ] = PerformanceAnalytics:::centeredmoment(X, n); } # compute standardized statistics Added: pkg/Meucci/R/data.R =================================================================== --- pkg/Meucci/R/data.R (rev 0) +++ pkg/Meucci/R/data.R 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,211 @@ +#' @title Historical Scenarios with Fully Flexible Probabilities dataset. +#' +#' @description Data for the Historical Scenarios with Fully Flexible Probabilities paper. +#' +#' @name dbFFP +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities" +#' GARP Risk Professional, Dec 2010, p 47-51. \url{http://www.symmys.com/node/150} +#' @keywords data +NULL + +#' @title Fully Integrated Liquidity and Market Risk Model dataset. +#' +#' @description Data for the Fully Integrated Liquidity and Market Risk Model paper. +#' +#' @name fILMR +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, "A Fully Integrated Liquidity and Market Risk Model", Financial Analyst Journal, 68, 6, 35-47 (2012) \url{http://www.symmys.com/node/350} +#' @keywords data +NULL + +#' @title implVol +#' +#' @name implVol +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title Stock Returns. +#' +#' @name securitiesTS +#' +#' @description Stock Returns. +#' +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. 
\url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title Stock Indices +#' +#' @name securitiesIndustryClassification +#' +#' @description Stock Indices +#' +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title implied vol for options on SPX +#' +#' @name derivatives +#' +#' @description implied vol for options on SPX for different time to maturity and moneyness. +#' +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title daily stock prices from the utility sector in the S&P 500 +#' +#' @name Equities +#' +#' @description daily stock prices from the utility sector in the S&P 500. +#' +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title highYieldIndices +#' +#' @name highYieldIndices +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title US Swap Rates +#' +#' @name UsSwapRates +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title US government yield curve and bond yield data +#' +#' @name fixedIncome +#' +#' @description US government yield curve and bond yield data for different dates. +#' +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. 
Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title fX +#' +#' @name db_FX +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title parameters for the explicit factors / implicit loadings linear model +#' +#' @name linearModel +#' +#' @description parameters for the explicit factors / implicit loadings linear model. +#' +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title Time Series +#' +#' @name TimeSeries +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title Stock Series +#' +#' @name StockSeries +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title db +#' +#' @name db +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title swaps +#' +#' @name swaps +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title bondAttribution +#' +#' @name bondAttribution +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. 
\url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title swapParRates +#' +#' @name swapParRates +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title stock returns by sectors +#' +#' @name sectorsTS +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title covNRets +#' +#' @name covNRets +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} +#' @keywords data +NULL + +#' @title Swaps for 2y and 4y +#' +#' @name swap2y4y +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. 
\url{http://symmys.com/node/170} +#' @keywords data +NULL \ No newline at end of file Added: pkg/Meucci/data/covNRets.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/covNRets.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/fX.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/fX.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/Meucci/demo/S_BondProjectionPricingNormal.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-06 14:51:41 UTC (rev 3012) +++ pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-06 19:06:55 UTC (rev 3013) @@ -5,7 +5,7 @@ #'"Risk and Asset Allocation", Springer, 2005, Chapter 3. 
#' #' @references -#' \url{http://} +#' \url{http://symmys.com/node/170} #' See Meucci's script for "S_BondProjectionPricingNormal.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_DerivativesInvariants.R =================================================================== --- pkg/Meucci/demo/S_DerivativesInvariants.R 2013-09-06 14:51:41 UTC (rev 3012) +++ pkg/Meucci/demo/S_DerivativesInvariants.R 2013-09-06 19:06:55 UTC (rev 3013) @@ -11,7 +11,7 @@ ################################################################################################################## ### Load implied vol for options on SPX for different time to maturity and moneyness # Variable name: derivatives -load('../data/derivatives.Rda'); +load('../data/derivatives.rda'); ################################################################################################################## ### Simple univariate test Modified: pkg/Meucci/demo/S_FxCopulaMarginal.R =================================================================== --- pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-06 14:51:41 UTC (rev 3012) +++ pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-06 19:06:55 UTC (rev 3013) @@ -9,8 +9,6 @@ #' @export ### Load data and select the pair to display - -library(pracma) load( "../data/fX.rda" ) Display = c( 1, 2 ); # 1 = Spot USD/EUR; 2 = Spot USD/GBP; 3 = Spot USD/JPY; Added: pkg/Meucci/man/Equities.Rd =================================================================== --- pkg/Meucci/man/Equities.Rd (rev 0) +++ pkg/Meucci/man/Equities.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,18 @@ +\docType{data} +\name{Equities} +\alias{Equities} +\title{daily stock prices from the utility sector in the S&P 500} +\description{ + daily stock prices from the utility sector in the S&P + 500. +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. 
\url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/StockSeries.Rd =================================================================== --- pkg/Meucci/man/StockSeries.Rd (rev 0) +++ pkg/Meucci/man/StockSeries.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{StockSeries} +\alias{StockSeries} +\title{Stock Series} +\description{ + Stock Series +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/TimeSeries.Rd =================================================================== --- pkg/Meucci/man/TimeSeries.Rd (rev 0) +++ pkg/Meucci/man/TimeSeries.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{TimeSeries} +\alias{TimeSeries} +\title{Time Series} +\description{ + Time Series +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/UsSwapRates.Rd =================================================================== --- pkg/Meucci/man/UsSwapRates.Rd (rev 0) +++ pkg/Meucci/man/UsSwapRates.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{UsSwapRates} +\alias{UsSwapRates} +\title{US Swap Rates} +\description{ + US Swap Rates +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. 
\url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/bondAttribution.Rd =================================================================== --- pkg/Meucci/man/bondAttribution.Rd (rev 0) +++ pkg/Meucci/man/bondAttribution.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{bondAttribution} +\alias{bondAttribution} +\title{bondAttribution} +\description{ + bondAttribution +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/covNRets.Rd =================================================================== --- pkg/Meucci/man/covNRets.Rd (rev 0) +++ pkg/Meucci/man/covNRets.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{covNRets} +\alias{covNRets} +\title{covNRets} +\description{ + covNRets +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/db.Rd =================================================================== --- pkg/Meucci/man/db.Rd (rev 0) +++ pkg/Meucci/man/db.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{db} +\alias{db} +\title{db} +\description{ + db +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. 
\url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/dbFFP.Rd =================================================================== --- pkg/Meucci/man/dbFFP.Rd (rev 0) +++ pkg/Meucci/man/dbFFP.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,20 @@ +\docType{data} +\name{dbFFP} +\alias{dbFFP} +\title{Historical Scenarios with Fully Flexible Probabilities dataset.} +\description{ + Data for the Historical Scenarios with Fully Flexible + Probabilities paper. +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, "Personalized Risk Management: Historical + Scenarios with Fully Flexible Probabilities" GARP Risk + Professional, Dec 2010, p 47-51. + \url{http://www.symmys.com/node/150} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/db_FX.Rd =================================================================== --- pkg/Meucci/man/db_FX.Rd (rev 0) +++ pkg/Meucci/man/db_FX.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{db_FX} +\alias{db_FX} +\title{fX} +\description{ + fX +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/derivatives.Rd =================================================================== --- pkg/Meucci/man/derivatives.Rd (rev 0) +++ pkg/Meucci/man/derivatives.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,18 @@ +\docType{data} +\name{derivatives} +\alias{derivatives} +\title{implied vol for options on SPX} +\description{ + implied vol for options on SPX for different time to + maturity and moneyness. +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. 
\url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/fILMR.Rd =================================================================== --- pkg/Meucci/man/fILMR.Rd (rev 0) +++ pkg/Meucci/man/fILMR.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,19 @@ +\docType{data} +\name{fILMR} +\alias{fILMR} +\title{Fully Integrated Liquidity and Market Risk Model dataset.} +\description{ + Data for the Fully Integrated Liquidity and Market Risk + Model paper. +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, "A Fully Integrated Liquidity and Market Risk + Model", Financial Analyst Journal, 68, 6, 35-47 (2012) + \url{http://www.symmys.com/node/350} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/fixedIncome.Rd =================================================================== --- pkg/Meucci/man/fixedIncome.Rd (rev 0) +++ pkg/Meucci/man/fixedIncome.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,18 @@ +\docType{data} +\name{fixedIncome} +\alias{fixedIncome} +\title{US government yield curve and bond yield data} +\description{ + US government yield curve and bond yield data for + different dates. +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/highYieldIndices.Rd =================================================================== --- pkg/Meucci/man/highYieldIndices.Rd (rev 0) +++ pkg/Meucci/man/highYieldIndices.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{highYieldIndices} +\alias{highYieldIndices} +\title{highYieldIndices} +\description{ + highYieldIndices +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. 
\url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/implVol.Rd =================================================================== --- pkg/Meucci/man/implVol.Rd (rev 0) +++ pkg/Meucci/man/implVol.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{implVol} +\alias{implVol} +\title{implVol} +\description{ + implVol +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/linearModel.Rd =================================================================== --- pkg/Meucci/man/linearModel.Rd (rev 0) +++ pkg/Meucci/man/linearModel.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,18 @@ +\docType{data} +\name{linearModel} +\alias{linearModel} +\title{parameters for the explicit factors / implicit loadings linear model} +\description{ + parameters for the explicit factors / implicit loadings + linear model. +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/sectorsTS.Rd =================================================================== --- pkg/Meucci/man/sectorsTS.Rd (rev 0) +++ pkg/Meucci/man/sectorsTS.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{sectorsTS} +\alias{sectorsTS} +\title{stock returns by sectors} +\description{ + stock returns by sectors +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. 
\url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/securitiesIndustryClassification.Rd =================================================================== --- pkg/Meucci/man/securitiesIndustryClassification.Rd (rev 0) +++ pkg/Meucci/man/securitiesIndustryClassification.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{securitiesIndustryClassification} +\alias{securitiesIndustryClassification} +\title{Stock Indices} +\description{ + Stock Indices +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/securitiesTS.Rd =================================================================== --- pkg/Meucci/man/securitiesTS.Rd (rev 0) +++ pkg/Meucci/man/securitiesTS.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{securitiesTS} +\alias{securitiesTS} +\title{Stock Returns.} +\description{ + Stock Returns. +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/swap2y4y.Rd =================================================================== --- pkg/Meucci/man/swap2y4y.Rd (rev 0) +++ pkg/Meucci/man/swap2y4y.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{swap2y4y} +\alias{swap2y4y} +\title{Swaps for 2y and 4y} +\description{ + Swaps for 2y and 4y +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. 
\url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/swapParRates.Rd =================================================================== --- pkg/Meucci/man/swapParRates.Rd (rev 0) +++ pkg/Meucci/man/swapParRates.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{swapParRates} +\alias{swapParRates} +\title{swapParRates} +\description{ + swapParRates +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. \url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/swaps.Rd =================================================================== --- pkg/Meucci/man/swaps.Rd (rev 0) +++ pkg/Meucci/man/swaps.Rd 2013-09-06 19:06:55 UTC (rev 3013) @@ -0,0 +1,17 @@ +\docType{data} +\name{swaps} +\alias{swaps} +\title{swaps} +\description{ + swaps +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, Exercises in Advanced Risk and Portfolio + Management. 
\url{http://symmys.com/node/170} +} +\keyword{data} +\keyword{datasets} + From noreply at r-forge.r-project.org Sat Sep 7 10:49:48 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 10:49:48 +0200 (CEST) Subject: [Returnanalytics-commits] r3014 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R Message-ID: <20130907084948.C44ED1805AA@r-forge.r-project.org> Author: shubhanm Date: 2013-09-07 10:49:48 +0200 (Sat, 07 Sep 2013) New Revision: 3014 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R Log: Bug correct Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R 2013-09-06 19:06:55 UTC (rev 3013) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R 2013-09-07 08:49:48 UTC (rev 3014) @@ -70,14 +70,12 @@ for(column in 1:columns) { x = y[,column] - z = c(SharpeRatio.annualized(x), - SharpeRatio.modified(x), - LoSharpe(x), - Return.annualized(x),StdDev.annualized(x),se.Losharpe(x)) + z = c(as.numeric(SharpeRatio.annualized(x)), + as.numeric(LoSharpe(x)), + as.numeric(Return.annualized(x)),as.numeric(StdDev.annualized(x)),as.numeric(se.Losharpe(x))) znames = c( "William Sharpe Ratio", - "Modified Sharpe Ratio", "Andrew Lo Sharpe Ratio", "Annualized Return", "Annualized Standard Deviation","Sharpe Ratio Standard Error(95%)" From noreply at r-forge.r-project.org Sat Sep 7 12:16:35 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 12:16:35 +0200 (CEST) Subject: [Returnanalytics-commits] r3015 - pkg/Meucci/demo Message-ID: <20130907101635.4D8A1184E8D@r-forge.r-project.org> Author: xavierv Date: 2013-09-07 12:16:35 +0200 (Sat, 07 Sep 2013) New Revision: 3015 Modified: pkg/Meucci/demo/AnalyticalvsNumerical.R pkg/Meucci/demo/FullFlexProbs.R Log: - 
AnalyticalvsNumerical demo script revised Modified: pkg/Meucci/demo/AnalyticalvsNumerical.R =================================================================== --- pkg/Meucci/demo/AnalyticalvsNumerical.R 2013-09-07 08:49:48 UTC (rev 3014) +++ pkg/Meucci/demo/AnalyticalvsNumerical.R 2013-09-07 10:16:35 UTC (rev 3015) @@ -1,40 +1,43 @@ +#' This example script compares the numerical and the analytical solution of entropy-pooling, as described +#' in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, p 100-106. +#' +#' Most recent version of article and MATLAB code available at +#' http://www.symmys.com/node/158 +#' +#' @references +#' A. Meucci, Fully Flexible Views: Theory and Practice \url{http://www.symmys.com/node/158} +#' See Meucci script for "S_MAIN.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} -# This example script compares the numerical and the analytical solution of entropy-pooling, see -# "A. Meucci - Fully Flexible Views: Theory and Practice -# and associated script S_Main -# Example compares analytical vs. numerical approach to entropy pooling - -# Code by A. 
Meucci, September 2008 -# Last version available at www.symmys.com > Teaching > MATLAB - ############################################################### # prior ############################################################### -library( matlab ) -library( MASS ) + # analytical representation -N = 2 # market dimension (2 assets) -Mu = zeros( N , 1 ) -r= .6 -Sigma = ( 1 - r ) * eye( N ) + r * ones( N , N ) # nxn correlation matrix with correlaiton 'r' in off-diagonals +N = 2 # market dimension (2 assets) +Mu = matrix( 0, N , 1 ) +r = 0.6 +Sigma = ( 1 - r ) * diag( 1, N ) + r * matrix( 1, N , N ) # nxn correlation matrix with correlation 'r' in off-diagonals + # numerical representation -J = 100000 # number of scenarios -p = ones( J , 1 ) / J -dd = mvrnorm( J / 2 , zeros( N , 1 ) , Sigma ) # distribution centered on (0,0) with variance Sigma -X = ones( J , 1 ) %*% t(Mu) + rbind( dd , -dd ) # JxN matrix of scenarios +J = 100000 # number of scenarios +p = matrix( 1, J , 1 ) / J +dd = rmvnorm( J / 2 , matrix( 1, N , 1 ) , Sigma ) # distribution centered on (0,0) with variance Sigma +X = matrix( 1, J , 1 ) %*% t(Mu) + rbind( dd , -dd ) # JxN matrix of scenarios ############################################################### # views ############################################################### # location -Q = matrix( c( 1 , -1 ) , nrow = 1 ) # long the first and asset and short the second asset produces an expectation (of Mu_Q calculated below) -Mu_Q = .5 +Q = cbind( 1 , -1 ) # long the first and asset and short the second asset produces an expectation (of Mu_Q calculated below) +Mu_Q = 0.5 # scatter -G = matrix( c( -1 , 1 ) , nrow = 1 ) -Sigma_G = .5^2 +G = cbind( -1 , 1 ) +Sigma_G = 0.5^2 ############################################################### # posterior @@ -43,34 +46,36 @@ # analytical posterior RevisedMuSigma = Prior2Posterior( Mu , Q , Mu_Q , Sigma , G , Sigma_G ) Mu_ = RevisedMuSigma$M_ - +Sigma_ = RevisedMuSigma$S_ # numerical posterior -Aeq = ones( 1 , J ) 
# constrain probabilities to sum to one... +Aeq = matrix( 1, 1 , J ) # constrain probabilities to sum to one... beq = 1 # create views - QX = X %*% t(Q) # a Jx1 matrix - Aeq = rbind( Aeq , t(QX) ) # ...constrain the first moments... - # QX is a linear combination of vector Q and the scenarios X - - beq = rbind( beq , Mu_Q ) - - SecMom = G %*% Mu_ %*% t(Mu_) %*% t(G) + Sigma_G # ...constrain the second moments... - # We use Mu_ from analytical result. We do not use Revised Sigma because we are testing whether - # the numerical approach for handling expectations of covariance matches the analytical approach - # TODO: Can we perform this procedure without relying on Mu_ from the analytical result? - GX = X %*% t(G) - - for ( k in 1:nrow( G ) ) - { - for ( l in k:nrow( G ) ) - { - Aeq = rbind( Aeq , t(GX[ , k ] * GX[ , l ] ) ) - beq = rbind( beq , SecMom[ k , l ] ) - } - } +QX = X %*% t(Q) # a Jx1 matrix +Aeq = rbind( Aeq , t(QX) ) # ...constrain the first moments... + # QX is a linear combination of vector Q and the scenarios X + +beq = rbind( beq , Mu_Q ) + +SecMom = G %*% Mu_ %*% t(Mu_) %*% t(G) + Sigma_G # ...constrain the second moments... + # We use Mu_ from analytical result. We do not use Revised Sigma because we are testing whether + # the numerical approach for handling expectations of covariance matches the analytical approach + # TODO: Can we perform this procedure without relying on Mu_ from the analytical result? 
+ +GX = X %*% t(G) + +for ( k in 1:nrow( G ) ) +{ + for ( l in k:nrow( G ) ) + { + Aeq = rbind( Aeq , t(GX[ , k ] * GX[ , l ] ) ) + beq = rbind( beq , SecMom[ k , l ] ) + } +} + emptyMatrix = matrix( , nrow = 0 , ncol = 0 ) p_ = EntropyProg( p , emptyMatrix , emptyMatrix , Aeq , beq ) # ...compute posterior probabilities Modified: pkg/Meucci/demo/FullFlexProbs.R =================================================================== --- pkg/Meucci/demo/FullFlexProbs.R 2013-09-07 08:49:48 UTC (rev 3014) +++ pkg/Meucci/demo/FullFlexProbs.R 2013-09-07 10:16:35 UTC (rev 3015) @@ -35,7 +35,7 @@ #'A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities" #'GARP Risk Professional, Dec 2010, p 47-51 #' -#' Most recent version of article and code available at +#' Most recent version of article and MATLAB code available at #' http://www.symmys.com/node/150 #' @references #' \url{http://www.symmys.com/node/150} From noreply at r-forge.r-project.org Sat Sep 7 13:19:40 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 13:19:40 +0200 (CEST) Subject: [Returnanalytics-commits] r3016 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
R man vignettes Message-ID: <20130907111940.C096D183E7A@r-forge.r-project.org> Author: shubhanm Date: 2013-09-07 13:19:40 +0200 (Sat, 07 Sep 2013) New Revision: 3016 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.tex Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/AcarSim.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.Rnw Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.pdf Log: Temp Clean version of R CMD Build Checking...removing bugs in deleted functions Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-07 11:19:40 UTC (rev 3016) @@ -16,7 +16,6 @@ License: GPL-3 ByteCompile: TRUE Collate: - 'AcarSim.R' 'ACStdDev.annualized.R' 'CalmarRatio.Norm.R' 'CDrawdown.R' @@ -24,16 +23,13 @@ 
'chart.Autocorrelation.R' 'EmaxDDGBM.R' 'GLMSmoothIndex.R' - 'LoSharpe.R' 'na.skip.R' 'noniid.sm-internal.R' 'QP.Norm.R' 'Return.GLM.R' 'Return.Okunev.R' - 'se.LoSharpe.R' 'SterlingRatio.Norm.R' 'table.ComparitiveReturn.GLM.R' 'table.EMaxDDGBM.R' 'table.UnsmoothReturn.R' 'UnsmoothReturn.R' - 'table.Sharpe.R' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,4 +1,3 @@ -export(AcarSim) export(ACStdDev.annualized) export(CalmarRatio.Norm) export(CDrawdown) @@ -6,13 +5,10 @@ export(chart.Autocorrelation) export(EMaxDDGBM) export(GLMSmoothIndex) -export(LoSharpe) export(QP.Norm) export(Return.GLM) export(Return.Okunev) -export(se.LoSharpe) export(SterlingRatio.Norm) export(table.ComparitiveReturn.GLM) export(table.EMaxDDGBM) -export(table.Sharpe) export(table.UnsmoothReturn) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,103 +0,0 @@ -#' @title Acar-Shane Maximum Loss Plot -#' -#'@description To get some insight on the relationships between maximum drawdown per unit of volatility -#'and mean return divided by volatility, we have proceeded to Monte-Carlo simulations. -#' We have simulated cash flows over a period of 36 monthly returns and measured maximum -#'drawdown for varied levels of annualised return divided by volatility varying from minus -#' \emph{two to two} by step of \emph{0.1} . The process has been repeated \bold{six thousand times}. 
-#' @details Unfortunately, there is no \bold{analytical formulae} to establish the maximum drawdown properties under -#' the random walk assumption. We should note first that due to its definition, the maximum drawdown -#' divided by volatility can be interpreted as the only function of the ratio mean divided by volatility. -#' \deqn{MD/[\sigma]= Min (\sum[X(j)])/\sigma = F(\mu/\sigma)} -#' Where j varies from 1 to n ,which is the number of drawdown's in simulation -#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of -#' asset returns -#' @param nsim number of simulations input -#' @author Shubhankit Mohan -#' @references Maximum Loss and Maximum Drawdown in Financial Markets,\emph{International Conference Sponsored by BNP and Imperial College on: -#' Forecasting Financial Markets, London, United Kingdom, May 1997} \url{http://www.intelligenthedgefundinvesting.com/pubs/easj.pdf} -#' @keywords Maximum Loss Simulated Drawdown -#' @examples -#' library(PerformanceAnalytics) -#' AcarSim(R) -#' @rdname AcarSim -#' @export -AcarSim <- - function(R,nsim=1) - { - library(PerformanceAnalytics) - - data(edhec) - - R = checkData(R, method="xts") - # Get dimensions and labels - # simulated parameters using edhec data -mu=mean(Return.annualized(R)) -monthly=(1+mu)^(1/12)-1 - vol = as.numeric(StdDev.annualized(R)); - ret=as.numeric(Return.annualized(R)) - drawdown =as.numeric(maxDrawdown(R)) - sig=mean(StdDev.annualized(R)); -T= 36 -j=1 -dt=1/T -thres=4; -r=matrix(0,nsim,T+1) -monthly = 0 -r[,1]=monthly; -# Sigma 'monthly volatiltiy' will be the varying term -ratio= seq(-2, 2, by=.1); -len = length(ratio) -ddown=array(0, dim=c(nsim,len,thres)) -fddown=array(0, dim=c(len,thres)) -Z <- array(0, c(len)) -for(i in 1:len) -{ - monthly = sig*ratio[i]; - - for(j in 1:nsim) -{ - dz=rnorm(T) - - - r[j,2:37]=monthly+(sig*dz*sqrt(3*dt)) - - ddown[j,i,1]= ES((r[j,]),.99, method="modified") - ddown[j,i,1][is.na(ddown[j,i,1])] <- 0 - 
fddown[i,1]=fddown[i,1]+ddown[j,i,1] - ddown[j,i,2]= ES((r[j,]),.95, method="modified") - ddown[j,i,2][is.na(ddown[j,i,2])] <- 0 - fddown[i,2]=fddown[i,2]+ddown[j,i,2] - ddown[j,i,3]= ES((r[j,]),.90, method="modified") - ddown[j,i,3][is.na(ddown[j,i,3])] <- 0 - fddown[i,3]=fddown[i,3]+ddown[j,i,3] - ddown[j,i,4]= ES((r[j,]),.85, method="modified") - ddown[j,i,4][is.na(ddown[j,i,4])] <- 0 - fddown[i,4]=fddown[i,4]+ddown[j,i,4] - assign("last.warning", NULL, envir = baseenv()) -} -} -plot(((fddown[,1])/(sig*nsim)),xlab="Annualised Return/Volatility from [-2,2]",ylab="Maximum Drawdown/Volatility",type='o',col="blue") -lines(((fddown[,2])/(sig*nsim)),type='o',col="pink") -lines(((fddown[,3])/(sig*nsim)),type='o',col="green") -lines(((fddown[,4])/(sig*nsim)),type='o',col="red") - points((ret/vol), (-drawdown/vol), col = "black", pch=10) - legend(32,-4, c("%99", "%95", "%90","%85","Fund"), col = c("blue","pink","green","red","black"), text.col= "black", - lty = c(2, -1, 1,2), pch = c(-1, 3, 4,10), merge = TRUE, bg='gray90') - -title("Maximum Drawdown/Volatility as a function of Return/Volatility -36 monthly returns simulated 6,000 times") - edhec=NULL -} - -############################################################################### -# R (http://r-project.org/) Econometrics for Performance and Risk Analysis -# -# Copyright (c) 2004-2012 Peter Carl and Brian G. 
Peterson -# -# This R package is distributed under the terms of the GNU Public License (GPL) -# for full details see the file COPYING -# -# $Id: AcarSim.R 2163 2012-07-16 00:30:19Z braverock $ -# -############################################################################### \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,98 +0,0 @@ -#'@title Andrew Lo Sharpe Ratio -#'@description -#' Although the Sharpe ratio has become part of the canon of modern financial -#' analysis, its applications typically do not account for the fact that it is an -#' estimated quantity, subject to estimation errors that can be substantial in -#' some cases. -#' -#' Many studies have documented various violations of the assumption of -#' IID returns for financial securities. -#' -#' Under the assumption of stationarity,a version of the Central Limit Theorem can -#' still be applied to the estimator . -#' @details -#' The relationship between SR and SR(q) is somewhat more involved for non- -#'IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the covariances. 
Specifically, under -#' the assumption that returns \eqn{R_t} are stationary, -#' \deqn{ Var[(R_t)] = \sum \sum Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum (q-k)\rho(k) } -#' Where \eqn{ \rho(k) = Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order autocorrelation coefficient of the series of returns.This yields the following relationship between SR and SR(q): -#' and i,j belongs to 0 to q-1 -#'\deqn{SR(q) = \eta(q) } -#'Where : -#' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } -#' Where, k belongs to 0 to q-1 -#' SR(q) : Estimated Lo Sharpe Ratio -#' SR : Theoretical William Sharpe Ratio -#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of -#' daily asset returns -#' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of -#' annualized Risk Free Rate -#' @param q Number of autocorrelated lag periods. Taken as 3 (Default) -#' @param \dots any other pass thru parameters -#' @author Shubhankit Mohan -#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. -#'\code{\link[stats]{}} \cr -#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} -#' -#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} -#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} -#' @keywords ts multivariate distribution models non-iid -#' @examples -#' -#' data(edhec) -#' LoSharpe(edhec,0,3) -#' @rdname LoSharpe -#' @export -LoSharpe <- - function (Ra,Rf = 0,q = 3, ...) - { # @author Brian G. 
Peterson, Peter Carl - - - # Function: - R = checkData(Ra, method="xts") - # Get dimensions and labels - columns.a = ncol(R) - columnnames.a = colnames(R) - # Time used for daily Return manipulations - Time= 252*nyears(edhec) - clean.lo <- function(column.R,q) { - # compute the lagged return series - gamma.k =matrix(0,q) - mu = sum(column.R)/(Time) - Rf= Rf/(Time) - for(i in 1:q){ - lagR = lag(column.R, k=i) - # compute the Momentum Lagged Values - gamma.k[i]= (sum(((column.R-mu)*(lagR-mu)),na.rm=TRUE)) - } - return(gamma.k) - } - neta.lo <- function(pho.k,q) { - # compute the lagged return series - sumq = 0 - for(j in 1:q){ - sumq = sumq+ (q-j)*pho.k[j] - } - return(q/(sqrt(q+2*sumq))) - } - for(column.a in 1:columns.a) { # for each asset passed in as R - # clean the data and get rid of NAs - clean.ret=na.omit(R[,column.a]) - mu = sum(clean.ret)/(Time) - sig=sqrt(((clean.ret-mu)^2/(Time))) - pho.k = clean.lo(clean.ret,q)/(as.numeric(sig[1])) - netaq=neta.lo(pho.k,q) - #column.lo = (netaq*((mu-Rf)/as.numeric(sig[1]))) - column.lo = as.numeric(SharpeRatio.annualized(R[,column.a]))[1]*netaq - if(column.a == 1) { lo = column.lo } - else { lo = cbind (lo, column.lo) } - - } - colnames(lo) = columnnames.a - rownames(lo)= paste("Lo Sharpe Ratio") - return(lo) - - - # RESULTS: - - } Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,99 +0,0 @@ -#'@title Andrew Lo Sharpe Ratio Statistics -#'@description -#' Although the Sharpe ratio has become part of the canon of modern financial -#' analysis, its applications typically do not account for the fact that it is an -#' estimated quantity, subject to estimation errors which can be substantial in -#' some cases. 
-#' -#' Many studies have documented various violations of the assumption of -#' IID returns for financial securities. -#' -#' Under the assumption of stationarity,a version of the Central Limit Theorem can -#' still be applied to the estimator . -#' @details -#' The relationship between SR and SR(q) is somewhat more involved for non- -#'IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the covariances. Specifically, under -#' the assumption that returns \eqn{R_t} are stationary, -#' \deqn{ Var[(R_t)] = \sum \sum Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum (q-k)\rho(k) } -#' Where \eqn{ \rho(k) = Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order autocorrelation coefficient of the series of returns.This yields the following relationship between SR and SR(q): -#' and i,j belongs to 0 to q-1 -#'\deqn{SR(q) = \eta(q) } -#'Where : -#' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } -#' Where k belongs to 0 to q-1 -#' Under the assumption of assumption of asymptotic variance of SR(q), the standard error for the Sharpe Ratio Esitmator can be computed as: -#' \deqn{SE(SR(q)) = \sqrt((1+SR^2/2)/T)} -#' SR(q) : Estimated Lo Sharpe Ratio -#' SR : Theoretical William Sharpe Ratio -#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of -#' daily asset returns -#' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of -#' annualized Risk Free Rate -#' @param q Number of autocorrelated lag periods. Taken as 3 (Default) -#' @param \dots any other pass thru parameters -#' @author Shubhankit Mohan -#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. 
-#'\code{\link[stats]{}} \cr -#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} -#' -#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} -#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} -#' @keywords ts multivariate distribution models non-iid -#' @examples -#' -#' data(edhec) -#' se.LoSharpe(edhec,0,3) -#' @rdname se.LoSharpe -#' @export -se.LoSharpe <- - function (Ra,Rf = 0,q = 3, ...) - { # @author Brian G. Peterson, Peter Carl - - - # Function: - R = checkData(Ra, method="xts") - # Get dimensions and labels - columns.a = ncol(R) - columnnames.a = colnames(R) - # Time used for daily Return manipulations - Time= 252*nyears(edhec) - clean.lo <- function(column.R,q) { - # compute the lagged return series - gamma.k =matrix(0,q) - mu = sum(column.R)/(Time) - Rf= Rf/(Time) - for(i in 1:q){ - lagR = lag(column.R, k=i) - # compute the Momentum Lagged Values - gamma.k[i]= (sum(((column.R-mu)*(lagR-mu)),na.rm=TRUE)) - } - return(gamma.k) - } - neta.lo <- function(pho.k,q) { - # compute the lagged return series - sumq = 0 - for(j in 1:q){ - sumq = sumq+ (q-j)*pho.k[j] - } - return(q/(sqrt(q+2*sumq))) - } - for(column.a in 1:columns.a) { # for each asset passed in as R - # clean the data and get rid of NAs - mu = sum(R[,column.a])/(Time) - sig=sqrt(((R[,column.a]-mu)^2/(Time))) - pho.k = clean.lo(R[,column.a],q)/(as.numeric(sig[1])) - netaq=neta.lo(pho.k,q) - column.lo = (netaq*((mu-Rf)/as.numeric(sig[1]))) - column.lo= 1.96*sqrt((1+(column.lo*column.lo/2))/(Time)) - if(column.a == 1) { lo = column.lo } - else { lo = cbind (lo, column.lo) } - - } - colnames(lo) = columnnames.a - rownames(lo)= paste("Standard Error of Sharpe Ratio Estimates(95% Confidence)") - return(lo) - - - # RESULTS: - - } Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R 
2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,96 +0,0 @@ -#'@title Sharpe Ratio Statistics Summary -#'@description -#' The Sharpe ratio is simply the return per unit of risk (represented by -#' variability). In the classic case, the unit of risk is the standard -#' deviation of the returns. -#' -#' \deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} -#' -#' William Sharpe now recommends \code{\link{InformationRatio}} preferentially -#' to the original Sharpe Ratio. -#' -#' The higher the Sharpe ratio, the better the combined performance of "risk" -#' and return. -#' -#' As noted, the traditional Sharpe Ratio is a risk-adjusted measure of return -#' that uses standard deviation to represent risk. - -#' Although the Sharpe ratio has become part of the canon of modern financial -#' analysis, its applications typically do not account for the fact that it is an -#' estimated quantity, subject to estimation errors that can be substantial in -#' some cases. -#' -#' Many studies have documented various violations of the assumption of -#' IID returns for financial securities. -#' -#' Under the assumption of stationarity,a version of the Central Limit Theorem can -#' still be applied to the estimator . -#' @details -#' The relationship between SR and SR(q) is somewhat more involved for non- -#'IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the covariances. 
Specifically, under -#' the assumption that returns \eqn{R_t} are stationary, -#' \deqn{ Var[(R_t)] = \sum \sum Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum (q-k)\rho(k) } -#' Where \eqn{ \rho(k) = Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order autocorrelation coefficient of the series of returns.This yields the following relationship between SR and SR(q): -#' and i,j belongs to 0 to q-1 -#'\deqn{SR(q) = \eta(q) } -#'Where : -#' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } -#' Where, k belongs to 0 to q-1 -#' SR(q) : Estimated Lo Sharpe Ratio -#' SR : Theoretical William Sharpe Ratio -#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of -#' daily asset returns -#' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of -#' annualized Risk Free Rate -#' @param q Number of autocorrelated lag periods. Taken as 3 (Default) -#' @param \dots any other pass thru parameters -#' @author Shubhankit Mohan -#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. -#'\code{\link[stats]{}} \cr -#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} -#' -#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} -#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} -#' @keywords ts multivariate distribution models non-iid -#' @examples -#' -#' data(edhec) -#' table.Sharpe(edhec,0,3) -#' @rdname table.Sharpe -#' @export -table.Sharpe <- - function (Ra,Rf = 0,q = 3, ...) 
- { y = checkData(Ra, method = "xts") - columns = ncol(y) - rows = nrow(y) - columnnames = colnames(y) - rownames = rownames(y) - - # for each column, do the following: - for(column in 1:columns) { - x = y[,column] - - z = c(as.numeric(SharpeRatio.annualized(x)), - as.numeric(LoSharpe(x)), - as.numeric(Return.annualized(x)),as.numeric(StdDev.annualized(x)),as.numeric(se.Losharpe(x))) - - znames = c( - "William Sharpe Ratio", - "Andrew Lo Sharpe Ratio", - "Annualized Return", - "Annualized Standard Deviation","Sharpe Ratio Standard Error(95%)" - ) - if(column == 1) { - resultingtable = data.frame(Value = z, row.names = znames) - } - else { - nextcolumn = data.frame(Value = z, row.names = znames) - resultingtable = cbind(resultingtable, nextcolumn) - } - } - colnames(resultingtable) = columnnames - ans = base::round(resultingtable, digits) - ans - - - } Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/AcarSim.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/AcarSim.Rd 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/AcarSim.Rd 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,52 +0,0 @@ -\name{AcarSim} -\alias{AcarSim} -\title{Acar-Shane Maximum Loss Plot} -\usage{ - AcarSim(R, nsim = 1) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of asset returns} - - \item{nsim}{number of simulations input} -} -\description{ - To get some insight on the relationships between maximum - drawdown per unit of volatility and mean return divided - by volatility, we have proceeded to Monte-Carlo - simulations. We have simulated cash flows over a period - of 36 monthly returns and measured maximum drawdown for - varied levels of annualised return divided by volatility - varying from minus \emph{two to two} by step of - \emph{0.1} . The process has been repeated \bold{six - thousand times}. 
-} -\details{ - Unfortunately, there is no \bold{analytical formulae} to - establish the maximum drawdown properties under the - random walk assumption. We should note first that due to - its definition, the maximum drawdown divided by - volatility can be interpreted as the only function of the - ratio mean divided by volatility. \deqn{MD/[\sigma]= Min - (\sum[X(j)])/\sigma = F(\mu/\sigma)} Where j varies from - 1 to n ,which is the number of drawdown's in simulation -} -\examples{ -library(PerformanceAnalytics) -AcarSim(R) -} -\author{ - Shubhankit Mohan -} -\references{ - Maximum Loss and Maximum Drawdown in Financial - Markets,\emph{International Conference Sponsored by BNP - and Imperial College on: Forecasting Financial Markets, - London, United Kingdom, May 1997} - \url{http://www.intelligenthedgefundinvesting.com/pubs/easj.pdf} -} -\keyword{Drawdown} -\keyword{Loss} -\keyword{Maximum} -\keyword{Simulated} - Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,71 +0,0 @@ -\name{LoSharpe} -\alias{LoSharpe} -\title{Andrew Lo Sharpe Ratio} -\usage{ - LoSharpe(Ra, Rf = 0, q = 3, ...) -} -\arguments{ - \item{Ra}{an xts, vector, matrix, data frame, timeSeries - or zoo object of daily asset returns} - - \item{Rf}{an xts, vector, matrix, data frame, timeSeries - or zoo object of annualized Risk Free Rate} - - \item{q}{Number of autocorrelated lag periods. 
Taken as 3 - (Default)} - - \item{\dots}{any other pass thru parameters} -} -\description{ - Although the Sharpe ratio has become part of the canon of - modern financial analysis, its applications typically do - not account for the fact that it is an estimated - quantity, subject to estimation errors that can be - substantial in some cases. - - Many studies have documented various violations of the - assumption of IID returns for financial securities. - - Under the assumption of stationarity,a version of the - Central Limit Theorem can still be applied to the - estimator . -} -\details{ - The relationship between SR and SR(q) is somewhat more - involved for non- IID returns because the variance of - Rt(q) is not just the sum of the variances of component - returns but also includes all the covariances. - Specifically, under the assumption that returns \eqn{R_t} - are stationary, \deqn{ Var[(R_t)] = \sum \sum - Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum - (q-k)\rho(k) } Where \eqn{ \rho(k) = - Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order - autocorrelation coefficient of the series of returns.This - yields the following relationship between SR and SR(q): - and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where - : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 - \sum(q-k)\rho(k)] } Where, k belongs to 0 to q-1 SR(q) : - Estimated Lo Sharpe Ratio SR : Theoretical William Sharpe - Ratio -} -\examples{ -data(edhec) -LoSharpe(edhec,0,3) -} -\author{ - Shubhankit Mohan -} -\references{ - Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, - AIMR. 
\code{\link[stats]{}} \cr - \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} - - Andrew Lo,\emph{Sharpe Ratio may be Overstated} - \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{non-iid} -\keyword{ts} - Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,74 +0,0 @@ -\name{se.LoSharpe} -\alias{se.LoSharpe} -\title{Andrew Lo Sharpe Ratio Statistics} -\usage{ - se.LoSharpe(Ra, Rf = 0, q = 3, ...) -} -\arguments{ - \item{Ra}{an xts, vector, matrix, data frame, timeSeries - or zoo object of daily asset returns} - - \item{Rf}{an xts, vector, matrix, data frame, timeSeries - or zoo object of annualized Risk Free Rate} - - \item{q}{Number of autocorrelated lag periods. Taken as 3 - (Default)} - - \item{\dots}{any other pass thru parameters} -} -\description{ - Although the Sharpe ratio has become part of the canon of - modern financial analysis, its applications typically do - not account for the fact that it is an estimated - quantity, subject to estimation errors which can be - substantial in some cases. - - Many studies have documented various violations of the - assumption of IID returns for financial securities. - - Under the assumption of stationarity,a version of the - Central Limit Theorem can still be applied to the - estimator . -} -\details{ - The relationship between SR and SR(q) is somewhat more - involved for non- IID returns because the variance of - Rt(q) is not just the sum of the variances of component - returns but also includes all the covariances. 
- Specifically, under the assumption that returns \eqn{R_t} - are stationary, \deqn{ Var[(R_t)] = \sum \sum - Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum - (q-k)\rho(k) } Where \eqn{ \rho(k) = - Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order - autocorrelation coefficient of the series of returns.This - yields the following relationship between SR and SR(q): - and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where - : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 - \sum(q-k)\rho(k)] } Where k belongs to 0 to q-1 Under the - assumption of assumption of asymptotic variance of SR(q), - the standard error for the Sharpe Ratio Esitmator can be - computed as: \deqn{SE(SR(q)) = \sqrt((1+SR^2/2)/T)} SR(q) - : Estimated Lo Sharpe Ratio SR : Theoretical William - Sharpe Ratio -} -\examples{ -data(edhec) -se.LoSharpe(edhec,0,3) -} -\author{ - Shubhankit Mohan -} -\references{ - Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, - AIMR. \code{\link[stats]{}} \cr - \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} - - Andrew Lo,\emph{Sharpe Ratio may be Overstated} - \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{non-iid} -\keyword{ts} - Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,86 +0,0 @@ -\name{table.Sharpe} -\alias{table.Sharpe} -\title{Sharpe Ratio Statistics Summary} -\usage{ - table.Sharpe(Ra, Rf = 0, q = 3, ...) 
-} -\arguments{ - \item{Ra}{an xts, vector, matrix, data frame, timeSeries - or zoo object of daily asset returns} - - \item{Rf}{an xts, vector, matrix, data frame, timeSeries - or zoo object of annualized Risk Free Rate} - - \item{q}{Number of autocorrelated lag periods. Taken as 3 - (Default)} - - \item{\dots}{any other pass thru parameters} -} -\description{ - The Sharpe ratio is simply the return per unit of risk - (represented by variability). In the classic case, the - unit of risk is the standard deviation of the returns. - - \deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} - - William Sharpe now recommends - \code{\link{InformationRatio}} preferentially to the - original Sharpe Ratio. - - The higher the Sharpe ratio, the better the combined - performance of "risk" and return. - - As noted, the traditional Sharpe Ratio is a risk-adjusted - measure of return that uses standard deviation to - represent risk. Although the Sharpe ratio has become part - of the canon of modern financial analysis, its - applications typically do not account for the fact that - it is an estimated quantity, subject to estimation errors - that can be substantial in some cases. - - Many studies have documented various violations of the - assumption of IID returns for financial securities. - - Under the assumption of stationarity,a version of the - Central Limit Theorem can still be applied to the - estimator . -} -\details{ - The relationship between SR and SR(q) is somewhat more - involved for non- IID returns because the variance of - Rt(q) is not just the sum of the variances of component - returns but also includes all the covariances. 
- Specifically, under the assumption that returns \eqn{R_t} - are stationary, \deqn{ Var[(R_t)] = \sum \sum - Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum - (q-k)\rho(k) } Where \eqn{ \rho(k) = - Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order - autocorrelation coefficient of the series of returns.This - yields the following relationship between SR and SR(q): - and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where - : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 - \sum(q-k)\rho(k)] } Where, k belongs to 0 to q-1 SR(q) : - Estimated Lo Sharpe Ratio SR : Theoretical William Sharpe - Ratio -} -\examples{ -data(edhec) -table.Sharpe(edhec,0,3) -} -\author{ - Shubhankit Mohan -} -\references{ - Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, - AIMR. \code{\link[stats]{}} \cr - \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} - - Andrew Lo,\emph{Sharpe Ratio may be Overstated} - \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{non-iid} -\keyword{ts} - Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.Rnw 2013-09-07 10:16:35 UTC (rev 3015) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.Rnw 2013-09-07 11:19:40 UTC (rev 3016) @@ -1,226 +0,0 @@ -%% no need for \DeclareGraphicsExtensions{.pdf,.eps} - -\documentclass[12pt,letterpaper,english]{article} -\usepackage{times} -\usepackage[T1]{fontenc} -\IfFileExists{url.sty}{\usepackage{url}} - {\newcommand{\url}{\texttt}} - -\usepackage{babel} -%\usepackage{noweb} -\usepackage{Rd} - -\usepackage{Sweave} -\SweaveOpts{engine=R,eps=FALSE} -%\VignetteIndexEntry{Performance Attribution from Bacon} -%\VignetteDepends{PerformanceAnalytics} 
-%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} -%\VignettePackage{PerformanceAnalytics} - -%\documentclass[a4paper]{article} -%\usepackage[noae]{Sweave} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} -%\usepackage{graphicx} -%\usepackage{graphicx, verbatim} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage{graphicx} - -\title{Commodity Index Fund Performance Analysis} -\author{Shubhankit Mohan} - -\begin{document} -\SweaveOpts{concordance=TRUE} - -\maketitle - - -\begin{abstract} -The fact that many hedge fund returns exhibit extraordinary levels of serial correlation is now well-known and generally accepted as fact. The effect of this autocorrelation on investment returns diminishes the apparent risk of such asset classes as the true returns/risk is easily \textbf{camouflaged} within a haze of illiquidity, stale prices, averaged price quotes and smoothed return reporting. We highlight the effect \emph{autocorrelation} and \emph{drawdown} has on performance analysis by investigating the results of functions developed during the Google Summer of Code 2013 on \textbf{commodity based index} . -\end{abstract} - -<>= -library(PerformanceAnalytics) -library(noniid.sm) -data(edhec) -@ - - -\section{Background} -The investigated fund index that tracks a basket of \emph{commodities} to measure their performance.The value of these indexes fluctuates based on their underlying commodities, and this value depends on the \emph{component}, \emph{methodology} and \emph{style} to cover commodity markets . 
- -A brief overview of the four index invested in our report are : - \begin{itemize} - \item [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3016 From noreply at r-forge.r-project.org Sat Sep 7 14:22:10 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 14:22:10 +0200 (CEST) Subject: [Returnanalytics-commits] r3017 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R Message-ID: <20130907122210.39A2A1805AA@r-forge.r-project.org> Author: shubhanm Date: 2013-09-07 14:22:09 +0200 (Sat, 07 Sep 2013) New Revision: 3017 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R Log: chart.Autocorrelation.R : Stacked chart used ACFSTDEv : na's handling added Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R 2013-09-07 11:19:40 UTC (rev 3016) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R 2013-09-07 12:22:09 UTC (rev 3017) @@ -46,7 +46,8 @@ for(column.a in 1:columns.a) { # for each asset passed in as R # clean the data and get rid of NAs - column.return = R[,column.a] + # column.return = R[,column.a] + column.return = na.omit(R[,column.a]) acf = as.numeric(acf(as.numeric(column.return), plot = FALSE)[1:lag][[1]]) coef= sum(acf*acf) if(!xtsible(R) & is.na(scale)) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R 2013-09-07 11:19:40 UTC (rev 3016) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R 2013-09-07 12:22:09 UTC (rev 3017) @@ 
-35,13 +35,13 @@ # Graph autos with adjacent bars using rainbow colors aa= table.Autocorrelation(R) - barplot(as.matrix(aa), main="ACF Lag Plot", ylab= "Value of Coefficient", - , xlab = NULL,col=rich6equal) + chart.StackedBar(as.matrix(aa), main="ACF Lag Plot", ylab= "Value of Coefficient", + , xlab = NULL,col=bluemono) # Place the legend at the top-left corner with no frame # using rainbow colors - legend("topright", c("1","2","3","4","5","6"), cex=0.6, - bty="n", fill=rich6equal); + #legend("topright", c("1","2","3","4","5","6"), cex=0.6, +# bty="n", fill=rich6equal); From noreply at r-forge.r-project.org Sat Sep 7 14:48:30 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 14:48:30 +0200 (CEST) Subject: [Returnanalytics-commits] r3018 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R Message-ID: <20130907124830.73A5B184EAF@r-forge.r-project.org> Author: shubhanm Date: 2013-09-07 14:48:30 +0200 (Sat, 07 Sep 2013) New Revision: 3018 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R Log: table.ComparitiveReturn.GLM.R Handle of nan + correction of ARMA function table.UnsmoothReturn.R Handle of NANa + handling correction of ARMA function + still some issues in accuracy of values in Comparative Return GLM Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R 2013-09-07 12:22:09 UTC (rev 3017) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R 2013-09-07 12:48:30 UTC (rev 3018) @@ -27,7 +27,7 @@ # p = Confifence Level # Output: # A table of estimates of Moving Average - + require("tseries") y = checkData(R, method = "xts") columns = ncol(y) rows = 
nrow(y) @@ -37,13 +37,14 @@ # for each column, do the following: for(column in 1:columns) { x = y[,column] + x=na.omit(x) skew = skewness(x) - arma.coeff= arma(x,0,n) + arma.coeff= arma(x,order=c(0,n)) kurt= kurtosis(x) z = c(skew, - ((sum(arma.coeff$theta^2)^1.5)*(skew/(sum(arma.coeff$theta^3)))), + ((sum(as.numeric(arma.coeff$coef[1:n])^2)^1.5)*(skew/(sum(as.numeric(arma.coeff$coef[1:n])^3)))), kurt, - (kurt*(sum(arma.coeff$theta^2)^2-6*(sum(arma.coeff$theta^2)*sum(arma.coeff$theta^2)))/(sum(arma.coeff$theta^4)))) + (-kurt*(sum(as.numeric(arma.coeff$coef[1:n])^2)^2-6*(sum(as.numeric(arma.coeff$coef[1:n])^2)*sum(as.numeric(arma.coeff$coef[1:n])^2)))/(sum(as.numeric(arma.coeff$coef[1:n])^4)))) znames = c( "Skewness ( Orignal) ", "Skewness (Unsmooth)", @@ -60,8 +61,8 @@ colnames(resultingtable) = columnnames ans = base::round(resultingtable, digits) ans - - + # arma.coeff$theta + # as.numeric(arma.coeff$coef[1:n]) } ############################################################################### Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R 2013-09-07 12:22:09 UTC (rev 3017) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R 2013-09-07 12:48:30 UTC (rev 3018) @@ -53,6 +53,7 @@ # for each column, do the following: for(column in 1:columns) { x = y[,column] + x=na.omit(x) ma.stats= arma(x, order = c(0, 2)) z = c(as.numeric(ma.stats$coef[1]), From noreply at r-forge.r-project.org Sat Sep 7 15:42:34 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 15:42:34 +0200 (CEST) Subject: [Returnanalytics-commits] r3019 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
R man Message-ID: <20130907134234.61D431859E1@r-forge.r-project.org> Author: shubhanm Date: 2013-09-07 15:42:33 +0200 (Sat, 07 Sep 2013) New Revision: 3019 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/UnSmoothReturn.Rd Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/UnsmoothReturn.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.ComparitiveReturn.GLM.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.UnsmoothReturn.Rd Log: Addition of examples, bug correction in "Return functions" + Clean Build Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-07 12:48:30 UTC (rev 3018) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-07 13:42:33 UTC (rev 3019) @@ -12,3 +12,4 @@ export(table.ComparitiveReturn.GLM) export(table.EMaxDDGBM) export(table.UnsmoothReturn) +export(UnsmoothReturn) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/UnsmoothReturn.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/UnsmoothReturn.R 2013-09-07 12:48:30 UTC (rev 3018) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/UnsmoothReturn.R 2013-09-07 13:42:33 UTC (rev 3019) @@ -1,5 +1,40 @@ -UnSmoothReturn<- - function(R = NULL,q, ...) 
+#' @title Unsmooth Time Series Return +#' +#' @description Creates a table of estimates of moving averages for comparison across +#' multiple instruments or funds as well as their standard error and +#' smoothing index , which is Compenent Decomposition of Table of Unsmooth Returns +#' +#' @details The estimation method is based on a maximum likelihood estimation of a moving average +#' process (we use the innovations algorithm proposed by \bold{Brockwell and Davis} [1991]). The first +#' step of this approach consists in computing a series of de-meaned observed returns: +#' \deqn{X(t) = R(0,t)- \mu} +#' where \eqn{\mu} is the expected value of the series of observed returns. +#' As a consequence, the above equation can be written as : +#' \deqn{X(t)= \theta(0)\eta(t) + \theta(1)\eta(t-1) + ..... + \theta(k)\eta(t-k)} +#' with the additional assumption that : \bold{\eqn{\eta(k)= N(0,\sigma(\eta)^2)}} +#' The structure of the model and the two constraints suppose that the complete integration of +#'information in the price of the considered asset may take up to k periods because of its illiquidity. +#'In addition, according to Getmansky et al., this model is in line with previous models of nonsynchronous trading such as the one developed by \bold{Cohen, Maier, Schwartz and Whitcomb} +#' [1986]. +#' Smoothing has an impact on the third and fourth moments of the returns distribution too. +#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of +#' asset returns +#' @param q number of series lags +#' @param ... any other passthru parameter +#' @references Cavenaile, Laurent, Coen, Alain and Hubner, Georges,\emph{ The Impact of Illiquidity and Higher Moments of Hedge Fund Returns on Their Risk-Adjusted Performance and Diversification Potential} (October 30, 2009). Journal of Alternative Investments, Forthcoming. 
Available at SSRN: \url{http://ssrn.com/abstract=1502698} Working paper is at \url{http://www.hec.ulg.ac.be/sites/default/files/workingpapers/WP_HECULg_20091001_Cavenaile_Coen_Hubner.pdf} +#' @author Shubhankit Mohan +#' @keywords ts smooth return models +#' @seealso Reutrn.Geltner Reutrn.GLM Return.Okunev +#' @rdname UnSmoothReturn +#' @examples +#' library(PerformanceAnalytics) +#' library(tseries) +#' data(managers) +#' UnsmoothReturn(managers,3) +#' @export + +UnsmoothReturn<- + function(R = NULL,q=2, ...) { columns = 1 columnnames = NULL @@ -14,7 +49,8 @@ # Calculate AutoCorrelation Coefficient for(column in 1:columns) { # for each asset passed in as R - y = checkData(R[,column], method="vector", na.rm = TRUE) + y = R[,column] + y=na.omit(y) acflag6 = acf(y,plot=FALSE,lag.max=6)[[1]][2:7] values = sum(acflag6*acflag6)/(sum(acflag6)*sum(acflag6)) @@ -29,7 +65,7 @@ result.df = cbind(result.df, nextcol) } } - return(result.df[1:q,]*R) # Unsmooth Return + return(as.numeric(result.df)*R) # Unsmooth Return } } \ No newline at end of file Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R 2013-09-07 12:48:30 UTC (rev 3018) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R 2013-09-07 13:42:33 UTC (rev 3019) @@ -36,7 +36,7 @@ aa= table.Autocorrelation(R) chart.StackedBar(as.matrix(aa), main="ACF Lag Plot", ylab= "Value of Coefficient", - , xlab = NULL,col=bluemono) + , xlab = NULL,colorset=bluemono) # Place the legend at the top-left corner with no frame # using rainbow colors Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R 2013-09-07 12:48:30 
UTC (rev 3018) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R 2013-09-07 13:42:33 UTC (rev 3019) @@ -13,6 +13,11 @@ #' @references Okunev, John and White, Derek R., \emph{ Hedge Fund Risk Factors and Value at Risk of Credit Trading Strategies} (October 2003). #' Available at SSRN: \url{http://ssrn.com/abstract=460641} #' @rdname table.ComparitiveReturn.GLM +#' @examples +#' library(PerformanceAnalytics) +#' library(tseries) +#' data(managers) +#' table.ComparitiveReturn.GLM(managers,3) #' @export table.ComparitiveReturn.GLM <- function (R, n = 3, digits = 4) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R 2013-09-07 12:48:30 UTC (rev 3018) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R 2013-09-07 13:42:33 UTC (rev 3019) @@ -29,6 +29,11 @@ #' #' #' @rdname table.UnsmoothReturn +#' @examples +#' library(PerformanceAnalytics) +#' library(tseries) +#' data(managers) +#' table.UnsmoothReturn(managers,3) #' @export table.UnsmoothReturn <- function (R, n = 2, p= 0.95, digits = 4) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/UnSmoothReturn.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/UnSmoothReturn.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/UnSmoothReturn.Rd 2013-09-07 13:42:33 UTC (rev 3019) @@ -0,0 +1,69 @@ +\name{UnsmoothReturn} +\alias{UnsmoothReturn} +\title{Unsmooth Time Series Return} +\usage{ + UnsmoothReturn(R = NULL, q = 2, ...) 
+} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{q}{number of series lags} + + \item{...}{any other passthru parameter} +} +\description{ + Creates a table of estimates of moving averages for + comparison across multiple instruments or funds as well + as their standard error and smoothing index , which is + Compenent Decomposition of Table of Unsmooth Returns +} +\details{ + The estimation method is based on a maximum likelihood + estimation of a moving average process (we use the + innovations algorithm proposed by \bold{Brockwell and + Davis} [1991]). The first step of this approach consists + in computing a series of de-meaned observed returns: + \deqn{X(t) = R(0,t)- \mu} where \eqn{\mu} is the expected + value of the series of observed returns. As a + consequence, the above equation can be written as : + \deqn{X(t)= \theta(0)\eta(t) + \theta(1)\eta(t-1) + ..... + + \theta(k)\eta(t-k)} with the additional assumption that + : \bold{\eqn{\eta(k)= N(0,\sigma(\eta)^2)}} The structure + of the model and the two constraints suppose that the + complete integration of information in the price of the + considered asset may take up to k periods because of its + illiquidity. In addition, according to Getmansky et al., + this model is in line with previous models of + nonsynchronous trading such as the one developed by + \bold{Cohen, Maier, Schwartz and Whitcomb} [1986]. + Smoothing has an impact on the third and fourth moments + of the returns distribution too. +} +\examples{ +library(PerformanceAnalytics) +library(tseries) +data(managers) +UnsmoothReturn(managers,3) +} +\author{ + Shubhankit Mohan +} +\references{ + Cavenaile, Laurent, Coen, Alain and Hubner, + Georges,\emph{ The Impact of Illiquidity and Higher + Moments of Hedge Fund Returns on Their Risk-Adjusted + Performance and Diversification Potential} (October 30, + 2009). Journal of Alternative Investments, Forthcoming. 
+ Available at SSRN: \url{http://ssrn.com/abstract=1502698} + Working paper is at + \url{http://www.hec.ulg.ac.be/sites/default/files/workingpapers/WP_HECULg_20091001_Cavenaile_Coen_Hubner.pdf} +} +\seealso{ + Reutrn.Geltner Reutrn.GLM Return.Okunev +} +\keyword{models} +\keyword{return} +\keyword{smooth} +\keyword{ts} + Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.ComparitiveReturn.GLM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.ComparitiveReturn.GLM.Rd 2013-09-07 12:48:30 UTC (rev 3018) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.ComparitiveReturn.GLM.Rd 2013-09-07 13:42:33 UTC (rev 3019) @@ -18,6 +18,12 @@ Skewness and Kurtosis for Orignal and Unsmooth Returns Respectively } +\examples{ +library(PerformanceAnalytics) +library(tseries) +data(managers) +table.ComparitiveReturn.GLM(managers,3) +} \author{ Peter Carl, Brian Peterson, Shubhankit Mohan } Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.UnsmoothReturn.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.UnsmoothReturn.Rd 2013-09-07 12:48:30 UTC (rev 3018) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.UnsmoothReturn.Rd 2013-09-07 13:42:33 UTC (rev 3019) @@ -42,6 +42,12 @@ Smoothing has an impact on the third and fourth moments of the returns distribution too. } +\examples{ +library(PerformanceAnalytics) +library(tseries) +data(managers) +table.UnsmoothReturn(managers,3) +} \author{ Peter Carl, Brian Peterson, Shubhankit Mohan } From noreply at r-forge.r-project.org Sat Sep 7 16:58:14 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 16:58:14 +0200 (CEST) Subject: [Returnanalytics-commits] r3020 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
R man vignettes Message-ID: <20130907145814.B3483184E8D@r-forge.r-project.org> Author: shubhanm Date: 2013-09-07 16:58:14 +0200 (Sat, 07 Sep 2013) New Revision: 3020 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.pdf Log: 3 Sharpe Ratio function : correction of bugs, addition of examples in documentation using managers data to Clean R CMD Build Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-07 13:42:33 UTC (rev 3019) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-07 14:58:14 UTC (rev 3020) @@ -33,3 +33,6 @@ 'table.EMaxDDGBM.R' 'table.UnsmoothReturn.R' 'UnsmoothReturn.R' + 'LoSharpe.R' + 'se.LoSharpe.R' + 'table.Sharpe.R' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-07 13:42:33 UTC (rev 3019) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-07 14:58:14 UTC (rev 3020) @@ -5,11 +5,14 @@ export(chart.Autocorrelation) export(EMaxDDGBM) export(GLMSmoothIndex) +export(LoSharpe) export(QP.Norm) export(Return.GLM) export(Return.Okunev) +export(se.LoSharpe) export(SterlingRatio.Norm) 
export(table.ComparitiveReturn.GLM) export(table.EMaxDDGBM) +export(table.Sharpe) export(table.UnsmoothReturn) export(UnsmoothReturn) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R 2013-09-07 14:58:14 UTC (rev 3020) @@ -0,0 +1,101 @@ +#'@title Andrew Lo Sharpe Ratio +#'@description +#' Although the Sharpe ratio has become part of the canon of modern financial +#' analysis, its applications typically do not account for the fact that it is an +#' estimated quantity, subject to estimation errors that can be substantial in +#' some cases. +#' +#' Many studies have documented various violations of the assumption of +#' IID returns for financial securities. +#' +#' Under the assumption of stationarity,a version of the Central Limit Theorem can +#' still be applied to the estimator . +#' @details +#' The relationship between SR and SR(q) is somewhat more involved for non- +#'IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the covariances. 
Specifically, under +#' the assumption that returns \eqn{R_t} are stationary, +#' \deqn{ Var[(R_t)] = \sum \sum Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum (q-k)\rho(k) } +#' Where \eqn{ \rho(k) = Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order autocorrelation coefficient of the series of returns.This yields the following relationship between SR and SR(q): +#' and i,j belongs to 0 to q-1 +#'\deqn{SR(q) = \eta(q) } +#'Where : +#' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } +#' Where, k belongs to 0 to q-1 +#' SR(q) : Estimated Lo Sharpe Ratio +#' SR : Theoretical William Sharpe Ratio +#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of +#' daily asset returns +#' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of +#' annualized Risk Free Rate +#' @param q Number of autocorrelated lag periods. Taken as 3 (Default) +#' @param \dots any other pass thru parameters +#' @author Shubhankit Mohan +#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. +#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} +#' +#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} +#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +#' @keywords ts multivariate distribution models non-iid +#' @examples +#' data(managers) +#' LoSharpe(managers,0,3) +#' @rdname LoSharpe +#' @export +LoSharpe <- + function (Ra,Rf = 0,q = 3, ...) + { # @author Brian G. 
Peterson, Peter Carl + + + # Function: + R = checkData(Ra, method="xts") + # Get dimensions and labels + columns.a = ncol(R) + columnnames.a = colnames(R) + # Time used for daily Return manipulations + Time= 252*nyears(R) + clean.lo <- function(column.R,q) { + # compute the lagged return series + gamma.k =matrix(0,q) + mu = sum(column.R)/(Time) + Rf= Rf/(Time) + for(i in 1:q){ + lagR = lag(column.R, k=i) + # compute the Momentum Lagged Values + gamma.k[i]= (sum(((column.R-mu)*(lagR-mu)),na.rm=TRUE)) + } + return(gamma.k) + } + neta.lo <- function(pho.k,q) { + # compute the lagged return series + sumq = 0 + for(j in 1:q){ + sumq = sumq+ (q-j)*pho.k[j] + } + return(q/(sqrt(q+2*sumq))) + } + column.lo=NULL + lo=NULL + for(column.a in 1:columns.a) { # for each asset passed in as R + # clean the data and get rid of NAs + clean.ret=na.omit(R[,column.a]) + mu = sum(clean.ret)/(Time) + sig=sqrt(((clean.ret-mu)^2/(Time))) + pho.k = na.omit(clean.lo(clean.ret,q))/(as.numeric(sig[1])) + netaq=neta.lo(pho.k,q) + #column.lo = (netaq*((mu-Rf)/as.numeric(sig[1]))) + column.lo = as.numeric(SharpeRatio.annualized(R[,column.a]))[1]*netaq + # if(column.a == 1) { lo = column.lo } + # else { lo = cbind (lo, column.lo) + # colnames(lo) = columnnames.a + # } + + lo=cbind(lo,column.lo) + } + colnames(lo) = columnnames.a + rownames(lo)= paste("Lo Sharpe Ratio") + return(lo) + + + # RESULTS: + + } Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R 2013-09-07 14:58:14 UTC (rev 3020) @@ -0,0 +1,104 @@ +#'@title Andrew Lo Sharpe Ratio Statistics +#'@description +#' Although the Sharpe ratio has become part of the canon of modern financial +#' analysis, its applications typically do not account for the fact that it is an +#' estimated quantity, 
subject to estimation errors which can be substantial in +#' some cases. +#' +#' Many studies have documented various violations of the assumption of +#' IID returns for financial securities. +#' +#' Under the assumption of stationarity,a version of the Central Limit Theorem can +#' still be applied to the estimator . +#' @details +#' The relationship between SR and SR(q) is somewhat more involved for non- +#'IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the covariances. Specifically, under +#' the assumption that returns \eqn{R_t} are stationary, +#' \deqn{ Var[(R_t)] = \sum \sum Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum (q-k)\rho(k) } +#' Where \eqn{ \rho(k) = Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order autocorrelation coefficient of the series of returns.This yields the following relationship between SR and SR(q): +#' and i,j belongs to 0 to q-1 +#'\deqn{SR(q) = \eta(q) } +#'Where : +#' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } +#' Where k belongs to 0 to q-1 +#' Under the assumption of assumption of asymptotic variance of SR(q), the standard error for the Sharpe Ratio Esitmator can be computed as: +#' \deqn{SE(SR(q)) = \sqrt((1+SR^2/2)/T)} +#' SR(q) : Estimated Lo Sharpe Ratio +#' SR : Theoretical William Sharpe Ratio +#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of +#' daily asset returns +#' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of +#' annualized Risk Free Rate +#' @param q Number of autocorrelated lag periods. Taken as 3 (Default) +#' @param \dots any other pass thru parameters +#' @author Shubhankit Mohan +#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. 
+#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} +#' +#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} +#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +#' @keywords ts multivariate distribution models non-iid +#' @examples +#' +#' data(managers) +#' se.LoSharpe(managers,0,3) +#' @rdname se.LoSharpe +#' @export +se.LoSharpe <- + function (Ra,Rf = 0,q = 3, ...) + { # @author Brian G. Peterson, Peter Carl + + + # Function: + R = checkData(Ra, method="xts") + # Get dimensions and labels + columns.a = ncol(R) + columnnames.a = colnames(R) + # Time used for daily Return manipulations + Time= 252*nyears(R) + clean.lo <- function(column.R,q) { + # compute the lagged return series + gamma.k =matrix(0,q) + mu = sum(column.R)/(Time) + Rf= Rf/(Time) + for(i in 1:q){ + lagR = lag(column.R, k=i) + # compute the Momentum Lagged Values + gamma.k[i]= (sum(((column.R-mu)*(lagR-mu)),na.rm=TRUE)) + } + return(gamma.k) + } + neta.lo <- function(pho.k,q) { + # compute the lagged return series + sumq = 0 + for(j in 1:q){ + sumq = sumq+ (q-j)*pho.k[j] + } + return(q/(sqrt(q+2*sumq))) + } + column.lo=NULL + lo=NULL + + for(column.a in 1:columns.a) { # for each asset passed in as R + # clean the data and get rid of NAs + mu = sum(R[,column.a])/(Time) + sig=sqrt(((R[,column.a]-mu)^2/(Time))) + pho.k = clean.lo(R[,column.a],q)/(as.numeric(sig[1])) + netaq=neta.lo(pho.k,q) + column.lo = (netaq*((mu-Rf)/as.numeric(sig[1]))) + column.lo= 1.96*sqrt((1+(column.lo*column.lo/2))/(Time)) + lo=cbind(lo,column.lo) + } + + colnames(lo) = columnnames.a + rownames(lo)= paste("Standard Error of Sharpe Ratio Estimates(95% Confidence)") + return(lo) + +#colnames(lo) = columnnames.a +#rownames(lo)= paste("Lo Sharpe Ratio") +#return(lo) + + + # RESULTS: + + } Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R =================================================================== --- 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.Sharpe.R 2013-09-07 14:58:14 UTC (rev 3020) @@ -0,0 +1,96 @@ +#'@title Sharpe Ratio Statistics Summary +#'@description +#' The Sharpe ratio is simply the return per unit of risk (represented by +#' variability). In the classic case, the unit of risk is the standard +#' deviation of the returns. +#' +#' \deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} +#' +#' William Sharpe now recommends \code{\link{InformationRatio}} preferentially +#' to the original Sharpe Ratio. +#' +#' The higher the Sharpe ratio, the better the combined performance of "risk" +#' and return. +#' +#' As noted, the traditional Sharpe Ratio is a risk-adjusted measure of return +#' that uses standard deviation to represent risk. + +#' Although the Sharpe ratio has become part of the canon of modern financial +#' analysis, its applications typically do not account for the fact that it is an +#' estimated quantity, subject to estimation errors that can be substantial in +#' some cases. +#' +#' Many studies have documented various violations of the assumption of +#' IID returns for financial securities. +#' +#' Under the assumption of stationarity,a version of the Central Limit Theorem can +#' still be applied to the estimator . +#' @details +#' The relationship between SR and SR(q) is somewhat more involved for non- +#'IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the covariances. 
Specifically, under +#' the assumption that returns \eqn{R_t} are stationary, +#' \deqn{ Var[(R_t)] = \sum \sum Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum (q-k)\rho(k) } +#' Where \eqn{ \rho(k) = Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order autocorrelation coefficient of the series of returns.This yields the following relationship between SR and SR(q): +#' and i,j belongs to 0 to q-1 +#'\deqn{SR(q) = \eta(q) } +#'Where : +#' \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 \sum(q-k)\rho(k)] } +#' Where, k belongs to 0 to q-1 +#' SR(q) : Estimated Lo Sharpe Ratio +#' SR : Theoretical William Sharpe Ratio +#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of +#' daily asset returns +#' @param Rf an xts, vector, matrix, data frame, timeSeries or zoo object of +#' annualized Risk Free Rate +#' @param q Number of autocorrelated lag periods. Taken as 3 (Default) +#' @param digits Round off Numerical Value +#' @param \dots any other pass thru parameters +#' @author Shubhankit Mohan +#' @references Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, AIMR. +#' \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} +#' +#' Andrew Lo,\emph{Sharpe Ratio may be Overstated} +#' \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +#' @keywords ts multivariate distribution models non-iid +#' @examples +#' +#' data(managers) +#' table.Sharpe(managers,0,3) +#' @rdname table.Sharpe +#' @export +table.Sharpe <- + function (Ra,Rf = 0,q = 3,digits=4, ...) 
+ { y = checkData(Ra, method = "xts") + columns = ncol(y) + rows = nrow(y) + columnnames = colnames(y) + rownames = rownames(y) + + # for each column, do the following: + for(column in 1:columns) { + x = y[,column] + + z = c(as.numeric(SharpeRatio.annualized(x)), + as.numeric(LoSharpe(x)), + as.numeric(Return.annualized(x)),as.numeric(StdDev.annualized(x)),as.numeric(se.LoSharpe(x))) + + znames = c( + "William Sharpe Ratio", + "Andrew Lo Sharpe Ratio", + "Annualized Return", + "Annualized Standard Deviation","Sharpe Ratio Standard Error(95%)" + ) + if(column == 1) { + resultingtable = data.frame(Value = z, row.names = znames) + } + else { + nextcolumn = data.frame(Value = z, row.names = znames) + resultingtable = cbind(resultingtable, nextcolumn) + } + } + colnames(resultingtable) = columnnames + ans = base::round(resultingtable, digits) + ans + + + } Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/LoSharpe.Rd 2013-09-07 14:58:14 UTC (rev 3020) @@ -0,0 +1,71 @@ +\name{LoSharpe} +\alias{LoSharpe} +\title{Andrew Lo Sharpe Ratio} +\usage{ + LoSharpe(Ra, Rf = 0, q = 3, ...) +} +\arguments{ + \item{Ra}{an xts, vector, matrix, data frame, timeSeries + or zoo object of daily asset returns} + + \item{Rf}{an xts, vector, matrix, data frame, timeSeries + or zoo object of annualized Risk Free Rate} + + \item{q}{Number of autocorrelated lag periods. Taken as 3 + (Default)} + + \item{\dots}{any other pass thru parameters} +} +\description{ + Although the Sharpe ratio has become part of the canon of + modern financial analysis, its applications typically do + not account for the fact that it is an estimated + quantity, subject to estimation errors that can be + substantial in some cases. 
+ + Many studies have documented various violations of the + assumption of IID returns for financial securities. + + Under the assumption of stationarity,a version of the + Central Limit Theorem can still be applied to the + estimator . +} +\details{ + The relationship between SR and SR(q) is somewhat more + involved for non- IID returns because the variance of + Rt(q) is not just the sum of the variances of component + returns but also includes all the covariances. + Specifically, under the assumption that returns \eqn{R_t} + are stationary, \deqn{ Var[(R_t)] = \sum \sum + Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum + (q-k)\rho(k) } Where \eqn{ \rho(k) = + Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order + autocorrelation coefficient of the series of returns.This + yields the following relationship between SR and SR(q): + and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where + : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 + \sum(q-k)\rho(k)] } Where, k belongs to 0 to q-1 SR(q) : + Estimated Lo Sharpe Ratio SR : Theoretical William Sharpe + Ratio +} +\examples{ +data(managers) +LoSharpe(managers,0,3) +} +\author{ + Shubhankit Mohan +} +\references{ + Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, + AIMR. 
+ \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} + + Andrew Lo,\emph{Sharpe Ratio may be Overstated} + \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{non-iid} +\keyword{ts} + Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/se.LoSharpe.Rd 2013-09-07 14:58:14 UTC (rev 3020) @@ -0,0 +1,74 @@ +\name{se.LoSharpe} +\alias{se.LoSharpe} +\title{Andrew Lo Sharpe Ratio Statistics} +\usage{ + se.LoSharpe(Ra, Rf = 0, q = 3, ...) +} +\arguments{ + \item{Ra}{an xts, vector, matrix, data frame, timeSeries + or zoo object of daily asset returns} + + \item{Rf}{an xts, vector, matrix, data frame, timeSeries + or zoo object of annualized Risk Free Rate} + + \item{q}{Number of autocorrelated lag periods. Taken as 3 + (Default)} + + \item{\dots}{any other pass thru parameters} +} +\description{ + Although the Sharpe ratio has become part of the canon of + modern financial analysis, its applications typically do + not account for the fact that it is an estimated + quantity, subject to estimation errors which can be + substantial in some cases. + + Many studies have documented various violations of the + assumption of IID returns for financial securities. + + Under the assumption of stationarity,a version of the + Central Limit Theorem can still be applied to the + estimator . +} +\details{ + The relationship between SR and SR(q) is somewhat more + involved for non- IID returns because the variance of + Rt(q) is not just the sum of the variances of component + returns but also includes all the covariances. 
+ Specifically, under the assumption that returns \eqn{R_t} + are stationary, \deqn{ Var[(R_t)] = \sum \sum + Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum + (q-k)\rho(k) } Where \eqn{ \rho(k) = + Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order + autocorrelation coefficient of the series of returns.This + yields the following relationship between SR and SR(q): + and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where + : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 + \sum(q-k)\rho(k)] } Where k belongs to 0 to q-1 Under the + assumption of assumption of asymptotic variance of SR(q), + the standard error for the Sharpe Ratio Esitmator can be + computed as: \deqn{SE(SR(q)) = \sqrt((1+SR^2/2)/T)} SR(q) + : Estimated Lo Sharpe Ratio SR : Theoretical William + Sharpe Ratio +} +\examples{ +data(managers) +se.LoSharpe(managers,0,3) +} +\author{ + Shubhankit Mohan +} +\references{ + Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, + AIMR. + \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} + + Andrew Lo,\emph{Sharpe Ratio may be Overstated} + \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{non-iid} +\keyword{ts} + Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.Sharpe.Rd 2013-09-07 14:58:14 UTC (rev 3020) @@ -0,0 +1,88 @@ +\name{table.Sharpe} +\alias{table.Sharpe} +\title{Sharpe Ratio Statistics Summary} +\usage{ + table.Sharpe(Ra, Rf = 0, q = 3, digits = 4, ...) 
+} +\arguments{ + \item{Ra}{an xts, vector, matrix, data frame, timeSeries + or zoo object of daily asset returns} + + \item{Rf}{an xts, vector, matrix, data frame, timeSeries + or zoo object of annualized Risk Free Rate} + + \item{q}{Number of autocorrelated lag periods. Taken as 3 + (Default)} + + \item{digits}{Round off Numerical Value} + + \item{\dots}{any other pass thru parameters} +} +\description{ + The Sharpe ratio is simply the return per unit of risk + (represented by variability). In the classic case, the + unit of risk is the standard deviation of the returns. + + \deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} + + William Sharpe now recommends + \code{\link{InformationRatio}} preferentially to the + original Sharpe Ratio. + + The higher the Sharpe ratio, the better the combined + performance of "risk" and return. + + As noted, the traditional Sharpe Ratio is a risk-adjusted + measure of return that uses standard deviation to + represent risk. Although the Sharpe ratio has become part + of the canon of modern financial analysis, its + applications typically do not account for the fact that + it is an estimated quantity, subject to estimation errors + that can be substantial in some cases. + + Many studies have documented various violations of the + assumption of IID returns for financial securities. + + Under the assumption of stationarity,a version of the + Central Limit Theorem can still be applied to the + estimator . +} +\details{ + The relationship between SR and SR(q) is somewhat more + involved for non- IID returns because the variance of + Rt(q) is not just the sum of the variances of component + returns but also includes all the covariances. 
+ Specifically, under the assumption that returns \eqn{R_t} + are stationary, \deqn{ Var[(R_t)] = \sum \sum + Cov(R(t-i),R(t-j)) = q{\sigma^2} + 2{\sigma^2} \sum + (q-k)\rho(k) } Where \eqn{ \rho(k) = + Cov(R(t),R(t-k))/Var[(R_t)]} is the \eqn{k^{th}} order + autocorrelation coefficient of the series of returns.This + yields the following relationship between SR and SR(q): + and i,j belongs to 0 to q-1 \deqn{SR(q) = \eta(q) } Where + : \deqn{ }{\eta(q) = [q]/[\sqrt(q\sigma^2) + 2\sigma^2 + \sum(q-k)\rho(k)] } Where, k belongs to 0 to q-1 SR(q) : + Estimated Lo Sharpe Ratio SR : Theoretical William Sharpe + Ratio +} +\examples{ +data(managers) +table.Sharpe(managers,0,3) +} +\author{ + Shubhankit Mohan +} +\references{ + Andrew Lo,\emph{ The Statistics of Sharpe Ratio.}2002, + AIMR. + \url{http://papers.ssrn.com/sol3/papers.cfm?abstract_id=377260} + + Andrew Lo,\emph{Sharpe Ratio may be Overstated} + \url{http://www.risk.net/risk-magazine/feature/1506463/lo-sharpe-ratios-overstated} +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{non-iid} +\keyword{ts} + Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.pdf =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Sat Sep 7 21:45:19 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 21:45:19 +0200 (CEST) Subject: [Returnanalytics-commits] r3021 - pkg/PortfolioAnalytics/R Message-ID: <20130907194519.59D33184289@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-07 21:45:19 +0200 (Sat, 07 Sep 2013) New Revision: 3021 Modified: pkg/PortfolioAnalytics/R/generics.R Log: Modifying generic functions to round weights for printing. 
Modified: pkg/PortfolioAnalytics/R/generics.R =================================================================== --- pkg/PortfolioAnalytics/R/generics.R 2013-09-07 14:58:14 UTC (rev 3020) +++ pkg/PortfolioAnalytics/R/generics.R 2013-09-07 19:45:19 UTC (rev 3021) @@ -254,7 +254,7 @@ # get optimal weights cat("Optimal Weights:\n") - print.default(x$weights, digits=digits) + print.default(round(x$weights, digits=digits), digits=digits) cat("\n") # get objective measure @@ -301,7 +301,7 @@ # get optimal weights cat("Optimal Weights:\n") - print.default(x$weights, digits=digits) + print.default(round(x$weights, digits=digits), digits=digits) cat("\n") # get objective measures @@ -348,7 +348,7 @@ # get optimal weights cat("Optimal Weights:\n") - print.default(x$weights, digits=digits) + print.default(round(x$weights, digits=digits), digits=digits) cat("\n") # get objective measures @@ -395,7 +395,7 @@ # get optimal weights cat("Optimal Weights:\n") - print.default(x$weights, digits=digits) + print.default(round(x$weights, digits=digits), digits=digits) cat("\n") # get objective measures @@ -442,7 +442,7 @@ # get optimal weights cat("Optimal Weights:\n") - print.default(x$weights, digits=digits) + print.default(round(x$weights, digits=digits), digits=digits) cat("\n") # get objective measures @@ -490,7 +490,7 @@ # get optimal weights cat("Optimal Weights:\n") - print.default(object$weights) + print.default(round(object$weights, digits=4)) cat("\n") # objective measures @@ -742,7 +742,7 @@ colnames(wts) <- gsub("w.", "", colnames(wts)) rownames(wts) <- 1:nrow(object$frontier) cat("Weights along the efficient frontier:\n") - print(wts) + print(round(wts, digits=digits)) cat("\n") # Risk and return From noreply at r-forge.r-project.org Sat Sep 7 22:45:34 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 7 Sep 2013 22:45:34 +0200 (CEST) Subject: [Returnanalytics-commits] r3022 - pkg/PortfolioAnalytics/sandbox Message-ID: 
<20130907204534.236A8185916@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-07 22:45:33 +0200 (Sat, 07 Sep 2013) New Revision: 3022 Modified: pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R Log: Added example of weight concentration penalty using solve.QP manually. Modified: pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R 2013-09-07 19:45:19 UTC (rev 3021) +++ pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R 2013-09-07 20:45:33 UTC (rev 3022) @@ -59,3 +59,51 @@ opt4 <- optimize.portfolio(R=R, portfolio=conc, optimize_method="ROI", trace=TRUE) opt4 chart.Weights(opt4) + +##### +# Use solve.QP manually +library(quadprog) + +# number of assets +N <- ncol(R) + +# concentration aversion parameter +lambda_hhi <- 2 + +# Quadratic objective +Q <- 2*var(R) + lambda_hhi * diag(N) + +# Constraints matrix and rhs for full investment and long only +Amat <- cbind(rep(1, N), diag(N), -diag(N)) +rhs <- c(1, rep(0, N), rep(-1, N)) + +sol <- solve.QP(Dmat=Q, dvec=rep(0, N), Amat=Amat, bvec=rhs, meq=1) +sol$solution + +conc <- add.objective(portfolio=init, type="weight_concentration", name="HHI", + conc_aversion=2) + +opt <- optimize.portfolio(R=R, portfolio=conc, optimize_method="ROI", trace=TRUE) +all.equal(opt$weights, sol$solution, check.attributes=F) + +# concentration aversion parameter by group +lambda_hhi <- c(0.1, 0.05, 0.1, 0) + +hhi1 <- diag(N) +hhi1[3:8,] <- 0 + +hhi2 <- diag(N) +hhi2[c(1:2, 5:8),] <- 0 + +hhi3 <- diag(N) +hhi3[c(1:4, 7:8),] <- 0 + +hhi4 <- diag(N) +hhi4[1:6,] <- 0 + +Q <- 2*var(R) + lambda_hhi[1]*hhi1 + lambda_hhi[2]*hhi2 + lambda_hhi[3]*hhi3 + lambda_hhi[4]*hhi4 + +sol <- solve.QP(Dmat=Q, dvec=rep(0, N), Amat=Amat, bvec=rhs, meq=1) +sol$solution +all.equal(opt3$weights, sol$solution, check.attributes=F) + From noreply at r-forge.r-project.org Sun Sep 8 04:58:09 2013 From: noreply at r-forge.r-project.org 
(noreply at r-forge.r-project.org) Date: Sun, 8 Sep 2013 04:58:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3023 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R Message-ID: <20130908025810.0968818468D@r-forge.r-project.org> Author: shubhanm Date: 2013-09-08 04:58:07 +0200 (Sun, 08 Sep 2013) New Revision: 3023 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.GLM.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R Log: crosschecked with literature formula Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.GLM.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.GLM.R 2013-09-07 20:45:33 UTC (rev 3022) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.GLM.R 2013-09-08 02:58:07 UTC (rev 3023) @@ -50,7 +50,7 @@ clean.GLM <- function(column.R,q=3) { ma.coeff = as.numeric((arma(column.R,order=c(0,q)))$coef[1:q]) - column.glm = ma.coeff[q]*lag(column.R,q) + column.glm = (1-ma.coeff[q])*lag(column.R,q) return(column.glm) } Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R 2013-09-07 20:45:33 UTC (rev 3022) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R 2013-09-08 02:58:07 UTC (rev 3023) @@ -44,12 +44,16 @@ x = y[,column] x=na.omit(x) skew = skewness(x) - arma.coeff= arma(x,order=c(0,n)) + # arma.coeff= arma(x,order=c(0,n)) kurt= kurtosis(x) - z = c(skew, - ((sum(as.numeric(arma.coeff$coef[1:n])^2)^1.5)*(skew/(sum(as.numeric(arma.coeff$coef[1:n])^3)))), - kurt, - 
(-kurt*(sum(as.numeric(arma.coeff$coef[1:n])^2)^2-6*(sum(as.numeric(arma.coeff$coef[1:n])^2)*sum(as.numeric(arma.coeff$coef[1:n])^2)))/(sum(as.numeric(arma.coeff$coef[1:n])^4)))) + # z = c(skew, + # ((sum(as.numeric(arma.coeff$coef[1:n])^2)^1.5)*(skew/(sum(as.numeric(arma.coeff$coef[1:n])^3)))), + # kurt, + # (abs(kurt*(sum(as.numeric(arma.coeff$coef[1:n])^2)^2/6-(sum(as.numeric(arma.coeff$coef[1:n])^2)*sum(as.numeric(arma.coeff$coef[1:n])^2)))/(sum(as.numeric(arma.coeff$coef[1:n])^4))))) + aa=Return.GLM(x) + skew1=skewness(aa) + kurt1=kurtosis(aa) + z=c(skew,skew1,kurt,kurt1) znames = c( "Skewness ( Orignal) ", "Skewness (Unsmooth)", From noreply at r-forge.r-project.org Sun Sep 8 13:27:59 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 8 Sep 2013 13:27:59 +0200 (CEST) Subject: [Returnanalytics-commits] r3024 - pkg/PortfolioAttribution Message-ID: <20130908112759.4C2CD18544A@r-forge.r-project.org> Author: ababii Date: 2013-09-08 13:27:58 +0200 (Sun, 08 Sep 2013) New Revision: 3024 Modified: pkg/PortfolioAttribution/DESCRIPTION Log: - update description Modified: pkg/PortfolioAttribution/DESCRIPTION =================================================================== --- pkg/PortfolioAttribution/DESCRIPTION 2013-09-08 02:58:07 UTC (rev 3023) +++ pkg/PortfolioAttribution/DESCRIPTION 2013-09-08 11:27:58 UTC (rev 3024) @@ -4,8 +4,10 @@ Version: 0.2 Date: $Date: 2012-06-06 15:18:48 -0500 (Wed, 06 Jun 2012) $ Author: Andrii Babii -Maintainer: Brian G. Peterson -Description: Portfolio Attribution methods from Bacon, Carino, etc. GSoC 2012 project. +Maintainer: Andrii Babii +Description: This package provides functions for the ex-post Portfolio Attribution methods +from Bacon (2004), Carino (2009), etc. The package was created as a part of the +Google Summer of Code (GSoC) 2012 project. 
Depends: R (>= 2.14.0), zoo, @@ -15,7 +17,7 @@ plyr License: GPL URL: http://r-forge.r-project.org/projects/returnanalytics/ -Copyright: (c) 2004-2012 +Copyright: (c) 2004-2013 Collate: 'Attribution.geometric.R' 'attribution.levels.R' From noreply at r-forge.r-project.org Sun Sep 8 14:58:12 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 8 Sep 2013 14:58:12 +0200 (CEST) Subject: [Returnanalytics-commits] r3025 - pkg/PerformanceAnalytics/sandbox/pulkit/R Message-ID: <20130908125812.454DC184C67@r-forge.r-project.org> Author: pulkit Date: 2013-09-08 14:58:11 +0200 (Sun, 08 Sep 2013) New Revision: 3025 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R pkg/PerformanceAnalytics/sandbox/pulkit/R/GoldenSection.R pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R pkg/PerformanceAnalytics/sandbox/pulkit/R/TuW.R pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R Log: Handling NA values and other errors in Triple Penance and Benchmark Plots Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -47,6 +47,9 @@ #'chart.BenchmarkSR(edhec,vs="strategies") #'chart.BenchmarkSR(edhec,vs="sharpe") #' +#'data(managers) +#'chart.BenchmarkSR(managers,vs="strategies") +#' #'@export chart.BenchmarkSR<-function(R=NULL,S=NULL,main=NULL,ylab = NULL,xlab = NULL,element.color="darkgrey",lwd = 2,pch = 1,cex = 
1,cex.axis=0.8,cex.lab = 1,cex.main = 1,vs=c("sharpe","correlation","strategies"),xlim = NULL,ylim = NULL,...){ @@ -69,7 +72,7 @@ if(!is.null(R)){ x = checkData(R) columns = ncol(x) - avgSR = mean(SharpeRatio(R)) + avgSR = mean(SharpeRatio(R,FUN="StdDev")) } else{ if(is.null(avgSR) | is.null(S)){ Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -25,6 +25,8 @@ #' #'data(edhec) #'BenchmarkSR(edhec) #expected 0.393797 +#'data(managers) +#'BenchmarkSR(managers) # expected 0.8110536 #' #'@export #' @@ -44,7 +46,7 @@ if(columns == 1){ stop("The number of return series should be greater than 1") } - SR = SharpeRatio(x) + SR = SharpeRatio(x,FUN="StdDev") sr_avg = mean(SR) corr = table.Correlation(R,R) corr_avg = 0 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -115,7 +115,6 @@ } } beta_dd = sum((as.numeric(x[index])-x)*q)/CDaR(Rm,p=p) - print((as.numeric(x[index])-x)*q) return(beta_dd) } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/GoldenSection.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/GoldenSection.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/GoldenSection.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -23,7 +23,7 @@ #' @references Bailey, David H. and Lopez de Prado, Marcos, Drawdown-Based Stop-Outs and the "Triple Penance" Rule(January 1, 2013). 
#' #'@export -golden_section<-function(a,b,minimum = TRUE,function_name,...){ +golden_section<-function(a,b,function_name,minimum = TRUE,...){ # DESCRIPTION # A function to perform the golden search algorithm on the provided function Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -50,7 +50,7 @@ #' MaxDD(edhec,0.95,"ar") #' MaxDD(edhec[,1],0.95,"normal") #expected values 4.241799 6.618966 #'@export -MaxDD<-function(R,confidence,type=c("ar","normal"),...) +MaxDD<-function(R,confidence=0.95,type=c("ar","normal"),...) { # DESCRIPTION: @@ -65,36 +65,35 @@ # FUNCTION: x = checkData(R) - + x = na.omit(x) if(ncol(x)==1 || is.null(R) || is.vector(R)){ - type = type[1] calcul = FALSE for(i in (1:length(x))){ if(!is.na(x[i])){ calcul = TRUE } } - x = na.omit(x) if(!calcul){ result = NA } else{ - if(type=="ar"){ + if(type[1]=="ar"){ result = get_minq(x,confidence) } - if(type=="normal"){ + if(type[1]=="normal"){ result = dd_norm(x,confidence) } } return(result) } - if(type=="ar"){ + if(type[1]=="ar"){ result = apply(x,MARGIN = 2,get_minq,confidence) } - if(type=="normal"){ + if(type[1]=="normal"){ result = apply(x,MARGIN = 2,dd_norm,confidence) } + result = round(result,3) rownames(result) = c("MaxDD(in %)","t*") return(result) } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/MinTRL.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -51,6 +51,9 @@ #'MinTrackRecord(edhec[,1],refSR=0.1,Rf = 0.04/12) #'MinTrackRecord(refSR = 1/12^0.5,Rf = 0,p=0.95,sr = 2/12^0.5,sk=-0.72,kr=5.78) #'MinTrackRecord(edhec[,1:2],refSR = 
c(0.28,0.24)) +#' +#'data(managers) +#'MinTrackRecord(managers,refSR = 0) #'@export #' MinTrackRecord<-function(R = NULL, refSR,Rf=0,p = 0.95, weights = NULL,sr = NULL,sk = NULL, kr = NULL, ...){ Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/ProbSharpeRatio.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -44,7 +44,10 @@ #' data(edhec) #' ProbSharpeRatio(edhec[,1],refSR = 0.23) #' ProbSharpeRatio(refSR = 1/12^0.5,Rf = 0,p=0.95,sr = 2/12^0.5,sk=-0.72,kr=5.78,n=59) -#' ProbSharpeRatio(edhec[,1:2],refSR = c(0.28,0.24)) +#' ProbSharpeRatio(edhec[,1:2],refSR = c(0.28,0.24)) +#' +#' data(managers) +#' ProbSharpeRatio(managers,0) #'@export ProbSharpeRatio<- Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -46,7 +46,8 @@ #' #'data(edhec) #'chart.SRIndifference(edhec) -#' +#'data(managers) +#'chart.SRIndifference(managers) #'@export chart.SRIndifference<-function(R,reference.grid = TRUE, ylab = NULL,xlab = NULL,main = "Sharpe Ratio Indifference Curve",element.color = "darkgrey",lwd = 2,pch = 1,cex = 1,cex.axis = 0.8,cex.lab = 1,cex.main = 1,ylim = NULL,xlim = NULL,...){ @@ -74,7 +75,7 @@ if(columns == 1){ stop("The number of return series should be greater 1 ") } - SR = SharpeRatio(x) + SR = SharpeRatio(x,FUN="StdDev") sr_avg = mean(SR) corr = table.Correlation(R,R) corr_avg = 0 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R =================================================================== --- 
pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -61,15 +61,27 @@ mu = mean(x, na.rm = TRUE) sigma_infinity = StdDev(x) phi = cov(x[-1],x[-length(x)])/(cov(x[-length(x)])) + sigma = sigma_infinity*((1-phi^2)^0.5) dp0 = 0 q_value = 0 bets = 0 - while(q_value <= 0){ + if(phi>=0 & mu >0){ + while(q_value <= 0){ bets = bets + 1 q_value = getQ(bets, phi, mu, sigma, dp0, confidence) } - minQ = golden_section(0,bets,TRUE,getQ,phi,mu,sigma,dp0,confidence) + minQ = golden_section(0,bets,getQ,TRUE,phi,mu,sigma,dp0,confidence) + } + else{ + if(phi<0){ + warning(paste("NaN produced because phi < 0 ",colnames(x))) + } + if(mu<0){ + warning(paste("NaN produced because mu < 0 ",colnames(x))) + } + minQ = list(value=NaN,x=NaN) + } return(c(-minQ$value*100,minQ$x)) } @@ -120,11 +132,23 @@ dp0 = 0 q_value = 0 bets = 0 + if(phi >=0 & mu >0){ while(q_value <= 0){ bets = bets + 1 q_value = getQ(bets, phi, mu, sigma, dp0, confidence) } - TuW = golden_section(bets-1,bets,TRUE,diff_Q,phi,mu,sigma,dp0,confidence) + TuW = golden_section(bets-1,bets,diff_Q,TRUE,phi,mu,sigma,dp0,confidence) + } + else{ + if(phi<0){ + warning(paste("NaN produced because phi < 0 ",colnames(x))) + } + if(mu<0){ + warning(paste("NaN produced because mu < 0 ",colnames(x))) + } + + TuW = list(x=NaN) + } return(TuW$x) } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/TuW.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/TuW.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/TuW.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -37,9 +37,9 @@ #' TuW(edhec[,1],0.95,"normal") # expected value 103.2573 #'@export -TuW<-function(R,confidence,type=c("ar","normal"),...){ +TuW<-function(R,confidence=0.95,type=c("ar","normal"),...){ x = checkData(R) - type = type[1] + x = na.omit(x) 
if(ncol(x)==1 || is.null(R) || is.vector(R)){ calcul = FALSE for(i in (1:length(x))){ @@ -47,30 +47,30 @@ calcul = TRUE } } - x = na.omit(x) if(!calcul){ result = NA } else{ - if(type=="ar"){ + if(type[1]=="ar"){ result = get_TuW(x,confidence) } - if(type=="normal"){ + if(type[1]=="normal"){ result = tuw_norm(x,confidence) } } return(result) } else{ - if(type=="ar"){ + if(type[1]=="ar"){ result=apply(x,MARGIN = 2, get_TuW,confidence) } - if(type=="normal"){ + if(type[1]=="normal"){ result=apply(x,MARGIN = 2, tuw_norm,confidence) } result<-as.data.frame(result) result<-t(result) + result<-round(result,3) rownames(result)=paste("Max Time Under Water") return(result) } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R 2013-09-08 11:27:58 UTC (rev 3024) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R 2013-09-08 12:58:11 UTC (rev 3025) @@ -26,6 +26,7 @@ # # Function: x = checkData(R) + x = na.omit(x) columns = ncol(x) columnnames = colnames(x) rownames = c("mean","stdDev","phi","sigma","MaxDD(in %)","t*","MaxTuW","Penance") From noreply at r-forge.r-project.org Sun Sep 8 20:41:36 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 8 Sep 2013 20:41:36 +0200 (CEST) Subject: [Returnanalytics-commits] r3026 - in pkg/PortfolioAnalytics: . 
R man sandbox Message-ID: <20130908184136.51AE418568C@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-08 20:41:35 +0200 (Sun, 08 Sep 2013) New Revision: 3026 Added: pkg/PortfolioAnalytics/R/charts.multiple.R pkg/PortfolioAnalytics/R/utility.combine.R pkg/PortfolioAnalytics/man/optimizations.combine.Rd pkg/PortfolioAnalytics/man/portfolios.combine.Rd pkg/PortfolioAnalytics/sandbox/testing_mult_opt_weights.R Modified: pkg/PortfolioAnalytics/DESCRIPTION pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/chart.Weights.R pkg/PortfolioAnalytics/R/extractstats.R pkg/PortfolioAnalytics/man/chart.Weights.Rd Log: Adding functions to plot the weights of multiple optimize.portfolio objects Modified: pkg/PortfolioAnalytics/DESCRIPTION =================================================================== --- pkg/PortfolioAnalytics/DESCRIPTION 2013-09-08 12:58:11 UTC (rev 3025) +++ pkg/PortfolioAnalytics/DESCRIPTION 2013-09-08 18:41:35 UTC (rev 3026) @@ -56,3 +56,5 @@ 'charts.efficient.frontier.R' 'charts.risk.R' 'charts.groups.R' + 'charts.multiple.R' + 'utility.combine.R' Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-08 12:58:11 UTC (rev 3025) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-08 18:41:35 UTC (rev 3026) @@ -1,3 +1,5 @@ +export("cex.legend=0.8,") +export("colorset=NULL,") export(add.constraint) export(add.objective) export(applyFUN) @@ -9,7 +11,6 @@ export(chart.RiskBudget) export(chart.RiskReward) export(chart.Weights.EF) -export(chart.Weights) export(constrained_group_tmp) export(constrained_objective_v2) export(constrained_objective) @@ -36,16 +37,20 @@ export(is.constraint) export(is.objective) export(is.portfolio) +export(legend.loc="topright",) export(meanetl.efficient.frontier) export(meanvar.efficient.frontier) export(minmax_objective) export(objective) +export(optimizations.combine) export(optimize.portfolio_v2) 
export(optimize.portfolio.parallel) export(optimize.portfolio.rebalancing) export(optimize.portfolio) +export(plot.type="line") export(portfolio_risk_objective) export(portfolio.spec) +export(portfolios.combine) export(pos_limit_fail) export(position_limit_constraint) export(quadratic_utility_objective) @@ -84,6 +89,7 @@ S3method(chart.RiskReward,optimize.portfolio.pso) S3method(chart.RiskReward,optimize.portfolio.random) S3method(chart.RiskReward,optimize.portfolio.ROI) +S3method(chart.Weights,opt.list) S3method(chart.Weights,optimize.portfolio.DEoptim) S3method(chart.Weights,optimize.portfolio.GenSA) S3method(chart.Weights,optimize.portfolio.pso) @@ -97,6 +103,7 @@ S3method(extractStats,optimize.portfolio.pso) S3method(extractStats,optimize.portfolio.random) S3method(extractStats,optimize.portfolio.ROI) +S3method(extractWeights,opt.list) S3method(extractWeights,optimize.portfolio.rebalancing) S3method(extractWeights,optimize.portfolio) S3method(plot,optimize.portfolio.DEoptim) Modified: pkg/PortfolioAnalytics/R/chart.Weights.R =================================================================== --- pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-08 12:58:11 UTC (rev 3025) +++ pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-08 18:41:35 UTC (rev 3026) @@ -18,11 +18,15 @@ #' @param cex.lab The magnification to be used for x and y labels relative to the current setting of \code{cex} #' @param element.color color for the default plot lines #' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} +#' @param colorset color palette or vector of colors to use +#' @param legend.loc location of the legend. 
If NULL, the legend will not be plotted +#' @param plot.type "line" or "barplot" #' @seealso \code{\link{optimize.portfolio}} #' @rdname chart.Weights #' @name chart.Weights -#' @aliases chart.Weights.optimize.portfolio.ROI chart.Weights.optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.pso chart.Weights.optimize.portfolio.RP chart.Weights.optimize.portfolio.GenSA +#' @aliases chart.Weights.optimize.portfolio.ROI chart.Weights.optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.pso chart.Weights.optimize.portfolio.RP chart.Weights.optimize.portfolio.GenSA chart.Weights.opt.list #' @export +#' colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line" chart.Weights <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ UseMethod("chart.Weights") } Added: pkg/PortfolioAnalytics/R/charts.multiple.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.multiple.R (rev 0) +++ pkg/PortfolioAnalytics/R/charts.multiple.R 2013-09-08 18:41:35 UTC (rev 3026) @@ -0,0 +1,65 @@ +# compare optimal weights of multiple portfolios + +#' @method chart.Weights opt.list +#' @S3method chart.Weights opt.list +#' @export +chart.Weights.opt.list <- function(object, neighbors=NULL, ..., main="Weights", las=3, xlab=NULL, cex.lab=1, element.color="darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line"){ + if(!inherits(object, "opt.list")) stop("object must be of class 'opt.list'") + + if(plot.type %in% c("bar", "barplot")){ + barplotOptWeights(object=object, main=main, las=las, xlab=xlab, cex.lab=cex.lab, element.color=element.color, cex.axis=cex.axis, colorset=colorset, legend.loc=legend.loc, cex.legend=cex.legend, ...) 
+ } else if(plot.type == "line"){ + + # get the optimal weights in a matrix + weights_mat <- extractWeights.opt.list(object) + opt_names <- rownames(weights_mat) + + columnnames <- colnames(weights_mat) + numassets <- length(columnnames) + + if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + if(is.null(colorset)) colorset=1:nrow(weights_mat) + if(length(colorset) != nrow(weights_mat)) colorset <- rep(colorset[1], nrow(weights_mat)) + plot(weights_mat[1,], type="n", axes=FALSE, xlab='', ylab="Weights", main=main, ...) + for(i in 1:nrow(weights_mat)){ + points(weights_mat[i,], type="b", col=colorset[i], lty=1) + } + if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, col=colorset, bty="n", lty=1, cex=cex.legend) + axis(2, cex.axis=cex.axis, col=element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) + box(col=element.color) + } +} + +barplotOptWeights <- function(object, ..., main="Weights", las=3, xlab=NULL, cex.lab=1, element.color="darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8){ + if(!inherits(object, "opt.list")) stop("object must be of class 'opt.list'") + + # get the optimal weights in a matrix + weights_mat <- extractWeights.opt.list(object) + opt_names <- rownames(weights_mat) + + if(is.null(colorset)) colorset <- 1:nrow(weights_mat) + + barplot(weights_mat, beside=TRUE, main=main, cex.axis=cex.axis, cex.names=cex.lab, las=las, col=colorset, ...) 
+ if(!is.null(legend.loc)){ + legend(legend.loc, legend=opt_names, fill=colorset, bty="n", cex=cex.legend) + } + box(col=element.color) +} Modified: pkg/PortfolioAnalytics/R/extractstats.R =================================================================== --- pkg/PortfolioAnalytics/R/extractstats.R 2013-09-08 12:58:11 UTC (rev 3025) +++ pkg/PortfolioAnalytics/R/extractstats.R 2013-09-08 18:41:35 UTC (rev 3026) @@ -366,3 +366,32 @@ ) } +#' @method extractWeights opt.list +#' @S3method extractWeights opt.list +#' @export +extractWeights.opt.list <- function(object, ...){ + # get the optimal weights of each optimization in a list + weights_list <- list() + for(i in 1:length(object)){ + weights_list[[i]] <- object[[i]]$weights + } + + # get/set the names in the object + opt_names <- names(object) + if(is.null(opt_names)) opt_names <- paste("opt", 1:length(object)) + + # get the names of each element in weights_list + weights_names <- unlist(lapply(weights_list, names)) + + # unique names in weights_names + names_unique <- unique(weights_names) + + # create a matrix of zeros to fill in with weights later + weights_mat <- matrix(0, nrow=length(weights_list), ncol=length(names_unique), + dimnames=list(opt_names, names_unique)) + for(i in 1:length(weights_list)){ + pm <- pmatch(x=names(weights_list[[i]]), table=names_unique) + weights_mat[i, pm] <- weights_list[[i]] + } + return(weights_mat) +} Added: pkg/PortfolioAnalytics/R/utility.combine.R =================================================================== --- pkg/PortfolioAnalytics/R/utility.combine.R (rev 0) +++ pkg/PortfolioAnalytics/R/utility.combine.R 2013-09-08 18:41:35 UTC (rev 3026) @@ -0,0 +1,35 @@ + + +#' Combine objects created by optimize.portfolio +#' +#' This function takes a list of objects created by \code{\link{optimize.portfolio}} +#' and sets the class name attribute to 'opt.list' for use in generic functions +#' +#' @param x a list of objects created by \code{\link{optimize.portfolio}} +#' @return 
an \code{opt.list} object +#' @export +optimizations.combine <- function(x){ + if(!is.list(x)) stop("x must be passed in as a list") + for(i in 1:length(x)){ + if(!inherits(x[[i]], "optimize.portfolio")) stop("All objects in x must be of class 'optimize.portfolio'") + } + class(x) <- "opt.list" + return(x) +} + +#' Combine objects created by portfolio +#' +#' This function takes a list of objects created by \code{\link{portfolio.spec}} +#' and sets the class name attribute to 'portfolio.list' for use in generic functions +#' +#' @param x a list of objects created by \code{\link{portfolio.spec}} +#' @return a \code{portfolio.list} object +#' @export +portfolios.combine <- function(x){ + if(!is.list(x)) stop("x must be passed in as a list") + for(i in 1:length(x)){ + if(!inherits(x[[i]], "portfolio")) stop("All objects in x must be of class 'portfolio'") + } + class(x) <- "portfolio.list" + return(x) +} Modified: pkg/PortfolioAnalytics/man/chart.Weights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-08 12:58:11 UTC (rev 3025) +++ pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-08 18:41:35 UTC (rev 3026) @@ -1,5 +1,6 @@ \name{chart.Weights} \alias{chart.Weights} +\alias{chart.Weights.opt.list} \alias{chart.Weights.optimize.portfolio.DEoptim} \alias{chart.Weights.optimize.portfolio.GenSA} \alias{chart.Weights.optimize.portfolio.pso} @@ -39,6 +40,13 @@ \item{cex.axis}{The magnification to be used for axis annotation relative to the current setting of \code{cex}} + + \item{colorset}{color palette or vector of colors to use} + + \item{legend.loc}{location of the legend. 
If NULL, the + legend will not be plotted} + + \item{plot.type}{"line" or "barplot"} } \description{ Chart the optimal weights and upper and lower bounds on Added: pkg/PortfolioAnalytics/man/optimizations.combine.Rd =================================================================== --- pkg/PortfolioAnalytics/man/optimizations.combine.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/optimizations.combine.Rd 2013-09-08 18:41:35 UTC (rev 3026) @@ -0,0 +1,19 @@ +\name{optimizations.combine} +\alias{optimizations.combine} +\title{Combine objects created by optimize.portfolio} +\usage{ + optimizations.combine(x) +} +\arguments{ + \item{x}{a list of objects created by + \code{\link{optimize.portfolio}}} +} +\value{ + an \code{opt.list} object +} +\description{ + This function takes a list of objects created by + \code{\link{optimize.portfolio}} and sets the class name + attribute to 'opt.list' for use in generic functions +} + Added: pkg/PortfolioAnalytics/man/portfolios.combine.Rd =================================================================== --- pkg/PortfolioAnalytics/man/portfolios.combine.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/portfolios.combine.Rd 2013-09-08 18:41:35 UTC (rev 3026) @@ -0,0 +1,20 @@ +\name{portfolios.combine} +\alias{portfolios.combine} +\title{Combine objects created by portfolio} +\usage{ + portfolios.combine(x) +} +\arguments{ + \item{x}{a list of objects created by + \code{\link{portfolio.spec}}} +} +\value{ + a \code{portfolio.list} object +} +\description{ + This function takes a list of objects created by + \code{\link{portfolio.spec}} and sets the class name + attribute to 'portfolio.list' for use in generic + functions +} + Added: pkg/PortfolioAnalytics/sandbox/testing_mult_opt_weights.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/testing_mult_opt_weights.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/testing_mult_opt_weights.R 2013-09-08 18:41:35 UTC (rev 3026) @@ -0,0 +1,47 @@ + 
+library(PortfolioAnalytics) +library(ROI) +library(ROI.plugin.glpk) +library(ROI.plugin.quadprog) + +# We should be able to compare portfolios with different constraints, +# objectives, and number of assets + +data(edhec) +R <- edhec[, 1:4] +funds <- colnames(R) + +##### Construct Portfolios ##### +# GMV long only +port.gmv.lo <- portfolio.spec(assets=funds) +port.gmv.lo <- add.constraint(portfolio=port.gmv.lo, type="full_investment") +port.gmv.lo <- add.constraint(portfolio=port.gmv.lo, type="long_only") +port.gmv.lo <- add.objective(portfolio=port.gmv.lo, type="risk", name="var") + +# GMV with shorting +port.gmv.short <- portfolio.spec(assets=funds) +port.gmv.short <- add.constraint(portfolio=port.gmv.short, type="full_investment") +port.gmv.short <- add.constraint(portfolio=port.gmv.short, type="box", min=-0.3, max=1) +port.gmv.short <- add.objective(portfolio=port.gmv.short, type="risk", name="var") + +# QU box constraints +port.qu <- portfolio.spec(assets=funds) +port.qu <- add.constraint(portfolio=port.qu, type="full_investment") +port.qu <- add.constraint(portfolio=port.qu, type="box", min=0.05, max=0.6) +port.qu <- add.objective(portfolio=port.qu, type="risk", name="var", risk_aversion=0.25) +port.qu <- add.objective(portfolio=port.qu, type="return", name="mean") + +##### Run Optimizations ##### +opt.gmv.lo <- optimize.portfolio(R=R, portfolio=port.gmv.lo, optimize_method="ROI", trace=TRUE) +opt.gmv.short <- optimize.portfolio(R=R, portfolio=port.gmv.short, optimize_method="ROI", trace=TRUE) +opt.qu <- optimize.portfolio(R=R, portfolio=port.qu, optimize_method="ROI", trace=TRUE) + + +opt <- optimizations.combine(list(GMV.LO=opt.gmv.lo, GMV.SHORT=opt.gmv.short, QU=opt.qu)) +class(opt) + +chart.Weights(opt, legend.loc="topleft", cex.legend=0.8, ylim=c(-0.3, 1)) + +chart.Weights(opt, plot.type="bar", cex.lab=0.8, legend.loc="topleft", cex.legend=0.8, ylim=c(-0.3, 1)) + +extractWeights(opt) From noreply at r-forge.r-project.org Sun Sep 8 21:08:29 2013 From: 
noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 8 Sep 2013 21:08:29 +0200 (CEST) Subject: [Returnanalytics-commits] r3027 - in pkg/PortfolioAnalytics: . R man Message-ID: <20130908190829.B45041848DC@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-08 21:08:29 +0200 (Sun, 08 Sep 2013) New Revision: 3027 Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/chart.Weights.R pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/R/charts.GenSA.R pkg/PortfolioAnalytics/R/charts.PSO.R pkg/PortfolioAnalytics/R/charts.ROI.R pkg/PortfolioAnalytics/R/charts.RP.R pkg/PortfolioAnalytics/R/charts.multiple.R pkg/PortfolioAnalytics/man/chart.Weights.Rd Log: Updating documentation for chart.Weights.* functions Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-08 18:41:35 UTC (rev 3026) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-08 19:08:29 UTC (rev 3027) @@ -1,5 +1,3 @@ -export("cex.legend=0.8,") -export("colorset=NULL,") export(add.constraint) export(add.objective) export(applyFUN) @@ -11,6 +9,7 @@ export(chart.RiskBudget) export(chart.RiskReward) export(chart.Weights.EF) +export(chart.Weights) export(constrained_group_tmp) export(constrained_objective_v2) export(constrained_objective) @@ -37,7 +36,6 @@ export(is.constraint) export(is.objective) export(is.portfolio) -export(legend.loc="topright",) export(meanetl.efficient.frontier) export(meanvar.efficient.frontier) export(minmax_objective) @@ -47,7 +45,6 @@ export(optimize.portfolio.parallel) export(optimize.portfolio.rebalancing) export(optimize.portfolio) -export(plot.type="line") export(portfolio_risk_objective) export(portfolio.spec) export(portfolios.combine) Modified: pkg/PortfolioAnalytics/R/chart.Weights.R =================================================================== --- pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-08 18:41:35 UTC (rev 3026) +++ 
pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-08 19:08:29 UTC (rev 3027) @@ -20,13 +20,13 @@ #' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} #' @param colorset color palette or vector of colors to use #' @param legend.loc location of the legend. If NULL, the legend will not be plotted +#' @param cex.legend The magnification to be used for legend annotation relative to the current setting of \code{cex} #' @param plot.type "line" or "barplot" #' @seealso \code{\link{optimize.portfolio}} #' @rdname chart.Weights #' @name chart.Weights -#' @aliases chart.Weights.optimize.portfolio.ROI chart.Weights.optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.pso chart.Weights.optimize.portfolio.RP chart.Weights.optimize.portfolio.GenSA chart.Weights.opt.list +#' @aliases chart.Weights.optimize.portfolio.ROI chart.Weights.optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.pso chart.Weights.optimize.portfolio.RP chart.Weights.optimize.portfolio.GenSA #' @export -#' colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line" chart.Weights <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ UseMethod("chart.Weights") } Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-08 18:41:35 UTC (rev 3026) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-08 19:08:29 UTC (rev 3027) @@ -81,9 +81,9 @@ box(col = element.color) } +#' @rdname chart.Weights #' @method chart.Weights optimize.portfolio.DEoptim #' @S3method chart.Weights optimize.portfolio.DEoptim -#' @export chart.Weights.optimize.portfolio.DEoptim <- chart.Weights.DE Modified: pkg/PortfolioAnalytics/R/charts.GenSA.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-08 18:41:35 
UTC (rev 3026) +++ pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-08 19:08:29 UTC (rev 3027) @@ -68,9 +68,9 @@ box(col = element.color) } +#' @rdname chart.Weights #' @method chart.Weights optimize.portfolio.GenSA #' @S3method chart.Weights optimize.portfolio.GenSA -#' @export chart.Weights.optimize.portfolio.GenSA <- chart.Weights.GenSA chart.Scatter.GenSA <- function(object, neighbors=NULL, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, element.color="darkgray", cex.axis=0.8, ylim=NULL, xlim=NULL, rp=FALSE){ Modified: pkg/PortfolioAnalytics/R/charts.PSO.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-08 18:41:35 UTC (rev 3026) +++ pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-08 19:08:29 UTC (rev 3027) @@ -68,9 +68,9 @@ box(col = element.color) } +#' @rdname chart.Weights #' @method chart.Weights optimize.portfolio.pso #' @S3method chart.Weights optimize.portfolio.pso -#' @export chart.Weights.optimize.portfolio.pso <- chart.Weights.pso chart.Scatter.pso <- function(object, neighbors=NULL, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ Modified: pkg/PortfolioAnalytics/R/charts.ROI.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-08 18:41:35 UTC (rev 3026) +++ pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-08 19:08:29 UTC (rev 3027) @@ -68,9 +68,9 @@ box(col = element.color) } +#' @rdname chart.Weights #' @method chart.Weights optimize.portfolio.ROI #' @S3method chart.Weights optimize.portfolio.ROI -#' @export chart.Weights.optimize.portfolio.ROI <- chart.Weights.ROI Modified: pkg/PortfolioAnalytics/R/charts.RP.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-08 18:41:35 UTC (rev 3026) +++ pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-08 19:08:29 UTC (rev 3027) @@ 
-82,9 +82,9 @@ box(col = element.color) } +#' @rdname chart.Weights #' @method chart.Weights optimize.portfolio.random #' @S3method chart.Weights optimize.portfolio.random -#' @export chart.Weights.optimize.portfolio.random <- chart.Weights.RP chart.Scatter.RP <- function(object, neighbors = NULL, ..., return.col='mean', risk.col='ES', chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ Modified: pkg/PortfolioAnalytics/R/charts.multiple.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.multiple.R 2013-09-08 18:41:35 UTC (rev 3026) +++ pkg/PortfolioAnalytics/R/charts.multiple.R 2013-09-08 19:08:29 UTC (rev 3027) @@ -1,8 +1,8 @@ # compare optimal weights of multiple portfolios +#' @rdname chart.Weights #' @method chart.Weights opt.list #' @S3method chart.Weights opt.list -#' @export chart.Weights.opt.list <- function(object, neighbors=NULL, ..., main="Weights", las=3, xlab=NULL, cex.lab=1, element.color="darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line"){ if(!inherits(object, "opt.list")) stop("object must be of class 'opt.list'") Modified: pkg/PortfolioAnalytics/man/chart.Weights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-08 18:41:35 UTC (rev 3026) +++ pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-08 19:08:29 UTC (rev 3027) @@ -1,16 +1,47 @@ -\name{chart.Weights} +\name{chart.Weights.optimize.portfolio.DEoptim} \alias{chart.Weights} \alias{chart.Weights.opt.list} \alias{chart.Weights.optimize.portfolio.DEoptim} \alias{chart.Weights.optimize.portfolio.GenSA} \alias{chart.Weights.optimize.portfolio.pso} +\alias{chart.Weights.optimize.portfolio.random} \alias{chart.Weights.optimize.portfolio.ROI} \alias{chart.Weights.optimize.portfolio.RP} \title{boxplot of the weights of the optimal portfolios} \usage{ + 
\method{chart.Weights}{optimize.portfolio.DEoptim} (object, neighbors = NULL, ..., main = "Weights", + las = 3, xlab = NULL, cex.lab = 1, + element.color = "darkgray", cex.axis = 0.8) + + \method{chart.Weights}{optimize.portfolio.random} (object, neighbors = NULL, ..., main = "Weights", + las = 3, xlab = NULL, cex.lab = 1, + element.color = "darkgray", cex.axis = 0.8) + + \method{chart.Weights}{optimize.portfolio.ROI} (object, + neighbors = NULL, ..., main = "Weights", las = 3, + xlab = NULL, cex.lab = 1, element.color = "darkgray", + cex.axis = 0.8) + + \method{chart.Weights}{optimize.portfolio.pso} (object, + neighbors = NULL, ..., main = "Weights", las = 3, + xlab = NULL, cex.lab = 1, element.color = "darkgray", + cex.axis = 0.8) + + \method{chart.Weights}{optimize.portfolio.GenSA} (object, + neighbors = NULL, ..., main = "Weights", las = 3, + xlab = NULL, cex.lab = 1, element.color = "darkgray", + cex.axis = 0.8) + chart.Weights(object, neighbors = NULL, ..., main = "Weights", las = 3, xlab = NULL, cex.lab = 1, element.color = "darkgray", cex.axis = 0.8) + + \method{chart.Weights}{opt.list} (object, + neighbors = NULL, ..., main = "Weights", las = 3, + xlab = NULL, cex.lab = 1, element.color = "darkgray", + cex.axis = 0.8, colorset = NULL, + legend.loc = "topright", cex.legend = 0.8, + plot.type = "line") } \arguments{ \item{object}{optimal portfolio object created by @@ -46,6 +77,9 @@ \item{legend.loc}{location of the legend. 
If NULL, the legend will not be plotted} + \item{cex.legend}{The magnification to be used for legend + annotation relative to the current setting of \code{cex}} + \item{plot.type}{"line" or "barplot"} } \description{ From noreply at r-forge.r-project.org Mon Sep 9 01:37:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 9 Sep 2013 01:37:27 +0200 (CEST) Subject: [Returnanalytics-commits] r3028 - pkg/PerformanceAnalytics/sandbox/pulkit/R Message-ID: <20130908233727.560E61844AC@r-forge.r-project.org> Author: pulkit Date: 2013-09-09 01:37:26 +0200 (Mon, 09 Sep 2013) New Revision: 3028 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/REM.R pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.SharpeEfficient.R pkg/PerformanceAnalytics/sandbox/pulkit/R/redd.R Log: Error handling in redd and REM and some progress in Sharpe Efficient Frontier Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/REM.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/REM.R 2013-09-08 19:08:29 UTC (rev 3027) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/REM.R 2013-09-08 23:37:26 UTC (rev 3028) @@ -46,7 +46,21 @@ if(nr != 1 && nr != n ){ stop("The number of rows of the returns and the risk free rate do not match") } - + + index = NULL + #ERROR handling for cases when lookback period is greater than the number of rows + for(i in 1:ncol(x)){ + if(length(na.omit(x[,i])) Author: rossbennett34 Date: 2013-09-09 04:07:57 +0200 (Mon, 09 Sep 2013) New Revision: 3029 Modified: pkg/PortfolioAnalytics/R/chart.Weights.R pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/R/charts.GenSA.R pkg/PortfolioAnalytics/R/charts.PSO.R pkg/PortfolioAnalytics/R/charts.ROI.R pkg/PortfolioAnalytics/R/charts.RP.R pkg/PortfolioAnalytics/man/chart.Weights.Rd Log: Adding option to plot the weights as a barplot Modified: pkg/PortfolioAnalytics/R/chart.Weights.R 
=================================================================== --- pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-08 23:37:26 UTC (rev 3028) +++ pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-09 02:07:57 UTC (rev 3029) @@ -31,3 +31,63 @@ UseMethod("chart.Weights") } +barplotWeights <- function(object, ..., main="Weights", las=3, xlab=NULL, cex.lab=1, element.color="darkgray", cex.axis=0.8, legend.loc="topright", cex.legend=0.8, colorset=NULL){ + weights <- object$weights + columnnames <- names(weights) + + if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + if(is.null(colorset)) colorset <- 1:length(weights) + barplot(height=weights, las=las, main=main, xlab=xlab, ylab="Weights", cex.axis=cex.axis, cex.names=cex.lab, col=colorset, ...) 
+ if(!is.null(legend.loc)){ + legend(legend.loc, legend=names(weights), cex=cex.legend, fill=colorset, bty="n") + } + box(col=element.color) +} + + +barplotWeights <- function(object, ..., main="Weights", las=3, xlab=NULL, cex.lab=1, element.color="darkgray", cex.axis=0.8, legend.loc="topright", cex.legend=0.8, colorset=NULL){ + weights <- object$weights + columnnames <- names(weights) + + if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + if(is.null(colorset)) colorset <- 1:length(weights) + barplot(height=weights, las=las, main=main, xlab=xlab, ylab="Weights", cex.axis=cex.axis, cex.names=cex.lab, col=colorset, ...) 
+ if(!is.null(legend.loc)){ + legend(legend.loc, legend=names(weights), cex=cex.legend, fill=colorset, bty="n") + } + box(col=element.color) +} Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-08 23:37:26 UTC (rev 3028) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-09 02:07:57 UTC (rev 3029) @@ -11,74 +11,79 @@ ############################################################################### -chart.Weights.DE <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ +chart.Weights.DE <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line"){ # Specific to the output of optimize.portfolio with optimize_method="DEoptim" if(!inherits(object, "optimize.portfolio.DEoptim")) stop("object must be of class 'optimize.portfolio.DEoptim'") - columnnames = names(object$weights) - numassets = length(columnnames) - - constraints <- get_constraints(object$portfolio) - - if(is.null(xlab)) - minmargin = 3 - else - minmargin = 5 - if(main=="") topmargin=1 else topmargin=4 - if(las > 1) {# set the bottom border to accommodate labels - bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab - if(bottommargin > 10 ) { - bottommargin<-10 - columnnames<-substr(columnnames,1,19) - # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + if(plot.type %in% c("bar", "barplot")){ + barplotWeights(object=object, ..., main=main, las=las, xlab=xlab, cex.lab=cex.lab, element.color=element.color, cex.axis=cex.axis, legend.loc=legend.loc, cex.legend=cex.legend, colorset=colorset) + } else if(plot.type == "line"){ + + columnnames = names(object$weights) + numassets = length(columnnames) + + constraints <- 
get_constraints(object$portfolio) + + if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ + # set ylim based on weights if box constraints contain Inf or -Inf + ylim <- range(object$weights) + } else { + # set ylim based on the range of box constraints min and max + ylim <- range(c(constraints$min, constraints$max)) + } + plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) + if(!any(is.infinite(constraints$min))){ + points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) + } + if(!any(is.infinite(constraints$max))){ + points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) + } + # if(!is.null(neighbors)){ + # if(is.vector(neighbors)){ + # xtract=extractStats(object) + # weightcols<-grep('w\\.',colnames(xtract)) #need \\. 
to get the dot + # if(length(neighbors)==1){ + # # overplot nearby portfolios defined by 'out' + # orderx = order(xtract[,"out"]) + # subsetx = head(xtract[orderx,], n=neighbors) + # for(i in 1:neighbors) points(subsetx[i,weightcols], type="b", col="lightblue") + # } else{ + # # assume we have a vector of portfolio numbers + # subsetx = xtract[neighbors,weightcols] + # for(i in 1:length(neighbors)) points(subsetx[i,], type="b", col="lightblue") + # } + # } + # if(is.matrix(neighbors) | is.data.frame(neighbors)){ + # # the user has likely passed in a matrix containing calculated values for risk.col and return.col + # nbweights<-grep('w\\.',colnames(neighbors)) #need \\. to get the dot + # for(i in 1:nrow(neighbors)) points(as.numeric(neighbors[i,nbweights]), type="b", col="lightblue") + # # note that here we need to get weight cols separately from the matrix, not from xtract + # # also note the need for as.numeric. points() doesn't like matrix inputs + # } + # } + + # points(object$weights, type="b", col="blue", pch=16) + axis(2, cex.axis = cex.axis, col = element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) + box(col = element.color) } - else { - bottommargin = minmargin - } - par(mar = c(bottommargin, 4, topmargin, 2) +.1) - if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ - # set ylim based on weights if box constraints contain Inf or -Inf - ylim <- range(object$weights) - } else { - # set ylim based on the range of box constraints min and max - ylim <- range(c(constraints$min, constraints$max)) - } - plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) 
- if(!any(is.infinite(constraints$min))){ - points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) - } - if(!any(is.infinite(constraints$max))){ - points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) - } - # if(!is.null(neighbors)){ - # if(is.vector(neighbors)){ - # xtract=extractStats(object) - # weightcols<-grep('w\\.',colnames(xtract)) #need \\. to get the dot - # if(length(neighbors)==1){ - # # overplot nearby portfolios defined by 'out' - # orderx = order(xtract[,"out"]) - # subsetx = head(xtract[orderx,], n=neighbors) - # for(i in 1:neighbors) points(subsetx[i,weightcols], type="b", col="lightblue") - # } else{ - # # assume we have a vector of portfolio numbers - # subsetx = xtract[neighbors,weightcols] - # for(i in 1:length(neighbors)) points(subsetx[i,], type="b", col="lightblue") - # } - # } - # if(is.matrix(neighbors) | is.data.frame(neighbors)){ - # # the user has likely passed in a matrix containing calculated values for risk.col and return.col - # nbweights<-grep('w\\.',colnames(neighbors)) #need \\. to get the dot - # for(i in 1:nrow(neighbors)) points(as.numeric(neighbors[i,nbweights]), type="b", col="lightblue") - # # note that here we need to get weight cols separately from the matrix, not from xtract - # # also note the need for as.numeric. 
points() doesn't like matrix inputs - # } - # } - - # points(object$weights, type="b", col="blue", pch=16) - axis(2, cex.axis = cex.axis, col = element.color) - axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) - box(col = element.color) } #' @rdname chart.Weights Modified: pkg/PortfolioAnalytics/R/charts.GenSA.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-08 23:37:26 UTC (rev 3028) +++ pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-09 02:07:57 UTC (rev 3029) @@ -1,71 +1,76 @@ -chart.Weights.GenSA <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ +chart.Weights.GenSA <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line"){ if(!inherits(object, "optimize.portfolio.GenSA")) stop("object must be of class 'optimize.portfolio.GenSA'") - columnnames = names(object$weights) - numassets = length(columnnames) - - constraints <- get_constraints(object$portfolio) - - if(is.null(xlab)) - minmargin = 3 - else - minmargin = 5 - if(main=="") topmargin=1 else topmargin=4 - if(las > 1) {# set the bottom border to accommodate labels - bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab - if(bottommargin > 10 ) { - bottommargin<-10 - columnnames<-substr(columnnames,1,19) - # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + if(plot.type %in% c("bar", "barplot")){ + barplotWeights(object=object, ..., main=main, las=las, xlab=xlab, cex.lab=cex.lab, element.color=element.color, cex.axis=cex.axis, legend.loc=legend.loc, cex.legend=cex.legend, colorset=colorset) + } else if(plot.type == "line"){ + + columnnames = names(object$weights) + numassets = length(columnnames) + 
+ constraints <- get_constraints(object$portfolio) + + if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ + # set ylim based on weights if box constraints contain Inf or -Inf + ylim <- range(object$weights) + } else { + # set ylim based on the range of box constraints min and max + ylim <- range(c(constraints$min, constraints$max)) + } + plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) + if(!any(is.infinite(constraints$min))){ + points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) + } + if(!any(is.infinite(constraints$max))){ + points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) + } + # if(!is.null(neighbors)){ + # if(is.vector(neighbors)){ + # xtract=extractStats(ROI) + # weightcols<-grep('w\\.',colnames(xtract)) #need \\. 
to get the dot + # if(length(neighbors)==1){ + # # overplot nearby portfolios defined by 'out' + # orderx = order(xtract[,"out"]) + # subsetx = head(xtract[orderx,], n=neighbors) + # for(i in 1:neighbors) points(subsetx[i,weightcols], type="b", col="lightblue") + # } else{ + # # assume we have a vector of portfolio numbers + # subsetx = xtract[neighbors,weightcols] + # for(i in 1:length(neighbors)) points(subsetx[i,], type="b", col="lightblue") + # } + # } + # if(is.matrix(neighbors) | is.data.frame(neighbors)){ + # # the user has likely passed in a matrix containing calculated values for risk.col and return.col + # nbweights<-grep('w\\.',colnames(neighbors)) #need \\. to get the dot + # for(i in 1:nrow(neighbors)) points(as.numeric(neighbors[i,nbweights]), type="b", col="lightblue") + # # note that here we need to get weight cols separately from the matrix, not from xtract + # # also note the need for as.numeric. points() doesn't like matrix inputs + # } + # } + # points(ROI$weights, type="b", col="blue", pch=16) + axis(2, cex.axis = cex.axis, col = element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) + box(col = element.color) } - else { - bottommargin = minmargin - } - par(mar = c(bottommargin, 4, topmargin, 2) +.1) - if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ - # set ylim based on weights if box constraints contain Inf or -Inf - ylim <- range(object$weights) - } else { - # set ylim based on the range of box constraints min and max - ylim <- range(c(constraints$min, constraints$max)) - } - plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) 
- if(!any(is.infinite(constraints$min))){ - points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) - } - if(!any(is.infinite(constraints$max))){ - points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) - } - # if(!is.null(neighbors)){ - # if(is.vector(neighbors)){ - # xtract=extractStats(ROI) - # weightcols<-grep('w\\.',colnames(xtract)) #need \\. to get the dot - # if(length(neighbors)==1){ - # # overplot nearby portfolios defined by 'out' - # orderx = order(xtract[,"out"]) - # subsetx = head(xtract[orderx,], n=neighbors) - # for(i in 1:neighbors) points(subsetx[i,weightcols], type="b", col="lightblue") - # } else{ - # # assume we have a vector of portfolio numbers - # subsetx = xtract[neighbors,weightcols] - # for(i in 1:length(neighbors)) points(subsetx[i,], type="b", col="lightblue") - # } - # } - # if(is.matrix(neighbors) | is.data.frame(neighbors)){ - # # the user has likely passed in a matrix containing calculated values for risk.col and return.col - # nbweights<-grep('w\\.',colnames(neighbors)) #need \\. to get the dot - # for(i in 1:nrow(neighbors)) points(as.numeric(neighbors[i,nbweights]), type="b", col="lightblue") - # # note that here we need to get weight cols separately from the matrix, not from xtract - # # also note the need for as.numeric. 
points() doesn't like matrix inputs - # } - # } - # points(ROI$weights, type="b", col="blue", pch=16) - axis(2, cex.axis = cex.axis, col = element.color) - axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) - box(col = element.color) } #' @rdname chart.Weights Modified: pkg/PortfolioAnalytics/R/charts.PSO.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-08 23:37:26 UTC (rev 3028) +++ pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-09 02:07:57 UTC (rev 3029) @@ -1,71 +1,76 @@ -chart.Weights.pso <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ +chart.Weights.pso <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line"){ if(!inherits(object, "optimize.portfolio.pso")) stop("object must be of class 'optimize.portfolio.pso'") - columnnames = names(object$weights) - numassets = length(columnnames) - - constraints <- get_constraints(object$portfolio) - - if(is.null(xlab)) - minmargin = 3 - else - minmargin = 5 - if(main=="") topmargin=1 else topmargin=4 - if(las > 1) {# set the bottom border to accommodate labels - bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab - if(bottommargin > 10 ) { - bottommargin<-10 - columnnames<-substr(columnnames,1,19) - # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + if(plot.type %in% c("bar", "barplot")){ + barplotWeights(object=object, ..., main=main, las=las, xlab=xlab, cex.lab=cex.lab, element.color=element.color, cex.axis=cex.axis, legend.loc=legend.loc, cex.legend=cex.legend, colorset=colorset) + } else if(plot.type == "line"){ + + columnnames = names(object$weights) + numassets = length(columnnames) + + constraints <- 
get_constraints(object$portfolio) + + if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ + # set ylim based on weights if box constraints contain Inf or -Inf + ylim <- range(object$weights) + } else { + # set ylim based on the range of box constraints min and max + ylim <- range(c(constraints$min, constraints$max)) + } + plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) + if(!any(is.infinite(constraints$min))){ + points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) + } + if(!any(is.infinite(constraints$max))){ + points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) + } + # if(!is.null(neighbors)){ + # if(is.vector(neighbors)){ + # xtract=extractStats(ROI) + # weightcols<-grep('w\\.',colnames(xtract)) #need \\. 
to get the dot + # if(length(neighbors)==1){ + # # overplot nearby portfolios defined by 'out' + # orderx = order(xtract[,"out"]) + # subsetx = head(xtract[orderx,], n=neighbors) + # for(i in 1:neighbors) points(subsetx[i,weightcols], type="b", col="lightblue") + # } else{ + # # assume we have a vector of portfolio numbers + # subsetx = xtract[neighbors,weightcols] + # for(i in 1:length(neighbors)) points(subsetx[i,], type="b", col="lightblue") + # } + # } + # if(is.matrix(neighbors) | is.data.frame(neighbors)){ + # # the user has likely passed in a matrix containing calculated values for risk.col and return.col + # nbweights<-grep('w\\.',colnames(neighbors)) #need \\. to get the dot + # for(i in 1:nrow(neighbors)) points(as.numeric(neighbors[i,nbweights]), type="b", col="lightblue") + # # note that here we need to get weight cols separately from the matrix, not from xtract + # # also note the need for as.numeric. points() doesn't like matrix inputs + # } + # } + # points(ROI$weights, type="b", col="blue", pch=16) + axis(2, cex.axis = cex.axis, col = element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) + box(col = element.color) } - else { - bottommargin = minmargin - } - par(mar = c(bottommargin, 4, topmargin, 2) +.1) - if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ - # set ylim based on weights if box constraints contain Inf or -Inf - ylim <- range(object$weights) - } else { - # set ylim based on the range of box constraints min and max - ylim <- range(c(constraints$min, constraints$max)) - } - plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) 
- if(!any(is.infinite(constraints$min))){ - points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) - } - if(!any(is.infinite(constraints$max))){ - points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) - } - # if(!is.null(neighbors)){ - # if(is.vector(neighbors)){ - # xtract=extractStats(ROI) - # weightcols<-grep('w\\.',colnames(xtract)) #need \\. to get the dot - # if(length(neighbors)==1){ - # # overplot nearby portfolios defined by 'out' - # orderx = order(xtract[,"out"]) - # subsetx = head(xtract[orderx,], n=neighbors) - # for(i in 1:neighbors) points(subsetx[i,weightcols], type="b", col="lightblue") - # } else{ - # # assume we have a vector of portfolio numbers - # subsetx = xtract[neighbors,weightcols] - # for(i in 1:length(neighbors)) points(subsetx[i,], type="b", col="lightblue") - # } - # } - # if(is.matrix(neighbors) | is.data.frame(neighbors)){ - # # the user has likely passed in a matrix containing calculated values for risk.col and return.col - # nbweights<-grep('w\\.',colnames(neighbors)) #need \\. to get the dot - # for(i in 1:nrow(neighbors)) points(as.numeric(neighbors[i,nbweights]), type="b", col="lightblue") - # # note that here we need to get weight cols separately from the matrix, not from xtract - # # also note the need for as.numeric. 
points() doesn't like matrix inputs - # } - # } - # points(ROI$weights, type="b", col="blue", pch=16) - axis(2, cex.axis = cex.axis, col = element.color) - axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) - box(col = element.color) } #' @rdname chart.Weights Modified: pkg/PortfolioAnalytics/R/charts.ROI.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-08 23:37:26 UTC (rev 3028) +++ pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-09 02:07:57 UTC (rev 3029) @@ -1,71 +1,76 @@ -chart.Weights.ROI <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ - +chart.Weights.ROI <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line"){ + if(!inherits(object, "optimize.portfolio.ROI")) stop("object must be of class 'optimize.portfolio.ROI'") - columnnames = names(object$weights) - numassets = length(columnnames) - - constraints <- get_constraints(object$portfolio) - - if(is.null(xlab)) - minmargin = 3 - else - minmargin = 5 - if(main=="") topmargin=1 else topmargin=4 - if(las > 1) {# set the bottom border to accommodate labels - bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab - if(bottommargin > 10 ) { - bottommargin<-10 - columnnames<-substr(columnnames,1,19) - # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + if(plot.type %in% c("bar", "barplot")){ + barplotWeights(object=object, ..., main=main, las=las, xlab=xlab, cex.lab=cex.lab, element.color=element.color, cex.axis=cex.axis, legend.loc=legend.loc, cex.legend=cex.legend, colorset=colorset) + } else if(plot.type == "line"){ + + columnnames = names(object$weights) + numassets = length(columnnames) + + constraints 
<- get_constraints(object$portfolio) + + if(is.null(xlab)) + minmargin = 3 + else + minmargin = 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin<-10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ + # set ylim based on weights if box constraints contain Inf or -Inf + ylim <- range(object$weights) + } else { + # set ylim based on the range of box constraints min and max + ylim <- range(c(constraints$min, constraints$max)) + } + plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) + if(!any(is.infinite(constraints$min))){ + points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) + } + if(!any(is.infinite(constraints$max))){ + points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) + } + # if(!is.null(neighbors)){ + # if(is.vector(neighbors)){ + # xtract=extractStats(object) + # weightcols<-grep('w\\.',colnames(xtract)) #need \\. 
to get the dot + # if(length(neighbors)==1){ + # # overplot nearby portfolios defined by 'out' + # orderx = order(xtract[,"out"]) + # subsetx = head(xtract[orderx,], n=neighbors) + # for(i in 1:neighbors) points(subsetx[i,weightcols], type="b", col="lightblue") + # } else{ + # # assume we have a vector of portfolio numbers + # subsetx = xtract[neighbors,weightcols] + # for(i in 1:length(neighbors)) points(subsetx[i,], type="b", col="lightblue") + # } + # } + # if(is.matrix(neighbors) | is.data.frame(neighbors)){ + # # the user has likely passed in a matrix containing calculated values for risk.col and return.col + # nbweights<-grep('w\\.',colnames(neighbors)) #need \\. to get the dot + # for(i in 1:nrow(neighbors)) points(as.numeric(neighbors[i,nbweights]), type="b", col="lightblue") + # # note that here we need to get weight cols separately from the matrix, not from xtract + # # also note the need for as.numeric. points() doesn't like matrix inputs + # } + # } + # points(object$weights, type="b", col="blue", pch=16) + axis(2, cex.axis = cex.axis, col = element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) + box(col = element.color) } - else { - bottommargin = minmargin - } - par(mar = c(bottommargin, 4, topmargin, 2) +.1) - if(any(is.infinite(constraints$max)) | any(is.infinite(constraints$min))){ - # set ylim based on weights if box constraints contain Inf or -Inf - ylim <- range(object$weights) - } else { - # set ylim based on the range of box constraints min and max - ylim <- range(c(constraints$min, constraints$max)) - } - plot(object$weights, type="b", col="blue", axes=FALSE, xlab='', ylim=ylim, ylab="Weights", main=main, pch=16, ...) 
- if(!any(is.infinite(constraints$min))){ - points(constraints$min, type="b", col="darkgray", lty="solid", lwd=2, pch=24) - } - if(!any(is.infinite(constraints$max))){ - points(constraints$max, type="b", col="darkgray", lty="solid", lwd=2, pch=25) - } - # if(!is.null(neighbors)){ - # if(is.vector(neighbors)){ - # xtract=extractStats(object) - # weightcols<-grep('w\\.',colnames(xtract)) #need \\. to get the dot - # if(length(neighbors)==1){ - # # overplot nearby portfolios defined by 'out' - # orderx = order(xtract[,"out"]) - # subsetx = head(xtract[orderx,], n=neighbors) - # for(i in 1:neighbors) points(subsetx[i,weightcols], type="b", col="lightblue") - # } else{ - # # assume we have a vector of portfolio numbers - # subsetx = xtract[neighbors,weightcols] - # for(i in 1:length(neighbors)) points(subsetx[i,], type="b", col="lightblue") - # } - # } - # if(is.matrix(neighbors) | is.data.frame(neighbors)){ - # # the user has likely passed in a matrix containing calculated values for risk.col and return.col - # nbweights<-grep('w\\.',colnames(neighbors)) #need \\. to get the dot - # for(i in 1:nrow(neighbors)) points(as.numeric(neighbors[i,nbweights]), type="b", col="lightblue") - # # note that here we need to get weight cols separately from the matrix, not from xtract - # # also note the need for as.numeric. 
points() doesn't like matrix inputs - # } - # } - # points(object$weights, type="b", col="blue", pch=16) - axis(2, cex.axis = cex.axis, col = element.color) - axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) - box(col = element.color) } #' @rdname chart.Weights Modified: pkg/PortfolioAnalytics/R/charts.RP.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-08 23:37:26 UTC (rev 3028) +++ pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-09 02:07:57 UTC (rev 3029) @@ -10,76 +10,82 @@ # ############################################################################### -chart.Weights.RP <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ +chart.Weights.RP <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8, colorset=NULL, legend.loc="topright", cex.legend=0.8, plot.type="line"){ # Specific to the output of the random portfolio code with constraints if(!inherits(object, "optimize.portfolio.random")){ stop("object must be of class 'optimize.portfolio.random'") } - columnnames = names(object$weights) - numassets = length(columnnames) - constraints <- get_constraints(object$portfolio) - - if(is.null(xlab)) - minmargin = 3 - else - minmargin = 5 [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3029 From noreply at r-forge.r-project.org Mon Sep 9 14:42:37 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 9 Sep 2013 14:42:37 +0200 (CEST) Subject: [Returnanalytics-commits] r3030 - pkg/PerformanceAnalytics/sandbox/pulkit/R Message-ID: <20130909124237.DDBEC180603@r-forge.r-project.org> Author: pulkit Date: 2013-09-09 14:42:37 +0200 (Mon, 09 Sep 2013) New Revision: 3030 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/REDDCOPS.R 
pkg/PerformanceAnalytics/sandbox/pulkit/R/REM.R pkg/PerformanceAnalytics/sandbox/pulkit/R/redd.R Log: REDDCOPS Error handling and testing Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/REDDCOPS.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/REDDCOPS.R 2013-09-09 02:07:57 UTC (rev 3029) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/REDDCOPS.R 2013-09-09 12:42:37 UTC (rev 3030) @@ -23,8 +23,6 @@ #'@param R an xts, vector, matrix, data frame, timeSeries or zoo object of #' asset returns #'@param delta Drawdown limit -#'@param sharpe If you want to use a constant Sharpe Ratio please specify here -#'else the return series will be used #'@param Rf risk free rate can be vector such as government security rate of return. #'@param h Look back period #'@param geometric geometric utilize geometric chaining (TRUE) or simple/arithmetic @@ -32,7 +30,8 @@ #'@param \dots any other variable #'@param asset The number of risky assets in the portfolio #'@param type The type of portfolio optimization -#' +#'@param sharpe If you want to use a constant Sharpe Ratio please specify here +#'else the return series will be used #'@author Pulkit Mehrotra #'@seealso \code{\link{chart.REDD}} \code{\link{EconomicDrawdown}} #'\code{\link{rollDrawdown}} \code{\link{EDDCOPS}} \code{\link{rollEconomicMax}} @@ -73,6 +72,19 @@ x = checkData(R) columns = ncol(x) columnnames = colnames(x) + index = NULL + # ERROR Handling for cases in which lookback period is greater than the number of rows + for(i in 1:ncol(x)){ + if(length(na.omit(x[,i])) Author: pulkit Date: 2013-09-09 15:41:14 +0200 (Mon, 09 Sep 2013) New Revision: 3031 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/CdaR.R pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R pkg/PerformanceAnalytics/sandbox/pulkit/R/Drawdownalpha.R Log: NA values and error handling in Drawdown Beta and Alpha Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/CdaR.R 
=================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/CdaR.R 2013-09-09 12:42:37 UTC (rev 3030) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/CdaR.R 2013-09-09 13:41:14 UTC (rev 3031) @@ -42,7 +42,6 @@ if((p*nr) %% 1 == 0){ drawdowns = -Drawdowns(R) drawdowns = drawdowns[order(drawdowns),increasing = TRUE] - print(drawdowns) # average of the drawdowns greater the (1-alpha).100% largest drawdowns result = -(1/((1-p)*nr))*sum(drawdowns[((p)*nr):nr]) } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R 2013-09-09 12:42:37 UTC (rev 3030) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R 2013-09-09 13:41:14 UTC (rev 3031) @@ -69,8 +69,13 @@ # The Drawdown beta is given as the output. + R = na.omit(R) + Rm = na.omit(Rm) x = checkData(R) xm = checkData(Rm) + if(nrow(x) != nrow(xm)){ + stop("The number of rows of the return series and the optimal portfolio should be equal") + } columnnames = colnames(R) columns = ncol(R) drawdowns_m = Drawdowns(Rm) @@ -95,7 +100,6 @@ DDbeta<-function(x){ q = NULL q_quantile = quantile(drawdowns_m,1-p) - print(drawdowns_m) for(i in 1:nrow(Rm)){ if(drawdowns_m[i] Author: braverock Date: 2013-09-09 21:24:19 +0200 (Mon, 09 Sep 2013) New Revision: 3032 Removed: pkg/PortfolioAttribution/R/logLinking.R pkg/PortfolioAttribution/R/logLinkingZoo.R pkg/PortfolioAttribution/R/periodApplyEZ.R pkg/PortfolioAttribution/R/relativeAttribution.R pkg/PortfolioAttribution/R/relativeAttributionWithoutFactors.R pkg/PortfolioAttribution/man/logLinking.Rd pkg/PortfolioAttribution/man/logLinking.zoo.Rd pkg/PortfolioAttribution/man/period.apply.EZ.Rd pkg/PortfolioAttribution/man/relativeAttribution.Rd pkg/PortfolioAttribution/man/relativeAttributionWithoutFactors.Rd Modified: pkg/PortfolioAttribution/DESCRIPTION pkg/PortfolioAttribution/R/AcctReturns.R 
pkg/PortfolioAttribution/R/Attribution.geometric.R pkg/PortfolioAttribution/R/AttributionFixedIncome.R pkg/PortfolioAttribution/R/CAPM.dynamic.R pkg/PortfolioAttribution/R/Carino.R pkg/PortfolioAttribution/R/Conv.option.R pkg/PortfolioAttribution/R/DaviesLaker.R pkg/PortfolioAttribution/R/Frongello.R pkg/PortfolioAttribution/R/Grap.R pkg/PortfolioAttribution/R/HierarchyQuintiles.R pkg/PortfolioAttribution/R/MarketTiming.R pkg/PortfolioAttribution/R/Menchero.R pkg/PortfolioAttribution/R/Modigliani.R pkg/PortfolioAttribution/R/Return.annualized.excess.R pkg/PortfolioAttribution/R/Return.level.R pkg/PortfolioAttribution/R/Weight.level.R pkg/PortfolioAttribution/R/Weight.transform.R pkg/PortfolioAttribution/R/attribution.R pkg/PortfolioAttribution/R/attribution.levels.R Log: - remove obsolete files - set keywords - bump version Modified: pkg/PortfolioAttribution/DESCRIPTION =================================================================== --- pkg/PortfolioAttribution/DESCRIPTION 2013-09-09 13:41:14 UTC (rev 3031) +++ pkg/PortfolioAttribution/DESCRIPTION 2013-09-09 19:24:19 UTC (rev 3032) @@ -1,15 +1,15 @@ Package: PortfolioAttribution Type: Package Title: Econometric tools for performance and risk analysis. -Version: 0.2 -Date: $Date: 2012-06-06 15:18:48 -0500 (Wed, 06 Jun 2012) $ +Version: 0.3 +Date: $Date$ Author: Andrii Babii Maintainer: Andrii Babii Description: This package provides functions for the ex-post Portfolio Attribution methods from Bacon (2004), Carino (2009), etc. The package was created as a part of the Google Summer of Code (GSoC) 2012 project. 
Depends: - R (>= 2.14.0), + R (>= 2.15.0), zoo, xts (>= 0.8), PerformanceAnalytics(>= 1.0.4.3) @@ -18,28 +18,3 @@ License: GPL URL: http://r-forge.r-project.org/projects/returnanalytics/ Copyright: (c) 2004-2013 -Collate: - 'Attribution.geometric.R' - 'attribution.levels.R' - 'attribution.R' - 'AttributionFixedIncome.R' - 'CAPM.dynamic.R' - 'Carino.R' - 'Conv.option.R' - 'DaviesLaker.R' - 'Frongello.R' - 'Grap.R' - 'HierarchyQuintiles.R' - 'Menchero.R' - 'Modigliani.R' - 'Return.annualized.excess.R' - 'Return.level.R' - 'MarketTiming.R' - 'Weight.level.R' - 'Weight.transform.R' - 'AcctReturns.R' - 'logLinking.R' - 'logLinkingZoo.R' - 'periodApplyEZ.R' - 'relativeAttribution.R' - 'relativeAttributionWithoutFactors.R' Property changes on: pkg/PortfolioAttribution/DESCRIPTION ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/AcctReturns.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Attribution.geometric.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/AttributionFixedIncome.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/CAPM.dynamic.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Carino.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Conv.option.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/DaviesLaker.R 
___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Frongello.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Grap.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/HierarchyQuintiles.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/MarketTiming.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Menchero.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Modigliani.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Return.annualized.excess.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Return.level.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Weight.level.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/Weight.transform.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: pkg/PortfolioAttribution/R/attribution.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Property changes on: 
pkg/PortfolioAttribution/R/attribution.levels.R ___________________________________________________________________ Added: svn:keywords + Date,Author,Id Deleted: pkg/PortfolioAttribution/R/logLinking.R =================================================================== --- pkg/PortfolioAttribution/R/logLinking.R 2013-09-09 13:41:14 UTC (rev 3031) +++ pkg/PortfolioAttribution/R/logLinking.R 2013-09-09 19:24:19 UTC (rev 3032) @@ -1,66 +0,0 @@ -## logLinking.r -## -## purpose: Aggregates performance attribution effects over time to produce a multiperiod summary. -## -## authors: Eric Zivot -## created: June 5, 2012 - - - - -#' Logarithmic Linking -#' -#' Aggregates performance attribution effects over time to produce a -#' multiperiod summary. -#' -#' Transforms to a multiperiod arithmetic decomposition by distributing the -#' residual proportionately. Creates linking coefficients \code{k_t / k} and -#' applies to given \code{component} to combine arithmetic attribution effects -#' over time. If \code{weighted.benchmark.returns} is zero for each \code{p} -#' date, then the function returns absolute arithmetic attribution effect, -#' e.g., factor returns or idiosyncratic returns from factor model. 
-#' -#' @param weighted.portfolio.returns \code{n x p} data frame containing asset -#' ID (as rownames) and weighted portfolio returns for \code{p} dates -#' @param weighted.benchmark.returns \code{m x p} containing benchmark ID (as -#' rownames) and weighted benchmark returns for \code{p} dates -#' @param component data frame containing ID and attribution component for -#' \code{p} dates (e.g., \code{n x p} for manager selection or \code{m x p} -#' strategy allocation) -#' @return Returns a list of two objects of class "\code{numeric}" after -#' applying the linking coefficient from the logarithmic method: \item{linked}{ -#' combined \code{component} over time } \item{linked.total}{ sum of combined -#' \code{component} over time } -#' @author Eric Zivot -#' @seealso \code{\link{relativeAttribution}} -#' @references Christopherson, J., Carino, D., and Ferson, W. (2009) -#' \emph{Portfolio Performance Measurement and Benchmarking}, McGrall-Hill. -logLinking <- function(weighted.portfolio.returns, weighted.benchmark.returns, component){ -## inputs -## weighted.portfolio.returns n x p data frame containing asset ID (as rownames) and weighted portfolio returns for p dates -## weighted.benchmark.returns m x p data frame containing benchmark ID (as rownames) and weighted benchmark returns for p dates -## component data frame containing ID and attribution component for p dates (e.g., manager selection or strategy allocation) -## outputs -## list of combined components over time after applying the linking coefficient from the logarithmic method - -require(PerformanceAnalytics) - -logCoeff = data.frame((log(1+colSums(weighted.portfolio.returns)) - - log(1+colSums(weighted.benchmark.returns))) - /(colSums(weighted.portfolio.returns) - colSums(weighted.benchmark.returns)), - (log(1+Return.cumulative(colSums(weighted.portfolio.returns))) - - log(1+Return.cumulative(colSums(weighted.benchmark.returns)))) - /(Return.cumulative(colSums(weighted.portfolio.returns)) - 
Return.cumulative(colSums(weighted.benchmark.returns)))) -colnames(logCoeff) = c("k_t","K") -logCoeff = data.frame(logCoeff, logCoeff$k_t/logCoeff$K) -colnames(logCoeff) = c("k_t","K","B") - -linked = data.frame(component) -for(i in 1:nrow(logCoeff)){ - linked[,i] = as.matrix(component[,i])%*%logCoeff$B[i] -} - -ans = list("linked" = rowSums(linked), - "linked.total" = sum(rowSums(linked))) -return(ans) -} Deleted: pkg/PortfolioAttribution/R/logLinkingZoo.R =================================================================== --- pkg/PortfolioAttribution/R/logLinkingZoo.R 2013-09-09 13:41:14 UTC (rev 3031) +++ pkg/PortfolioAttribution/R/logLinkingZoo.R 2013-09-09 19:24:19 UTC (rev 3032) @@ -1,82 +0,0 @@ -# logLinkingZoo.r -# -# purpose: function to be passed to rollApplyEZ() for carino linking over -# user-specified attribution dates and aggregation periods -# -## authors: Eric Zivot -## created: June 5, 2012 - - -#' Log-linking Function For Use In rollApplyEZ -#' -#' Function to be passed to rollApplyEZ() for carino linking over -#' user-specified attribution dates and aggregation periods -#' -#' %% ~~ If necessary, more details than the description above ~~ -#' -#' @param x \code{zoo} object containing data to be aggregated. -#' @param asset.names Character vector of asset IDs -#' @param strategy.names Character vector of strategy abbreviations -#' @param component.names Character vector of component names. If -#' \code{component.type="manager"} then \code{component.names} are asset IDs; -#' if \code{component.type="strategy"} then \code{component.names} are strategy -#' abbreviations.if \code{component.type="factor"} then \code{component.names} -#' are factor IDs. -#' @param component.type character string indicating type of attribution. Valid -#' choices are \code{"manager"} for manager selection attribution; -#' \code{"strategy"} for strategy allocation attribution; \code{"factor"} for -#' factor attribution. 
-#' @param return.out character string indicating output from log-linking -#' function. Valid choices are \code{"linked"} for disaggregated results, and -#' \code{"linked.total"} for total results. -#' @return A numeric vector of attribution values with the same length as -#' \code{component.names} if \code{return.out="linked"} or a numeric value -#' giving total attribution if \code{return.out = "linked.total"}. -#' @author Eric Zivot. -#' @seealso \code{\link{logLinking}} -#' @examples -#' -#' ##---- Should be DIRECTLY executable !! ---- -#' ##-- ==> Define data, use random, -#' ##-- or do help(data=index) for the standard data sets. -#' -#' -#' -logLinking.zoo <- function(x, asset.names, strategy.names, - component.names, component.type = c("manager", "strategy", "factor"), - return.out = c("linked", "linked.total")) { -# Arguments -# x zoo object containing data to be aggregated. -# asset.names character vector of asset IDs -# strategy.names character vector of strategy abbreviations -# component.names character vector of component names. If component.type="manager" -# then component.names are asset IDs; if component.type="strategy" -# then component.names are strategy abbreviations. -# component.type character string indicating type of attribution. Valid choices -# are "manager" for manager selection attribution, and "strategy" -# for strategy allocation attribution. -# return.out character string indicating output from log-linking function. -# Valid choices are "linked" for disaggregated results, and "linked.total" -# for total results. 
-# Details -# Value -# A numeric vector of attribution values with the same length as component.names -# if return.out="linked" or a numeric value giving total attribution if -# return.out = "linked.total" - return.out = return.out[1] - component.type = component.type[1] - weighted.portfolio.returns = t(coredata(x[,asset.names,drop=FALSE])) - weighted.benchmark.returns = t(coredata(x[, strategy.names, drop=FALSE])) - component = t(coredata(x[, component.names, drop=FALSE])) - if (component.type == "manager") { - rownames(component) = asset.names - } else if (component.type == "strategy") { - rownames(component) = strategy.names - } else { - rownames(component) = component.names - } - carinoLinking = logLinking(weighted.portfolio.returns, - weighted.benchmark.returns, - component) - return(carinoLinking[[return.out]]) -} Deleted: pkg/PortfolioAttribution/R/periodApplyEZ.R =================================================================== --- pkg/PortfolioAttribution/R/periodApplyEZ.R 2013-09-09 13:41:14 UTC (rev 3031) +++ pkg/PortfolioAttribution/R/periodApplyEZ.R 2013-09-09 19:24:19 UTC (rev 3032) @@ -1,68 +0,0 @@ -# periodApplyEZ.r -# -# purpose: Modified version of period.apply() to work with logLinking.zoo() -# -## authors: Eric Zivot -## created: August 3, 2011 -## modified: August 3, 2011 - - -#' Apply Function Over Specified Interval -#' -#' Modified version of period.apply() from package xts to work with -#' logLinking.zoo -#' -#' Similar to the rest of the apply family, calculate a specified functions -#' value given a shifting set of data values. The primary difference is that it -#' is that \code{period.apply.EZ} applies a function to non-overlapping -#' intervals along a vector. This is a modified of the function -#' \code{period.apply} in package xts. The modification allows the output to be -#' either a vector with length of \code{INDEX} minus 1 or a matrix with number -#' of rows length of \code{INDEX} minus 1. 
Useful for applying arbitrary -#' functions over an entire data object by an aribirtary index, as when -#' \code{INDEX} is the result of a call to \code{endpoints}. -#' -#' -#' @param x data to apply \code{FUN} to -#' @param INDEX numeric vector specifying indexing -#' @param FUN an argument of type \code{function} -#' @param \dots additional arguments for \code{FUN} -#' @return A vector with length of INDEX minus 1 or a matrix with number of -#' rows length of INDEX minus 1. -#' @author Original code by Jeff Ryan. Modified by Eric Zivot. -period.apply.EZ <- function (x, INDEX, FUN, ...) -{ -# TODO Brian should merge/move this into xts/quantmod. -# Agruments -# x data to apply FUN to -# INDEX numeric vector specifying indexing -# FUN an argument of type function -# ... additional arguments for FUN -# Details -# Similar to the rest of the apply family, calculate a specified functions value -# given a shifting set of data values. The primary difference is that it is that -# period.apply applies a function to non-overlapping intervals along a vector. -# This is a modified of the function period.apply in package xts. The modification -# allows the output to be either a vector with length of INDEX minus 1 or a matrix -# with number of rows length of INDEX minus 1. -# -# Useful for applying arbitrary functions over an entire data object by an -# aribirtary index, as when INDEX is the result of a call to endpoints. -# -# Value -# A vector with length of INDEX minus 1 or a matrix with number of rows length -# of INDEX minus 1. - require(xts) - x <- try.xts(x, error = FALSE) - FUN <- match.fun(FUN) - xx <- sapply(1:(length(INDEX) - 1), function(y) { - FUN(x[(INDEX[y] + 1):INDEX[y + 1]], ...) - }) - ## ghetto rigging to work with logLinking.zoo(). Allows output to be either - ## a vector or a matrix. 
period.apply() only gives vector result - if (is.matrix(xx)) { - reclass(t(xx), x[INDEX]) - } else { - reclass(xx, x[INDEX]) - } -} Deleted: pkg/PortfolioAttribution/R/relativeAttribution.R =================================================================== --- pkg/PortfolioAttribution/R/relativeAttribution.R 2013-09-09 13:41:14 UTC (rev 3031) +++ pkg/PortfolioAttribution/R/relativeAttribution.R 2013-09-09 19:24:19 UTC (rev 3032) @@ -1,148 +0,0 @@ -## relativeAttribution.r -## -## purpose: Decompose relative portfolio returns into manager selection and -## strategy allocation components incorporating factor contributions -## -## authors: Eric Zivot -## created: June 5, 2012 -## - - -#' Relative Performance Attribution -#' -#' Decompose relative returns into manager selection and strategy allocation -#' components, and then into factor and idiosyncratic return contributions. -#' -#' The Brinson model reflects a sector-based investment process and attributes -#' portfolio return to manager selection and strategy allocation effects. -#' Positive contributions are earned by overweighting outperforming -#' managers/sectors and underweighting underperformers. In addition, the -#' manager selection and strategy allocation effects are decomposed into factor -#' and idiosyncratic return contributions as defined by a factor model. -#' -#' @param portfolio.df \code{n x (3 + f)} data frame containing asset ID (as -#' rownames), Strategy \code{n x 1}, Weight \code{n x 1}, Return \code{n x 1}, -#' exposure-weighted factor returns \code{n x f} whose column names are the -#' factor IDs -#' @param benchmark.df \code{m x (3 + f)} data frame containing benchmark ID -#' (as rownames), Strategy \code{n x 1}, Weights \code{n x 1}, Returns \code{n -#' x 1}, exposure-weighted factor returns \code{n x f} whose column names are -#' factor IDs. 
-#' @return Returns a list with the following components: \item{portfolio.ret}{ -#' \code{1 x 1} matrix containing portfolio return} \item{benchmark.ret}{ -#' \code{1 x 1} matrix containing benchmark return} \item{relative.ret}{ -#' \code{1 x 1} matrix containing relative return} \item{manager}{ \code{n x 1} -#' data frame containing asset ID (as rownames) and \code{ManagerSelection} } -#' \item{manager.total}{ numeric value of total \code{ManagerSelection} -#' component} \item{manager.factor}{ \code{n x (1 + f)} data frame containing -#' asset ID (as rownames) and ManagerSelection by \code{Idiosyncratic} \code{n -#' x 1} and factor IDs \code{n x f}} \item{manager.factor.total}{ \code{n x 1} -#' numeric object with asset ID (as rownames) and ManagerSelection component -#' summed over all factor IDs} \item{manager.idiosyncratic.total}{ numeric -#' value of ManagerSelection component summed over all Idiosyncratic values} -#' \item{strategy}{ \code{n x 2} data frame containing benchmark ID (as -#' rownames), \code{Strategy}, and \code{StrategyAllocation} } -#' \item{strategy.total}{ numeric value of total StrategyAllocation component} -#' \item{strategy.factor}{ \code{m x (1 + f)} data frame containing benchmark -#' ID (as rownames) and StrategyAllocation by \code{Idiosyncratic} \code{m x 1} -#' and factor IDs \code{m x f}} \item{strategy.factor.total}{ \code{m x 1} -#' numeric object with benchmark ID (as rownames) and StrategyAllocation -#' component summed over all factor IDs} \item{strategy.idiosyncratic.total}{ -#' numeric value of StrategyAllocation component summed over all Idiosyncratic -#' values} -#' @author Eric Zivot -#' @seealso \code{\link{logLinking}} -#' @references Davis, B. and Menchero, J. (2009) \emph{Beyond Brinson: -#' Establishing the Link Between Sector and Factor Models}, MSCI Barra Research -#' Insights. 
-relativeAttribution <- function(portfolio.df, benchmark.df){ -## -## inputs: -## portfolio.df n x (3 + f) data frame containing asset ID (as rownames), -## Strategy (n x 1), Weight (n x 1), Return (n x 1), -## exposure-weighted factor returns (n x f) whose column -## names are the factor IDs -## benchmark.df m x (3 + f) data frame containing benchmark ID (as rownames), -## Strategy (n x 1), Weights (n x 1), Return (n x 1), -## exposure-weighted factor returns (n x f) whose column -## names are factor IDs. -## -## outputs: -## list with the following components: -## -## portfolio.ret, matrix containing portfolio return -## benchmark.ret, matrix containing benchmark return -## relative.ret, matrix containing relative return -## manager, data frame containing class ID (as rownames) and ManagerSelection -## manager.total, numeric value of total ManagerSelection component -## manager.factor, data frame containing class ID (as rownames) and ManagerSelection by Idiosyncratic and factor IDs -## manager.factor.total, numeric object with class ID (as rownames) and ManagerSelection component summed over all factor IDs -## manager.idiosyncratic.total, numeric value of ManagerSelection component summed over all Idiosyncratic values -## strategy, data frame containing benchmark ID (as rownames), Strategy, and StrategyAllocation -## strategy.total, numeric value of total StrategyAllocation component -## strategy.factor, data frame containing benchmark ID (as rownames) and StrategyAllocation by Idiosyncratic and factor IDs -## strategy.factor.total, numeric object with benchmark ID (as rownames) and StrategyAllocation component summed over all factors IDs -## strategy.idiosyncratic.total, numeric value of StrategyAllocation component summed over all Idiosyncratic values -## -require(plyr) - -## Compute benchmark, portfolio, and returns -portfolio.ret = crossprod(portfolio.df$Weight, portfolio.df$Return) -benchmark.ret = crossprod(benchmark.df$Weight, benchmark.df$Return) 
-relative.ret = portfolio.ret - benchmark.ret - -## x is portfolio.df, y is benchmark.df -colnames(benchmark.df) = paste(colnames(benchmark.df), ".y", sep="") -colnames(benchmark.df)[1] = "Strategy" -temp = join(portfolio.df, benchmark.df, by = "Strategy") - -## Active returns by manager selection -manager = data.frame(temp$Weight * (temp$Return - temp$Return.y), row.names = rownames(temp)) -colnames(manager) = c("ManagerSelection") - -## Store factor names -factor.names = colnames(portfolio.df[4:ncol(portfolio.df)]) -factor.names.y = paste(factor.names, ".y", sep="") - -## Manager selection by factor and idiosyncratic returns -manager.factor = data.frame((temp$Return-rowSums(temp[,factor.names]) - (temp$Return.y-rowSums(temp[,factor.names.y])))*temp$Weight, - (temp[,factor.names] - temp[,factor.names.y]) * temp$Weight) -colnames(manager.factor) = c("Idiosyncratic", factor.names) - -## Active returns by strategy allocation -strategy = data.frame(benchmark.df$Strategy, row.names = rownames(benchmark.df)) -colnames(strategy) = c("Strategy") -for(i in 1:nrow(strategy)){ - strategy$ActiveWeight[i] = sum(portfolio.df$Weight[portfolio.df$Strategy == strategy$Strategy[i]]) - benchmark.df$Weight[i] -} -strategy = data.frame(strategy, strategy$ActiveWeight * (benchmark.df$Return - benchmark.df$Weight%*%benchmark.df$Return)) - -## Strategy allocation by factor and idiosyncratic returns -tmp.mat = matrix(0, nrow(strategy), ncol(benchmark.df)-1) -colnames(tmp.mat) = c("Strategy", "Idiosyncratic", factor.names) -rownames(tmp.mat) = rownames(benchmark.df) -strategy.factor = as.data.frame(tmp.mat) -strategy.factor$Strategy = benchmark.df$Strategy -strategy.factor$Idiosyncratic = (benchmark.df$Return - rowSums(benchmark.df[,factor.names.y]) - sum((benchmark.df$Return - rowSums(benchmark.df[,factor.names.y])) * benchmark.df$Weight)) * strategy$ActiveWeight -for(i in 1:nrow(strategy)){ - strategy.factor[i,factor.names] = colSums(benchmark.df[,factor.names.y] * benchmark.df$Weight) 
-} -strategy.factor[,factor.names] = (benchmark.df[,factor.names.y] - strategy.factor[,factor.names]) * strategy$ActiveWeight -colnames(strategy) = c("Strategy", "ActiveWeight", "StrategyAllocation") - -ans = list("manager" = manager, - "strategy" = strategy[,c("Strategy","StrategyAllocation")], - "strategy.active.weight" = strategy[,c("Strategy","ActiveWeight")], - "manager.factor" = manager.factor, - "strategy.factor" = strategy.factor, - "manager.factor.total" = rowSums(manager.factor[,factor.names]), - "manager.idiosyncratic.total" = sum(manager.factor$Idiosyncratic), - "strategy.factor.total" = rowSums(strategy.factor[,factor.names]), - "strategy.idiosyncratic.total" = sum(strategy.factor$Idiosyncratic), - "manager.total" = sum(manager$ManagerSelection), - "strategy.total" = sum(strategy$StrategyAllocation), - "portfolio.ret" = portfolio.ret, - "benchmark.ret" = benchmark.ret, - "relative.ret" = relative.ret) -return(ans) -} Deleted: pkg/PortfolioAttribution/R/relativeAttributionWithoutFactors.R =================================================================== --- pkg/PortfolioAttribution/R/relativeAttributionWithoutFactors.R 2013-09-09 13:41:14 UTC (rev 3031) +++ pkg/PortfolioAttribution/R/relativeAttributionWithoutFactors.R 2013-09-09 19:24:19 UTC (rev 3032) @@ -1,85 +0,0 @@ -## relativeAttributionWithoutFactors.r -## -## purpose: Decompose relative portfolio returns into manager selection and -## strategy allocation components -## -## authors: Eric Zivot -## created: June 5, 2012 -## - - -#' Relative Performance Attribution -#' -#' Decompose relative returns into manager selection and strategy allocation -#' components. -#' -#' The Brinson model reflects a sector-based investment process and attributes -#' portfolio return to manager selection and strategy allocation effects. -#' Positive contributions are earned by overweighting outperforming -#' managers/sectors and underweighting underperformers. 
-#' -#' @param portfolio.df \code{n x 3} data frame containing asset ID (as -#' rownames), Strategy \code{n x 1}, Weight \code{n x 1}, Return \code{n x 1}, -#' @param benchmark.df \code{m x 3} data frame containing benchmark ID (as -#' rownames), Strategy \code{n x 1}, Weights \code{n x 1}, Returns \code{n x -#' 1}, -#' @return Returns a list with the following components: \item{portfolio.ret}{ -#' \code{1 x 1} matrix containing portfolio return} \item{benchmark.ret}{ -#' \code{1 x 1} matrix containing benchmark return} \item{relative.ret}{ -#' \code{1 x 1} matrix containing relative return} \item{manager}{ \code{n x 1} -#' data frame containing asset ID (as rownames) and \code{ManagerSelection} } -#' \item{manager.total}{ numeric value of total \code{ManagerSelection} -#' component} \item{strategy}{ \code{n x 2} data frame containing benchmark ID -#' (as rownames), \code{Strategy}, and \code{StrategyAllocation} } -#' \item{strategy.total}{ numeric value of total StrategyAllocation component} -#' @author Eric Zivot -#' @seealso \code{\link{logLinking}} -#' @references Davis, B. and Menchero, J. (2009) \emph{Beyond Brinson: -#' Establishing the Link Between Sector and Factor Models}, MSCI Barra Research -#' Insights. 
-#' -relativeAttributionWithoutFactors <- function(portfolio.df, benchmark.df){ -## -## inputs: -## portfolio.df n x 3 data frame containing asset ID (as rownames), strategy, weights, returns -## benchmark.df m x 3 data frame containing benchmark ID (as rownames), strategy, weights, returns -## -## outputs: -## list with the following components: -## manager, dataframe giving active returns by manager selection -## strategy, dataframe giving active returns by strategy allocation -## -require(plyr) - -## x is portfolio.df, y is benchmark.df -temp = join(portfolio.df, benchmark.df, by = "Strategy") -colnames(temp) = c("Strategy", "Weight.x", "Return.x", "Weight.y", "Return.y") - -## Active returns by manager selection -manager = data.frame(temp$Weight.x * (temp$Return.x - temp$Return.y), row.names = rownames(temp)) -colnames(manager) = c("ManagerSelection") - -## Active returns by strategy allocation -strategy = data.frame(benchmark.df$Strategy, row.names = rownames(benchmark.df)) -colnames(strategy) = c("Strategy") -for(i in 1:nrow(strategy)){ - strategy$ActiveWeight[i] = sum(portfolio.df$Weight[portfolio.df$Strategy == strategy$Strategy[i]]) - benchmark.df$Weight[i] -} -strategy = data.frame(strategy, strategy$ActiveWeight * (benchmark.df$Return - benchmark.df$Weight%*%benchmark.df$Return)) -strategy$ActiveWeight = NULL -colnames(strategy) = c("Strategy", "StrategyAllocation") - -# compute benchmark, portfolio and returns -portfolio.ret = crossprod(portfolio.df$Weight, portfolio.df$Return) -benchmark.ret = crossprod(benchmark.df$Weight, benchmark.df$Return) -relative.ret = portfolio.ret - benchmark.ret - -ans = list("manager" = manager, - "strategy" = strategy, - "manager.total" = sum(manager$ManagerSelection), - "strategy.total" = sum(strategy$StrategyAllocation), - "portfolio.ret" = portfolio.ret, - "benchmark.ret" = benchmark.ret, - "relative.ret" = relative.ret) -return(ans) -} Deleted: pkg/PortfolioAttribution/man/logLinking.Rd 
=================================================================== --- pkg/PortfolioAttribution/man/logLinking.Rd 2013-09-09 13:41:14 UTC (rev 3031) +++ pkg/PortfolioAttribution/man/logLinking.Rd 2013-09-09 19:24:19 UTC (rev 3032) @@ -1,53 +0,0 @@ -\name{logLinking} -\alias{logLinking} -\title{Logarithmic Linking} -\usage{ - logLinking(weighted.portfolio.returns, - weighted.benchmark.returns, component) -} -\arguments{ - \item{weighted.portfolio.returns}{\code{n x p} data frame - containing asset ID (as rownames) and weighted portfolio - returns for \code{p} dates} - - \item{weighted.benchmark.returns}{\code{m x p} containing - benchmark ID (as rownames) and weighted benchmark returns - for \code{p} dates} - - \item{component}{data frame containing ID and attribution - component for \code{p} dates (e.g., \code{n x p} for - manager selection or \code{m x p} strategy allocation)} -} -\value{ - Returns a list of two objects of class "\code{numeric}" - after applying the linking coefficient from the - logarithmic method: \item{linked}{ combined - \code{component} over time } \item{linked.total}{ sum of - combined \code{component} over time } -} -\description{ - Aggregates performance attribution effects over time to - produce a multiperiod summary. -} -\details{ - Transforms to a multiperiod arithmetic decomposition by - distributing the residual proportionately. Creates - linking coefficients \code{k_t / k} and applies to given - \code{component} to combine arithmetic attribution - effects over time. If \code{weighted.benchmark.returns} - is zero for each \code{p} date, then the function returns - absolute arithmetic attribution effect, e.g., factor - returns or idiosyncratic returns from factor model. -} -\author{ - Eric Zivot -} -\references{ - Christopherson, J., Carino, D., and Ferson, W. (2009) - \emph{Portfolio Performance Measurement and - Benchmarking}, McGrall-Hill. 
-} -\seealso{ - \code{\link{relativeAttribution}} -} - Deleted: pkg/PortfolioAttribution/man/logLinking.zoo.Rd =================================================================== --- pkg/PortfolioAttribution/man/logLinking.zoo.Rd 2013-09-09 13:41:14 UTC (rev 3031) +++ pkg/PortfolioAttribution/man/logLinking.zoo.Rd 2013-09-09 19:24:19 UTC (rev 3032) @@ -1,64 +0,0 @@ -\name{logLinking.zoo} -\alias{logLinking.zoo} -\title{Log-linking Function For Use In rollApplyEZ} -\usage{ - logLinking.zoo(x, asset.names, strategy.names, - component.names, - component.type = c("manager", "strategy", "factor"), - return.out = c("linked", "linked.total")) -} -\arguments{ - \item{x}{\code{zoo} object containing data to be - aggregated.} - - \item{asset.names}{Character vector of asset IDs} - - \item{strategy.names}{Character vector of strategy - abbreviations} - - \item{component.names}{Character vector of component - names. If \code{component.type="manager"} then - \code{component.names} are asset IDs; if - \code{component.type="strategy"} then - \code{component.names} are strategy abbreviations.if - \code{component.type="factor"} then - \code{component.names} are factor IDs.} - - \item{component.type}{character string indicating type of - attribution. Valid choices are \code{"manager"} for - manager selection attribution; \code{"strategy"} for - strategy allocation attribution; \code{"factor"} for - factor attribution.} - - \item{return.out}{character string indicating output from - log-linking function. Valid choices are \code{"linked"} - for disaggregated results, and \code{"linked.total"} for - total results.} -} -\value{ - A numeric vector of attribution values with the same - length as \code{component.names} if - \code{return.out="linked"} or a numeric value giving - total attribution if \code{return.out = "linked.total"}. 
-} -\description{ - Function to be passed to rollApplyEZ() for carino linking - over user-specified attribution dates and aggregation - periods -} -\details{ - %% ~~ If necessary, more details than the description - above ~~ -} -\examples{ -##---- Should be DIRECTLY executable !! ---- -##-- ==> Define data, use random, -##-- or do help(data=index) for the standard data sets. -} -\author{ - Eric Zivot. -} -\seealso{ - \code{\link{logLinking}} -} - Deleted: pkg/PortfolioAttribution/man/period.apply.EZ.Rd =================================================================== [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3032 From noreply at r-forge.r-project.org Mon Sep 9 21:27:16 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 9 Sep 2013 21:27:16 +0200 (CEST) Subject: [Returnanalytics-commits] r3033 - in pkg/PortfolioAttribution: R man Message-ID: <20130909192716.392F8181213@r-forge.r-project.org> Author: braverock Date: 2013-09-09 21:27:15 +0200 (Mon, 09 Sep 2013) New Revision: 3033 Removed: pkg/PortfolioAttribution/R/AcctReturns.R pkg/PortfolioAttribution/man/AcctReturns.Rd Log: - move AcctReturns to blotter package Deleted: pkg/PortfolioAttribution/R/AcctReturns.R =================================================================== --- pkg/PortfolioAttribution/R/AcctReturns.R 2013-09-09 19:24:19 UTC (rev 3032) +++ pkg/PortfolioAttribution/R/AcctReturns.R 2013-09-09 19:27:15 UTC (rev 3033) @@ -1,129 +0,0 @@ -#' Calculate account returns -#' -#' Similar to the \code{PortfReturns} function, but gives returns for the -#' entire account and takes into account external cashflows. External cashflows -#' are defined as contributions to or withdrawals from the account. Allows -#' selecting between time-weighted returns and linked modified Dietz approach. 
-#' If time-weighted method is selected, returns at time \eqn{t} are computed -#' using: \deqn{r_{t}=\frac{V_{t}}{V_{t-1}+C_{t}}-1} -#' where \eqn{V_{t}} - account value at time \eqn{t}, \eqn{C_{t}} - cashflow at -#' time \eqn{t}. The implicit assumption made here is that the cash flow is -#' available for the portfolio manager to invest from the beginning of the day. -#' These returns then can be chain linked with geometric compounding (for -#' instance using \code{Return.cumulative} function from the -#' \code{PerformanceAnalytics} package) to yield cumulative multi-period -#' returns: -#' \deqn{1+r=\prod_{t=1}^{T}(1+r_{t})=\prod_{t=1}^{T}\frac{V_{t}}{V_{t-1}+C_{t}}} -#' In the case if there were no cashflows, the result reduces to simple -#' one-period returns. Time-weighted returns has also an interpretation in -#' terms of unit value pricing. -#' If Modified Dietz method is selected, monthly returns are computed taking -#' into account cashflows within each month: -#' \deqn{r = \frac{V_{t}-V_{t-1}-C}{V_{t-1}+\sum_{t}C_{t}\times W_{t}}} -#' where \eqn{C} - total external cash flows within a month, -#' \eqn{C_{t}} - external cashflow at time \eqn{t}, -#' \deqn{W_{t}=\frac{TD-D_{t}}{TD}} - weighting ratio to be applied to external -#' cashflow on day \eqn{t}, -#' \eqn{TD} - total number of days within the month, -#' \eqn{D_{t}} - number of days since the beginning of the month including -#' weekends and public holidays. -#' Finally monthly Modified Dietz returns can also be linked geometrically. 
-#' -#' @aliases AcctReturns -#' @param Account string name of the account to generate returns for -#' @param \dots any other passthru parameters (like \code{native} for -#' \code{.getBySymbol} -#' @param Dates xts style ISO 8601 date subset to retrieve, default NULL -#' (all dates) -#' @param Portfolios concatenated string vector for portfolio names to retrieve -#' returns on, default NULL (all portfolios) -#' @param method Used to select between time-weighted and linked modified Dietz -#' returns. May be any of: \itemize{\item timeweighted \item dietz} By default -#' time-weighted is selected -#' @return returns xts with account returns -#' @author Brian Peterson, Andrii Babii -#' @seealso PortfReturns -#' @references Christopherson, Jon A., Carino, David R., Ferson, Wayne E. -#' \emph{Portfolio Performance Measurement and Benchmarking}. McGraw-Hill. -#' 2009. Chapter 5 \cr Bacon, C. \emph{Practical Portfolio Performance -#' Measurement and Attribution}. Wiley. 2004. Chapter 2 \cr -#' @keywords portfolio returns -#' @note -#' TODO handle portfolio and account in different currencies (not hard, just not done) -#' -#' TODO explicitly handle portfolio weights -#' -#' TODO support additions and withdrawals to available capital -#' @export -AcctReturns <- -function(Account, Dates = NULL, Portfolios = NULL, method = c("timeweighted", "dietz"), ...) 
-{ # @author Brian Peterson, Andrii Babii - aname <- Account - if(!grepl("account\\.", aname)){ - Account <- try(get(paste("account", aname, sep = '.'), envir = .blotter)) - } else{ - Account <- try(get(aname, envir = .blotter)) - } - if(inherits(Account, "try-error")){ - stop(paste("Account ", aname, " not found, use initAcct() to create a new - account")) - } - if(!inherits(Account, "account")){ - stop("Account ", aname, " passed is not the name of an account object.") - } - if(is.null(Portfolios)){ - Portfolios = names(Account$portfolios) - } - - # Get xts with net trading P&L for all portfolios associated with account - table = NULL - for(pname in Portfolios){ - Portfolio <- getPortfolio(pname) - if(is.null(Dates)){ - Dates <- paste("::", last(index(Portfolio$summary)), sep = '') - } - ptable = .getBySymbol(Portfolio = Portfolio, Attribute = "Net.Trading.PL", - Dates = Dates) - if(is.null(table)){ - table=ptable - } - else{ - table=cbind(table,ptable) - } - } - if(!is.null(attr(Account, 'initEq'))){ - initEq <- as.numeric(attr(Account, 'initEq')) - if(initEq == 0){ - stop("Initial equity of zero would produce div by zero NaN, Inf, -Inf - returns, please fix in initAcct().") - } - - #TODO check portfolio and account currencies and convert if necessary - - CF = Account$summary$Additions - Account$summary$Withdrawals # Cashflows - V = initEq + reclass(rowSums(table), table) # Account values - method = method[1] - - if (method == "timeweighted"){ - # Time-weighted returns - returns = V / (lag(V) + CF) - 1 - } - - if (method == "dietz"){ - # Linked modified Dietz - C = apply.monthly(CF, sum) # total monthly cashflow - V = apply.monthly(V, first) # monthly account values - cfweighted <- function(CF){ - TD = ndays(CF) # total number of days within the period - # number of days since the beginning of the period - D = round(as.vector((index(CF) - index(CF)[1])/3600/24)) - W = (TD - D) / TD # weights - cashfl = sum(CF * W) # weighted sum of cashflows within the period - 
return(cashfl) - } - cashfl = apply.monthly(CF, cfweighted) - returns = (V - lag(V) - C) / (lag(V) + cashfl) # Modified Dietz - } - } - return(returns) -} Deleted: pkg/PortfolioAttribution/man/AcctReturns.Rd =================================================================== --- pkg/PortfolioAttribution/man/AcctReturns.Rd 2013-09-09 19:24:19 UTC (rev 3032) +++ pkg/PortfolioAttribution/man/AcctReturns.Rd 2013-09-09 19:27:15 UTC (rev 3033) @@ -1,88 +0,0 @@ -\name{AcctReturns} -\alias{AcctReturns} -\title{Calculate account returns} -\usage{ - AcctReturns(Account, Dates = NULL, Portfolios = NULL, - method = c("timeweighted", "dietz"), ...) -} -\arguments{ - \item{Account}{string name of the account to generate - returns for} - - \item{\dots}{any other passthru parameters (like - \code{native} for \code{.getBySymbol}} - - \item{Dates}{xts style ISO 8601 date subset to retrieve, - default NULL (all dates)} - - \item{Portfolios}{concatenated string vector for - portfolio names to retrieve returns on, default NULL (all - portfolios)} - - \item{method}{Used to select between time-weighted and - linked modified Dietz returns. May be any of: - \itemize{\item timeweighted \item dietz} By default - time-weighted is selected} -} -\value{ - returns xts with account returns -} -\description{ - Similar to the \code{PortfReturns} function, but gives - returns for the entire account and takes into account - external cashflows. External cashflows are defined as - contributions to or withdrawals from the account. Allows - selecting between time-weighted returns and linked - modified Dietz approach. If time-weighted method is - selected, returns at time \eqn{t} are computed using: - \deqn{r_{t}=\frac{V_{t}}{V_{t-1}+C_{t}}-1} where - \eqn{V_{t}} - account value at time \eqn{t}, \eqn{C_{t}} - - cashflow at time \eqn{t}. The implicit assumption made - here is that the cash flow is available for the portfolio - manager to invest from the beginning of the day. 
These - returns then can be chain linked with geometric - compounding (for instance using \code{Return.cumulative} - function from the \code{PerformanceAnalytics} package) to - yield cumulative multi-period returns: - \deqn{1+r=\prod_{t=1}^{T}(1+r_{t})=\prod_{t=1}^{T}\frac{V_{t}}{V_{t-1}+C_{t}}} - In the case if there were no cashflows, the result - reduces to simple one-period returns. Time-weighted - returns has also an interpretation in terms of unit value - pricing. If Modified Dietz method is selected, monthly - returns are computed taking into account cashflows within - each month: \deqn{r = - \frac{V_{t}-V_{t-1}-C}{V_{t-1}+\sum_{t}C_{t}\times - W_{t}}} where \eqn{C} - total external cash flows within - a month, \eqn{C_{t}} - external cashflow at time \eqn{t}, - \deqn{W_{t}=\frac{TD-D_{t}}{TD}} - weighting ratio to be - applied to external cashflow on day \eqn{t}, \eqn{TD} - - total number of days within the month, \eqn{D_{t}} - - number of days since the beginning of the month including - weekends and public holidays. Finally monthly Modified - Dietz returns can also be linked geometrically. -} -\note{ - TODO handle portfolio and account in different currencies - (not hard, just not done) - - TODO explicitly handle portfolio weights - - TODO support additions and withdrawals to available - capital -} -\author{ - Brian Peterson, Andrii Babii -} -\references{ - Christopherson, Jon A., Carino, David R., Ferson, Wayne - E. \emph{Portfolio Performance Measurement and - Benchmarking}. McGraw-Hill. 2009. Chapter 5 \cr Bacon, C. - \emph{Practical Portfolio Performance Measurement and - Attribution}. Wiley. 2004. Chapter 2 \cr -} -\seealso{ - PortfReturns -} -\keyword{portfolio} -\keyword{returns} - From noreply at r-forge.r-project.org Mon Sep 9 22:12:00 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 9 Sep 2013 22:12:00 +0200 (CEST) Subject: [Returnanalytics-commits] r3034 - in pkg/PortfolioAttribution: . 
R Message-ID: <20130909201200.1E712185024@r-forge.r-project.org> Author: braverock Date: 2013-09-09 22:11:59 +0200 (Mon, 09 Sep 2013) New Revision: 3034 Modified: pkg/PortfolioAttribution/DESCRIPTION pkg/PortfolioAttribution/R/Attribution.geometric.R pkg/PortfolioAttribution/R/AttributionFixedIncome.R pkg/PortfolioAttribution/R/CAPM.dynamic.R pkg/PortfolioAttribution/R/Carino.R pkg/PortfolioAttribution/R/Conv.option.R pkg/PortfolioAttribution/R/DaviesLaker.R pkg/PortfolioAttribution/R/Frongello.R pkg/PortfolioAttribution/R/Grap.R pkg/PortfolioAttribution/R/HierarchyQuintiles.R pkg/PortfolioAttribution/R/MarketTiming.R pkg/PortfolioAttribution/R/Menchero.R pkg/PortfolioAttribution/R/Modigliani.R pkg/PortfolioAttribution/R/Return.annualized.excess.R pkg/PortfolioAttribution/R/Return.level.R pkg/PortfolioAttribution/R/Weight.level.R pkg/PortfolioAttribution/R/Weight.transform.R pkg/PortfolioAttribution/R/attribution.R pkg/PortfolioAttribution/R/attribution.levels.R Log: - fix svn:keywords Property changes on: pkg/PortfolioAttribution/DESCRIPTION ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Attribution.geometric.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/AttributionFixedIncome.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/CAPM.dynamic.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Carino.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: 
pkg/PortfolioAttribution/R/Conv.option.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/DaviesLaker.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Frongello.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Grap.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/HierarchyQuintiles.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/MarketTiming.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Menchero.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Modigliani.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Return.annualized.excess.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Return.level.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Weight.level.R ___________________________________________________________________ Modified: 
svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/Weight.transform.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/attribution.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id Property changes on: pkg/PortfolioAttribution/R/attribution.levels.R ___________________________________________________________________ Modified: svn:keywords - Date,Author,Id + Date Author Id From noreply at r-forge.r-project.org Mon Sep 9 22:13:04 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 9 Sep 2013 22:13:04 +0200 (CEST) Subject: [Returnanalytics-commits] r3035 - pkg/PortfolioAnalytics/R Message-ID: <20130909201305.0DEC6185024@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-09 22:13:04 +0200 (Mon, 09 Sep 2013) New Revision: 3035 Modified: pkg/PortfolioAnalytics/R/charts.risk.R Log: Correcting error in risk budget chart for looking up the proper index Modified: pkg/PortfolioAnalytics/R/charts.risk.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-09 20:11:59 UTC (rev 3034) +++ pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-09 20:13:04 UTC (rev 3035) @@ -52,11 +52,13 @@ # list to store $pct_contrib values pct_contrib <- list() + idx <- NULL for(i in 1:length(object$objective_measures)){ if(length(object$objective_measures[[i]]) > 1){ # we have an objective measure with contribution and pct_contrib contrib[[i]] <- object$objective_measures[[i]][2] pct_contrib[[i]] <- object$objective_measures[[i]][3] + idx <- c(idx, i) } } @@ -82,15 +84,15 @@ par(mar = c(bottommargin, 4, topmargin, 2) +.1) if(risk.type == "absolute"){ - for(ii in 1:length(rb_idx)){ + for(ii in 1:length(idx)){ if(is.null(ylim)){ - ylim <- 
range(contrib[[ii]][[1]]) + ylim <- range(contrib[[idx[ii]]][[1]]) ylim[1] <- min(0, ylim[1]) ylim[2] <- ylim[2] * 1.15 } objname <- portfolio$objectives[[rb_idx[i]]]$name # Plot values of contribution - plot(contrib[[ii]][[1]], type="n", axes=FALSE, xlab="", ylim=ylim, ylab=paste(objname, "Contribution", sep=" "), main=main, cex.lab=cex.lab, ...) + plot(contrib[[idx[ii]]][[1]], type="n", axes=FALSE, xlab="", ylim=ylim, ylab=paste(objname, "Contribution", sep=" "), main=main, cex.lab=cex.lab, ...) # neighbors needs to be in the loop if there is more than one risk_budget_objective if(!is.null(neighbors)){ @@ -119,7 +121,7 @@ # also note the need for as.numeric. points() doesn't like matrix inputs } # end neighbors plot for matrix or data.frame } # end if neighbors is not null - points(contrib[[ii]][[1]], type="b", ...) + points(contrib[[idx[ii]]][[1]], type="b", ...) axis(2, cex.axis = cex.axis, col = element.color) axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) box(col = element.color) @@ -138,7 +140,7 @@ } objname <- portfolio$objectives[[rb_idx[i]]]$name # plot percentage contribution - plot(pct_contrib[[ii]][[1]], type="n", axes=FALSE, xlab='', ylim=ylim, ylab=paste(objname, " % Contribution", sep=" "), main=main, cex.lab=cex.lab, ...) + plot(pct_contrib[[idx[ii]]][[1]], type="n", axes=FALSE, xlab='', ylim=ylim, ylab=paste(objname, " % Contribution", sep=" "), main=main, cex.lab=cex.lab, ...) # Check for minimum percentage risk (min_prisk) argument if(!is.null(min_prisk)){ points(min_prisk, type="b", col="darkgray", lty="solid", lwd=2, pch=24) @@ -178,7 +180,7 @@ # also note the need for as.numeric. points() doesn't like matrix inputs } # end neighbors plot for matrix or data.frame } # end if neighbors is not null - points(pct_contrib[[ii]][[1]], type="b", ...) + points(pct_contrib[[idx[ii]]][[1]], type="b", ...) 
axis(2, cex.axis = cex.axis, col = element.color) axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis = cex.axis, col = element.color) box(col = element.color) From noreply at r-forge.r-project.org Mon Sep 9 22:23:12 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 9 Sep 2013 22:23:12 +0200 (CEST) Subject: [Returnanalytics-commits] r3036 - pkg/PortfolioAttribution Message-ID: <20130909202312.B6808185024@r-forge.r-project.org> Author: braverock Date: 2013-09-09 22:23:12 +0200 (Mon, 09 Sep 2013) New Revision: 3036 Modified: pkg/PortfolioAttribution/DESCRIPTION pkg/PortfolioAttribution/NAMESPACE Log: - fix NAMESPACE and whitespace in DESCRIPTION file Modified: pkg/PortfolioAttribution/DESCRIPTION =================================================================== --- pkg/PortfolioAttribution/DESCRIPTION 2013-09-09 20:13:04 UTC (rev 3035) +++ pkg/PortfolioAttribution/DESCRIPTION 2013-09-09 20:23:12 UTC (rev 3036) @@ -5,9 +5,9 @@ Date: $Date$ Author: Andrii Babii Maintainer: Andrii Babii -Description: This package provides functions for the ex-post Portfolio Attribution methods -from Bacon (2004), Carino (2009), etc. The package was created as a part of the -Google Summer of Code (GSoC) 2012 project. +Description: This package provides functions for the ex-post Portfolio Attribution + methods from Bacon (2004), Carino (2009), etc. + The package was initially created as a part of the Google Summer of Code (GSoC) 2012 project. 
Depends: R (>= 2.15.0), zoo, Modified: pkg/PortfolioAttribution/NAMESPACE =================================================================== --- pkg/PortfolioAttribution/NAMESPACE 2013-09-09 20:13:04 UTC (rev 3035) +++ pkg/PortfolioAttribution/NAMESPACE 2013-09-09 20:23:12 UTC (rev 3036) @@ -1,4 +1,3 @@ -export(AcctReturns) export(Attribution) export(AttributionFixedIncome) export(Attribution.geometric) From noreply at r-forge.r-project.org Mon Sep 9 23:27:22 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 9 Sep 2013 23:27:22 +0200 (CEST) Subject: [Returnanalytics-commits] r3037 - in pkg/PortfolioAnalytics: demo sandbox Message-ID: <20130909212722.705BC18538D@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-09 23:27:22 +0200 (Mon, 09 Sep 2013) New Revision: 3037 Added: pkg/PortfolioAnalytics/demo/backwards_compat.R pkg/PortfolioAnalytics/demo/demo_DEoptim.R pkg/PortfolioAnalytics/demo/demo_efficient_frontier.R pkg/PortfolioAnalytics/demo/demo_factor_exposure.R pkg/PortfolioAnalytics/demo/demo_group_ROI.R pkg/PortfolioAnalytics/demo/demo_maxret_ROI.R pkg/PortfolioAnalytics/demo/demo_opt_combine.R pkg/PortfolioAnalytics/demo/demo_random_portfolios.R pkg/PortfolioAnalytics/demo/demo_return_target.R pkg/PortfolioAnalytics/demo/demo_weight_concentration.R Removed: pkg/PortfolioAnalytics/sandbox/testing_DE_opt_script.R pkg/PortfolioAnalytics/sandbox/testing_back_compat.R pkg/PortfolioAnalytics/sandbox/testing_constraint_fn_map.R pkg/PortfolioAnalytics/sandbox/testing_diversification.R pkg/PortfolioAnalytics/sandbox/testing_efficient_frontier.R pkg/PortfolioAnalytics/sandbox/testing_factor_exposure.R pkg/PortfolioAnalytics/sandbox/testing_group_multlevels.R pkg/PortfolioAnalytics/sandbox/testing_maxret_ROI.R pkg/PortfolioAnalytics/sandbox/testing_mult_opt_weights.R pkg/PortfolioAnalytics/sandbox/testing_return_target.R Modified: pkg/PortfolioAnalytics/demo/00Index pkg/PortfolioAnalytics/sandbox/testing_weight_conc.R 
Log: Moving several of the testing scripts from the sandbox folder to the demo folder. Updating the index file in the demo folder with added demos. Modified: pkg/PortfolioAnalytics/demo/00Index =================================================================== --- pkg/PortfolioAnalytics/demo/00Index 2013-09-09 20:23:12 UTC (rev 3036) +++ pkg/PortfolioAnalytics/demo/00Index 2013-09-09 21:27:22 UTC (rev 3037) @@ -2,6 +2,15 @@ sortino Maximize the Sortino Ratio of the portfolio testing_ROI Demonstrate creating constraint object and solve five basic convex portfolio optimization problems with ROI using the 'edhec' dataset. testing_pso Demonstrate creating constraint object and solve portfolio optimization problems with pso using the 'edhec' dataset. These sample problems are similar to those used in testing_ROI, so that one can compare solutions easily. -testing_GenSA Demonstrate the creating constraint object and solve portfolio optimization problems with GenSA using the 'edhec' datset. These sample problems are similar to those used in testing_ROI, so that one can compare solutions easily. -demo_ROI Demonstrate constraints and objectives that can be solved with ROI +testing_GenSA Demonstrate creating the constraint object and solve portfolio optimization problems with GenSA using the 'edhec' datset. These sample problems are similar to those used in testing_ROI, so that one can compare solutions easily. +demo_ROI Demonstrate constraints and objectives that can be solved with ROI. +demo_DEoptim Demonstrate solving portfolio optimization problems using DEoptim as the solver. The demo solvers 4 problems: 1) Maximize mean return per unit mETL 2) Minimize annualized standard deviation 3) Minimize annualized standard deviation with equal contribution to risk using standard deviation as the risk measure 4) Maximize mean return with equal contribution to risk using modified ETL as the risk measure. 
+demo_efficient_frontier Demonstrate how to create and chart efficient frontiers. +demo_factor_exposure Demonstrate how to use the factor_exposure constraint. +demo_group_ROI Demonstrate how to use group constraints using the ROI solver. +demo_maxret_ROI Demonstrate maximizing return using the ROI solver. +demo_opt_combine Demonstrate how to combine and chart the optimal weights for multiple optimizations. +demo_weight_concentration Demonstrate how to use the weight concentration objective. +backwards_compat Demonstrate how to solve optimization problems using v1 specification with a v1_constraint object. +demo_random_portfolios Demonstrate examples from script.workshop2012.R using random portfolios Copied: pkg/PortfolioAnalytics/demo/backwards_compat.R (from rev 3022, pkg/PortfolioAnalytics/sandbox/testing_back_compat.R) =================================================================== --- pkg/PortfolioAnalytics/demo/backwards_compat.R (rev 0) +++ pkg/PortfolioAnalytics/demo/backwards_compat.R 2013-09-09 21:27:22 UTC (rev 3037) @@ -0,0 +1,35 @@ +library(PortfolioAnalytics) +library(DEoptim) +library(ROI) +require(ROI.plugin.glpk) + +data(edhec) +ret <- edhec[, 1:4] +funds <- colnames(ret) + +# Set up constraint object using v1 specification +gen.constr <- constraint(assets=funds, min=0, max=0.55, min_sum=0.99, max_sum=1, weight_seq=generatesequence(min=0, max=0.55, by=0.002)) +class(gen.constr) + +# Add an objective to the gen.constr object +gen.constr <- add.objective(constraints=gen.constr, type="return", name="mean", enabled=TRUE) + +# Run the optimization +# optimize.portfolio will detect that a v1_constraint object has been passed in +# and will update to the v2 specification using a portfolio object with +# constraints and objectives from the v1_constraint object. 
+ +# Random +optrpv1 <- optimize.portfolio(R=ret, constraints=gen.constr, optimize_method="random", search_size=2000) +print(optrpv1$portfolio) +print(optrpv1) + +# DEoptim +optdev1 <- optimize.portfolio(R=ret, constraints=gen.constr, optimize_method="DEoptim", search_size=2000) +print(optdev1) + +# ROI +optroiv1 <- optimize.portfolio(R=ret, constraints=gen.constr, optimize_method="ROI") +print(optroiv1) + + Copied: pkg/PortfolioAnalytics/demo/demo_DEoptim.R (from rev 3022, pkg/PortfolioAnalytics/sandbox/testing_DE_opt_script.R) =================================================================== --- pkg/PortfolioAnalytics/demo/demo_DEoptim.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_DEoptim.R 2013-09-09 21:27:22 UTC (rev 3037) @@ -0,0 +1,198 @@ + +# The following optimization problems will be run +# mean-mETL +# - maximize mean-to-ETL (i.e. reward-to-risk) +# MinSD +# - minimize annualized standard deviation +# eqStdDev +# - equal risk (volatility) +# MeanRL +# - maximize mean with mETL risk limits + +# Include optimizer and multi-core packages +library(PortfolioAnalytics) +require(quantmod) +require(DEoptim) +require(foreach) + +# The multicore package, and therefore registerDoMC, should not be used in a +# GUI environment, because multiple processes then share the same GUI. Only use +# when running from the command line. 
+# require(doMC) +# registerDoMC(3) + +data(edhec) + +# Drop some indexes and reorder +edhec.R <- edhec[,c("Convertible Arbitrage", "Equity Market Neutral", + "Fixed Income Arbitrage", "Event Driven", "CTA Global", + "Global Macro", "Long/Short Equity")] + +# Annualized standard deviation +pasd <- function(R, weights){ + as.numeric(StdDev(R=R, weights=weights)*sqrt(12)) # hardcoded for monthly data + # as.numeric(StdDev(R=R, weights=weights)*sqrt(4)) # hardcoded for quarterly data +} + +# Set some parameters +rebalance_period = 'quarters' # uses endpoints identifiers from xts +clean = "none" #"boudt" +permutations = 4000 + +# Create initial portfolio object used to initialize ALL the bouy portfolios +init.portf <- portfolio.spec(assets=colnames(edhec.R), + weight_seq=generatesequence(by=0.005)) +# Add leverage constraint +init.portf <- add.constraint(portfolio=init.portf, + type="leverage", + min_sum=0.99, + max_sum=1.01) +# Add box constraint +init.portf <- add.constraint(portfolio=init.portf, + type="box", + min=0.05, + max=0.3) + +#Add measure 1, mean return +init.portf <- add.objective(portfolio=init.portf, + type="return", # the kind of objective this is + name="mean", # name of the function + enabled=TRUE, # enable or disable the objective + multiplier=0 # calculate it but don't use it in the objective +) + +# Add measure 2, annualized standard deviation +init.portf <- add.objective(portfolio=init.portf, + type="risk", # the kind of objective this is + name="pasd", # to minimize from the sample + enabled=TRUE, # enable or disable the objective + multiplier=0 # calculate it but don't use it in the objective +) + +# Add measure 3, ES with p=(1-1/12) +# set confidence for ES +p <- 1-1/12 # for monthly + +init.portf <- add.objective(portfolio=init.portf, + type="risk", # the kind of objective this is + name="ES", # the function to minimize + enabled=FALSE, # enable or disable the objective + multiplier=0, # calculate it but don't use it in the objective + 
arguments=list(p=p) +) + +# Set up portfolio for Mean-mETL +MeanmETL.portf <- init.portf +MeanmETL.portf$objectives[[1]]$multiplier=-1 # mean +MeanmETL.portf$objectives[[3]]$enabled=TRUE # mETL +MeanmETL.portf$objectives[[3]]$multiplier=1 # mETL + +# Set up portfolio for min pasd +MinSD.portf <- init.portf +MinSD.portf$objectives[[2]]$multiplier=1 + +# Set up portfolio for eqStdDev +EqSD.portf <- add.objective(portfolio=init.portf, + type="risk_budget", + name="StdDev", + min_concentration=TRUE, + arguments = list(p=(1-1/12))) +# Without a sub-objective, we get a somewhat undefined result, since there are (potentially) many Equal SD contribution portfolios. +EqSD.portf$objectives[[2]]$multiplier=1 # min pasd + +# Set up portfolio to maximize mean with mETL risk limit +MeanRL.portf <- add.objective(portfolio=init.portf, + type='risk_budget', + name="ES", + min_prisk=-Inf, + max_prisk=0.4, + arguments=list(method="modified", p=p)) +MeanRL.portf$objectives[[1]]$multiplier=-1 # mean +# Change box constraints max to vector of 1s +MeanRL.portf$constraints[[2]]$max=rep(1, 7) + +# Set the 'R' variable +R <- edhec.R + +start_time<-Sys.time() +print(paste('Starting optimization at',Sys.time())) + +##### mean-mETL ##### +MeanmETL.DE <- optimize.portfolio(R=R, + portfolio=MeanmETL.portf, + optimize_method="DEoptim", + trace=TRUE, + search_size=2000, + traceDE=5) +print(MeanmETL.DE) +print(MeanmETL.DE$elapsed_time) +chart.Weights(object=MeanmETL.DE, main="Mean-mETL Weights") +chart.RiskReward(object=MeanmETL.DE, return.col="mean", risk.col="ES") +# save(MeanmETL.DE, file=paste('MeanmETL',Sys.Date(),'rda',sep='.')) + +# Evaluate the objectives with DE through time +# MeanmETL.DE.t <- optimize.portfolio.rebalancing(R=R, +# portfolio=MeanSD.portf, +# optimize_method="random", +# trace=TRUE, +# search_size=2000, +# rebalance_on=rebalance_period, +# training_period=36) +# MeanmETL.w = extractWeights.rebal(MeanmETL.DE.t) +# MeanmETL=Return.rebalancing(edhec.R, MeanmETL) +# 
colnames(MeanmETL) = "MeanmETL" +# save(MeanmETL.DE, MeanmETL.DE.t, MeanmETL.w, MeanmETL, file=paste('MeanmETL',Sys.Date(),'rda',sep='.')) + +print(paste('Completed MeanmETL optimization at',Sys.time(),'moving on to MinSD')) + + +##### min pasd ##### +MinSD.DE <- optimize.portfolio(R=R, + portfolio=MinSD.portf, + optimize_method="DEoptim", + trace=TRUE, + search_size=2000, + traceDE=5) +print(MinSD.DE) +print(MinSD.DE$elapsed_time) +chart.Weights(object=MinSD.DE, plot.type="barplot", legend.loc=NULL) +chart.RiskReward(object=MinSD.DE, return.col="mean", risk.col="pasd") +# save(MinSD.DE, file=paste('MinSD',Sys.Date(),'rda',sep='.')) + +print(paste('Completed MinSD optimization at',Sys.time(),'moving on to EqSD')) + +##### EqSD ##### +EqSD.DE <- optimize.portfolio(R=R, + portfolio=EqSD.portf, + optimize_method="DEoptim", + trace=TRUE, + search_size=2000, + traceDE=5) +print(EqSD.DE) +print(EqSD.DE$elapsed_time) +# save(EqSD.DE, file=paste('EqSD',Sys.Date(),'rda',sep='.')) + +chart.Weights(object=EqSD.DE) +chart.RiskReward(object=EqSD.DE, return.col="mean", risk.col="StdDev") +chart.RiskBudget(object=EqSD.DE, risk.type="absolute") +chart.RiskBudget(object=EqSD.DE, risk.type="pct_contrib") + +print(paste('Completed EqSD optimization at',Sys.time(),'moving on to MeanRL')) + +##### MeanRL.DE ##### +MeanRL.DE <- optimize.portfolio(R=R, + portfolio=MeanRL.portf, + optimize_method="DEoptim", + trace=TRUE, + search_size=2000, + traceDE=5) +print(MeanRL.DE) +print(MeanRL.DE$elapsed_time) +# save(MeanRL.DE, file=paste('MeanRL',Sys.Date(),'rda',sep='.')) + +chart.Weights(object=MeanRL.DE) +chart.RiskBudget(object=MeanRL.DE, risk.type="pct_contrib", neighbors=25) + +end_time<-Sys.time() +print("Optimization Complete") +print(end_time-start_time) Copied: pkg/PortfolioAnalytics/demo/demo_efficient_frontier.R (from rev 3022, pkg/PortfolioAnalytics/sandbox/testing_efficient_frontier.R) =================================================================== --- 
pkg/PortfolioAnalytics/demo/demo_efficient_frontier.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_efficient_frontier.R 2013-09-09 21:27:22 UTC (rev 3037) @@ -0,0 +1,163 @@ +# Script to test efficient frontiers + +# Efficient frontiers can be plotted two ways +# 1. Run optimize.portfolio with trace=TRUE and then chart that object +# 2. create an efficient frontier and then chart that object + +library(PortfolioAnalytics) +library(DEoptim) +library(ROI) +require(ROI.plugin.quadprog) +require(ROI.plugin.glpk) + +rm(list=ls()) + +data(edhec) +R <- edhec[, 1:5] +# change the column names for better legends in plotting +colnames(R) <- c("CA", "CTAG", "DS", "EM", "EQM") +funds <- colnames(R) + +# initial portfolio object +init <- portfolio.spec(assets=funds) +# initial constraints +init <- add.constraint(portfolio=init, type="full_investment") +init <- add.constraint(portfolio=init, type="box", min=0.15, max=0.45) +init <- add.constraint(portfolio=init, type="group", + groups=list(c(1, 3), + c(2, 4, 5)), + group_min=0.05, + group_max=0.7) + +# initial objective +init <- add.objective(portfolio=init, type="return", name="mean") + +# create mean-etl portfolio +meanetl.portf <- add.objective(portfolio=init, type="risk", name="ES") + +# create mean-var portfolio +meanvar.portf <- add.objective(portfolio=init, type="risk", name="var", risk_aversion=1e6) + +# create efficient frontiers + +# mean-var efficient frontier +meanvar.ef <- create.EfficientFrontier(R=R, portfolio=meanvar.portf, type="mean-StdDev") +print(meanvar.ef) +summary(meanvar.ef, digits=2) +meanvar.ef$frontier + +# The RAR.text argument can be used for the risk-adjusted-return name on the legend, +# by default it is 'Modified Sharpe Ratio' +chart.EfficientFrontier(meanvar.ef, match.col="StdDev", type="l", RAR.text="Sharpe Ratio", pch=4) + +# The tangency portfolio and line are plotted by default, these can be ommitted +# by setting rf=NULL +chart.EfficientFrontier(meanvar.ef, match.col="StdDev", type="b", 
rf=NULL) + +# The tangency line can be omitted with tangent.line=FALSE. The tangent portfolio, +# risk-free rate and Sharpe Ratio are still included in the plot +chart.EfficientFrontier(meanvar.ef, match.col="StdDev", type="l", tangent.line=FALSE) + +# The assets can be omitted with chart.assets=FALSE +chart.EfficientFrontier(meanvar.ef, match.col="StdDev", type="l", + tangent.line=FALSE, chart.assets=FALSE) + +# Just the names of the assets can be omitted with labels.assets=FALSE and the +# plotting character can be changed with pch.assets +chart.EfficientFrontier(meanvar.ef, match.col="StdDev", type="l", + tangent.line=FALSE, labels.assets=FALSE, pch.assets=1) + +# Chart the asset weights along the efficient frontier +chart.Weights.EF(meanvar.ef, colorset=bluemono, match.col="StdDev") + +# Chart the group weights along the efficient frontier +chart.Weights.EF(meanvar.ef, colorset=bluemono, by.groups=TRUE, match.col="StdDev") + +# The labels for Mean, Weight, and StdDev can be increased or decreased with +# the cex.lab argument. The default is cex.lab=0.8 +chart.Weights.EF(meanvar.ef, colorset=bluemono, match.col="StdDev", main="", cex.lab=1) + +# If you have a lot of assets and they don't fit with the default legend, you +# can set legend.loc=NULL and customize the plot. 
+par(mar=c(8, 4, 4, 2)+0.1, xpd=TRUE) +chart.Weights.EF(meanvar.ef, colorset=bluemono, match.col="StdDev", legend.loc=NULL) +legend("bottom", legend=colnames(R), inset=-1, fill=bluemono, bty="n", ncol=3, cex=0.8) +par(mar=c(5, 4, 4, 2)+0.1, xpd=FALSE) + +# run optimize.portfolio and chart the efficient frontier for that object +opt_meanvar <- optimize.portfolio(R=R, portfolio=meanvar.portf, optimize_method="ROI", trace=TRUE) + +# The efficient frontier is created from the 'opt_meanvar' object by getting +# The portfolio and returns objects and then passing those to create.EfficientFrontier +chart.EfficientFrontier(opt_meanvar, match.col="StdDev", n.portfolios=25, type="l") + +# Rerun the optimization with a new risk aversion parameter to change where the +# portfolio is along the efficient frontier. The 'optimal' portfolio plotted on +# the efficient frontier is the optimal portfolio returned by optimize.portfolio. +meanvar.portf$objectives[[2]]$risk_aversion=0.25 +opt_meanvar <- optimize.portfolio(R=R, portfolio=meanvar.portf, optimize_method="ROI", trace=TRUE) +chart.EfficientFrontier(opt_meanvar, match.col="StdDev", n.portfolios=25, type="l") + +# The weights along the efficient frontier can be plotted by passing in the +# optimize.portfolio output object +chart.Weights.EF(opt_meanvar, match.col="StdDev") + +chart.Weights.EF(opt_meanvar, match.col="StdDev", by.groups=TRUE) + +# Extract the efficient frontier and then plot it +# Note that if you want to do multiple charts of the efficient frontier from +# the optimize.portfolio object, it is best to extractEfficientFrontier as shown +# below +ef <- extractEfficientFrontier(object=opt_meanvar, match.col="StdDev", n.portfolios=15) +print(ef) +summary(ef, digits=5) +chart.Weights.EF(ef, match.col="StdDev", colorset=bluemono) +chart.Weights.EF(ef, match.col="StdDev", colorset=bluemono, by.groups=TRUE) + +# mean-etl efficient frontier +meanetl.ef <- create.EfficientFrontier(R=R, portfolio=meanetl.portf, 
type="mean-ES") +print(meanetl.ef) +summary(meanetl.ef) +chart.EfficientFrontier(meanetl.ef, match.col="ES", main="mean-ETL Efficient Frontier", type="l", col="blue", RAR.text="STARR") +chart.Weights.EF(meanetl.ef, colorset=bluemono, match.col="ES") +chart.Weights.EF(meanetl.ef, by.groups=TRUE, colorset=bluemono, match.col="ES") + +# mean-etl efficient frontier using random portfolios +meanetl.rp.ef <- create.EfficientFrontier(R=R, portfolio=meanetl.portf, type="random", match.col="ES") +chart.EfficientFrontier(meanetl.rp.ef, match.col="ES", main="mean-ETL RP Efficient Frontier", type="l", col="blue", rf=0) +chart.Weights.EF(meanetl.rp.ef, colorset=bluemono, match.col="ES") + +# mean-etl efficient frontier with optimize.portfolio output +opt_meanetl <- optimize.portfolio(R=R, portfolio=meanetl.portf, optimize_method="random", search_size=2000, trace=TRUE) +chart.EfficientFrontier(meanetl.rp.ef, match.col="ES", main="mean-ETL RP Efficient Frontier", type="l", col="blue", rf=0, RAR.text="STARR") + +##### overlay efficient frontiers of multiple portfolios ##### +# Create a mean-var efficient frontier for multiple portfolios and overlay the efficient frontier lines +# set up an initial portfolio with the full investment constraint and mean and var objectives +init.portf <- portfolio.spec(assets=funds) +init.portf <- add.constraint(portfolio=init.portf, type="full_investment") +init.portf <- add.objective(portfolio=init.portf, type="risk", name="var") +init.portf <- add.objective(portfolio=init.portf, type="return", name="mean") + +# long only constraints +lo.portf <- add.constraint(portfolio=init.portf, type="long_only") + +# box constraints +box.portf <- add.constraint(portfolio=init.portf, type="box", min=0.05, max=0.65) + +# group constraints (also add long only constraints to the group portfolio) +group.portf <- add.constraint(portfolio=init.portf, type="group", + groups=list(groupA=c(1, 3), + groupB=c(2, 4, 5)), + group_min=c(0.25, 0.15), + group_max=c(0.75, 
0.55)) +group.portf <- add.constraint(portfolio=group.portf, type="long_only") +# optimize.portfolio(R=R, portfolio=group.portf, optimize_method="ROI") + +portf.list <- list(lo.portf, box.portf, group.portf) +legend.labels <- c("Long Only", "Box", "Group + Long Only") +chart.EfficientFrontierOverlay(R=R, portfolio_list=portf.list, type="mean-StdDev", + match.col="StdDev", legend.loc="topleft", + legend.labels=legend.labels, cex.legend=0.6, + labels.assets=FALSE, pch.assets=18) + Copied: pkg/PortfolioAnalytics/demo/demo_factor_exposure.R (from rev 3022, pkg/PortfolioAnalytics/sandbox/testing_factor_exposure.R) =================================================================== --- pkg/PortfolioAnalytics/demo/demo_factor_exposure.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_factor_exposure.R 2013-09-09 21:27:22 UTC (rev 3037) @@ -0,0 +1,130 @@ +library(PortfolioAnalytics) +library(ROI) +require(ROI.plugin.quadprog) +require(ROI.plugin.glpk) +library(Rglpk) +library(DEoptim) + +data(edhec) +ret <- edhec[, 1:4] + +# Create portfolio object +pspec <- portfolio.spec(assets=colnames(ret)) +# Leverage constraint +lev_constr <- weight_sum_constraint(min_sum=1, max_sum=1) +# box constraint +lo_constr <- box_constraint(assets=pspec$assets, min=c(0.01, 0.02, 0.03, 0.04), max=0.65) +# group constraint +grp_constr <- group_constraint(assets=pspec$assets, groups=c(2, 1, 1), group_min=0.1, group_max=0.4) +# position limit constraint +pl_constr <- position_limit_constraint(assets=pspec$assets, max_pos=4) + +# Make up a B matrix for an industry factor model +# dummyA, dummyB, and dummyC could be industries, sectors, etc. 
+B <- cbind(c(1, 1, 0, 0), + c(0, 0, 1, 0), + c(0, 0, 0, 1)) +rownames(B) <- colnames(ret) +colnames(B) <- c("dummyA", "dummyB", "dummyC") +print(B) +lower <- c(0.1, 0.1, 0.1) +upper <- c(0.4, 0.4, 0.4) + +# Industry exposure constraint +# The exposure constraint and group constraint are equivalent to test that they +# result in the same solution +exp_constr <- factor_exposure_constraint(assets=pspec$assets, B=B, lower=lower, upper=upper) + +# objective to minimize variance +var_obj <- portfolio_risk_objective(name="var") +# objective to maximize return +ret_obj <- return_objective(name="mean") +# objective to minimize ETL +etl_obj <- portfolio_risk_objective(name="ETL") + +# group constraint and exposure constraint should result in same solution + +##### minimize var objective ##### +opta <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, grp_constr), + objectives=list(var_obj), + optimize_method="ROI") +opta + +optb <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, exp_constr), + objectives=list(var_obj), + optimize_method="ROI") +optb + +all.equal(opta$weights, optb$weights) + +##### maximize return objective ##### +optc <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, grp_constr), + objectives=list(ret_obj), + optimize_method="ROI") +optc + +optd <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, exp_constr), + objectives=list(ret_obj), + optimize_method="ROI") +optd + +all.equal(optc$weights, optd$weights) + +##### minimize ETL objective ##### +opte <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, grp_constr), + objectives=list(etl_obj), + optimize_method="ROI") +opte + +optf <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, exp_constr), + objectives=list(etl_obj), + optimize_method="ROI") +optf + +all.equal(opte$weights, optf$weights) 
+ +##### maximize return objective with DEoptim ##### +set.seed(123) +optde1 <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, grp_constr), + objectives=list(ret_obj), + optimize_method="DEoptim", + search_size=2000, + trace=TRUE) +optde1 + +set.seed(123) +optde2 <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, exp_constr), + objectives=list(ret_obj), + optimize_method="DEoptim", + search_size=2000, + trace=TRUE) +optde2 + +all.equal(optde1$weights, optde2$weights) + +##### maximize return objective with random ##### +optrp1 <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, grp_constr), + objectives=list(ret_obj), + optimize_method="random", + search_size=2000, + trace=TRUE) +optrp1 + +optrp2 <- optimize.portfolio(R=ret, portfolio=pspec, + constraints=list(lev_constr, lo_constr, exp_constr), + objectives=list(ret_obj), + optimize_method="random", + search_size=2000, + trace=TRUE) +optrp2 + +all.equal(optrp1$weights, optrp2$weights) \ No newline at end of file Copied: pkg/PortfolioAnalytics/demo/demo_group_ROI.R (from rev 3022, pkg/PortfolioAnalytics/sandbox/testing_group_multlevels.R) =================================================================== --- pkg/PortfolioAnalytics/demo/demo_group_ROI.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_group_ROI.R 2013-09-09 21:27:22 UTC (rev 3037) @@ -0,0 +1,42 @@ + +library(PortfolioAnalytics) +library(ROI) +library(ROI.plugin.quadprog) +library(ROI.plugin.quadprog) + + +data(edhec) +R <- edhec[, 1:4] +colnames(R) <- c("CA", "CTAG", "DS", "EM") +funds <- colnames(R) + +# set up portfolio with objectives and constraints +pspec <- portfolio.spec(assets=funds) +pspec <- add.constraint(portfolio=pspec, type="full_investment") +pspec <- add.constraint(portfolio=pspec, type="long_only") +# add two levels of grouping +pspec <- add.constraint(portfolio=pspec, type="group", + groups=list(groupA=c(1, 3), + groupB=c(2, 
4), + geoA=c(1, 2, 4), + geoB=3), + group_min=c(0.15, 0.25, 0.15, 0.2), + group_max=c(0.4, 0.7, 0.8, 0.62)) +pspec + +maxret <- add.objective(portfolio=pspec, type="return", name="mean") +opt_maxret <- optimize.portfolio(R=R, portfolio=maxret, optimize_method="ROI") +summary(opt_maxret) + +minvar <- add.objective(portfolio=pspec, type="risk", name="var") +opt_minvar <- optimize.portfolio(R=R, portfolio=minvar, optimize_method="ROI") +summary(opt_minvar) + +minetl <- add.objective(portfolio=pspec, type="risk", name="ETL") +opt_minetl <- optimize.portfolio(R=R, portfolio=minetl, optimize_method="ROI") +summary(opt_minetl) + +maxqu <- add.objective(portfolio=pspec, type="return", name="mean") +maxqu <- add.objective(portfolio=maxqu, type="risk", name="var", risk_aversion=0.25) +opt_maxqu <- optimize.portfolio(R=R, portfolio=maxqu, optimize_method="ROI") +summary(opt_maxqu) Copied: pkg/PortfolioAnalytics/demo/demo_maxret_ROI.R (from rev 3022, pkg/PortfolioAnalytics/sandbox/testing_maxret_ROI.R) =================================================================== --- pkg/PortfolioAnalytics/demo/demo_maxret_ROI.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_maxret_ROI.R 2013-09-09 21:27:22 UTC (rev 3037) @@ -0,0 +1,56 @@ +library(PortfolioAnalytics) +library(foreach) +library(iterators) +library(quadprog) +library(Rglpk) +library(ROI) +require(ROI.plugin.glpk) +require(ROI.plugin.quadprog) + + +data(edhec) +ret <- edhec[, 1:4] +funds <- colnames(ret) + +##### Method 1 ##### +# Set up portfolio object with constraints and objectives to maximize return +# using the portfolio object to add constraints and objectives +pspec1 <- portfolio.spec(assets=funds) +pspec1 <- add.constraint(portfolio=pspec1, type="full_investment") +pspec1 <- add.constraint(portfolio=pspec1, type="box", min=0, max=0.65) +pspec1 <- add.objective(portfolio=pspec1, type="return", name="mean") + +opt1 <- optimize.portfolio(R=ret, portfolio=pspec1, optimize_method="ROI") + +##### Method 2 ##### +# Set up 
portfolio object with constraints and objective to maximize return +# using separate constraint and objective objects +pspec2 <- portfolio.spec(assets=funds) +weight_constr <- weight_sum_constraint(min_sum=1, max_sum=1) +box_constr <- box_constraint(assets=pspec2$assets, min=0, max=0.65) +ret_obj <- return_objective(name="mean") +cset <- list(weight_constr, box_constr) +obj <- list(ret_obj) + +opt2 <- optimize.portfolio(R=ret, portfolio=pspec2, constraints=cset, + objectives=obj, optimize_method="ROI") + +all.equal(extractWeights(opt1), extractWeights(opt2)) + +##### Method 1 Backtesting ##### +opt_rebal1 <- optimize.portfolio.rebalancing(R=ret, portfolio=pspec1, + optimize_method="ROI", + rebalance_on="months") +class(opt_rebal1) +inherits(opt_rebal1, "optimize.portfolio.rebalancing") +wts1 <- extractWeights(opt_rebal1) + +##### Method 2 Backtesting ##### +opt_rebal2 <- optimize.portfolio.rebalancing(R=ret, portfolio=pspec2, + constraints=cset, + objectives=obj, + optimize_method="ROI", + rebalance_on="months") +wts2 <- extractWeights(opt_rebal2) +all.equal(wts1, wts2) + Copied: pkg/PortfolioAnalytics/demo/demo_opt_combine.R (from rev 3026, pkg/PortfolioAnalytics/sandbox/testing_mult_opt_weights.R) =================================================================== --- pkg/PortfolioAnalytics/demo/demo_opt_combine.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_opt_combine.R 2013-09-09 21:27:22 UTC (rev 3037) @@ -0,0 +1,47 @@ + +library(PortfolioAnalytics) +library(ROI) +library(ROI.plugin.glpk) +library(ROI.plugin.quadprog) + +# We should be able to compare portfolios with different constraints, +# objectives, and number of assets + +data(edhec) +R <- edhec[, 1:4] +funds <- colnames(R) + +##### Construct Portfolios ##### +# GMV long only +port.gmv.lo <- portfolio.spec(assets=funds) +port.gmv.lo <- add.constraint(portfolio=port.gmv.lo, type="full_investment") +port.gmv.lo <- add.constraint(portfolio=port.gmv.lo, type="long_only") +port.gmv.lo <- 
add.objective(portfolio=port.gmv.lo, type="risk", name="var") + +# GMV with shorting +port.gmv.short <- portfolio.spec(assets=funds) +port.gmv.short <- add.constraint(portfolio=port.gmv.short, type="full_investment") +port.gmv.short <- add.constraint(portfolio=port.gmv.short, type="box", min=-0.3, max=1) +port.gmv.short <- add.objective(portfolio=port.gmv.short, type="risk", name="var") + +# QU box constraints +port.qu <- portfolio.spec(assets=funds) +port.qu <- add.constraint(portfolio=port.qu, type="full_investment") +port.qu <- add.constraint(portfolio=port.qu, type="box", min=0.05, max=0.6) +port.qu <- add.objective(portfolio=port.qu, type="risk", name="var", risk_aversion=0.25) +port.qu <- add.objective(portfolio=port.qu, type="return", name="mean") + +##### Run Optimizations ##### +opt.gmv.lo <- optimize.portfolio(R=R, portfolio=port.gmv.lo, optimize_method="ROI", trace=TRUE) +opt.gmv.short <- optimize.portfolio(R=R, portfolio=port.gmv.short, optimize_method="ROI", trace=TRUE) +opt.qu <- optimize.portfolio(R=R, portfolio=port.qu, optimize_method="ROI", trace=TRUE) + + +opt <- optimizations.combine(list(GMV.LO=opt.gmv.lo, GMV.SHORT=opt.gmv.short, QU=opt.qu)) +class(opt) + +chart.Weights(opt, legend.loc="topleft", cex.legend=0.8, ylim=c(-0.3, 1)) + +chart.Weights(opt, plot.type="bar", cex.lab=0.8, legend.loc="topleft", cex.legend=0.8, ylim=c(-0.3, 1)) + +extractWeights(opt) Copied: pkg/PortfolioAnalytics/demo/demo_random_portfolios.R (from rev 3022, pkg/PortfolioAnalytics/sandbox/testing_rp_opt_script.R) =================================================================== --- pkg/PortfolioAnalytics/demo/demo_random_portfolios.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_random_portfolios.R 2013-09-09 21:27:22 UTC (rev 3037) @@ -0,0 +1,211 @@ +# Demonstrate examples from script.workshop2012.R using the v2 specification + [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3037 From noreply at r-forge.r-project.org Mon Sep 9 
23:31:36 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 9 Sep 2013 23:31:36 +0200 (CEST) Subject: [Returnanalytics-commits] r3038 - in pkg/PerformanceAnalytics/sandbox/Shubhankit: . noniid.sm/vignettes sandbox Message-ID: <20130909213136.C596D18538D@r-forge.r-project.org> Author: shubhanm Date: 2013-09-09 23:31:35 +0200 (Mon, 09 Sep 2013) New Revision: 3038 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/AcarSim.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/LoSharpe.Rnw Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf Log: Addition of documentation file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.Rnw (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.Rnw 2013-09-09 21:31:35 UTC (rev 3038) @@ -0,0 +1,257 @@ +%% no need for \DeclareGraphicsExtensions{.pdf,.eps} + +\documentclass[12pt,letterpaper,english]{article} +\usepackage{times} +\usepackage[T1]{fontenc} +\IfFileExists{url.sty}{\usepackage{url}} + {\newcommand{\url}{\texttt}} + +\usepackage{babel} +%\usepackage{noweb} +\usepackage{Rd} + +\usepackage{Sweave} +\SweaveOpts{engine=R,eps=FALSE} +%\VignetteIndexEntry{Performance Attribution from Bacon} +%\VignetteDepends{PerformanceAnalytics} +%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} 
+%\VignettePackage{PerformanceAnalytics} + +%\documentclass[a4paper]{article} +%\usepackage[noae]{Sweave} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} +%\usepackage{graphicx} +%\usepackage{graphicx, verbatim} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage{graphicx} + +\title{Umsmooth Return Models Impact} +\author{Shubhankit Mohan} + +\begin{document} +\SweaveOpts{concordance=TRUE} + +\maketitle + + +\begin{abstract} +The fact that many hedge fund returns exhibit extraordinary levels of serial correlation is now well-known and generally accepted as fact.Because hedge fund strategies have exceptionally high autocorrelations in reported returns and this is taken as evidence of return smoothing, we first develop a method to completely eliminate any order of serial correlation across a wide array of time series processes.Once this is complete, we can determine the underlying risk factors to the "true" hedge fund returns and examine the incremental benefit attained from using nonlinear payoffs relative to the more traditional linear factors. +\end{abstract} +\tableofcontents + + +<>= +library(PerformanceAnalytics) +data(edhec) +@ + +<>= +require(noniid.sm) #source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.Okunev.R") +@ + +\section{Okunev White Model Methodology} +Given a sample of historical returns \((R_1,R_2, . . .,R_T)\),the method assumes the fund manager smooths returns in the following manner: + +%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let +%$Z = \sin(X)$. $\sqrt{X}$. 
+ +%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ +%e^{2 \mu} = 1 +%\begin{equation} +%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ +%\end{equation} +\begin{equation} + r_{0,t} = \sum_{i}^{} \beta_{i}r_{0,t-i} + (1- \alpha)r_{m,t} \\ +\end{equation} + + +\begin{equation} +where : \sum_{i}^{} \beta_{i} = (1- \alpha) \\ +\end{equation} + +\(r_{0,t}\) : is the observed (reported) return at time t (with 0 adjustments' to reported returns), \\ +\(r_{m,t}\) : is the true underlying (unreported) return at time t (determined by making m adjustments to reported returns). \\ + +The objective is to determine the true underlying return by removing the +autocorrelation structure in the original return series without making any assumptions regarding the actual time series properties of the underlying process. We are implicitly assuming by this approach that the autocorrelations that arise in reported returns are entirely due to the smoothing behavior funds engage in when reporting results. In fact, the method may be adopted to produce any desired level of autocorrelation at any lag and is not limited to simply eliminating all autocorrelations. + +\section{To Remove Up to m Orders of Autocorrelation} +To remove the first m orders of autocorrelation from a given return series we would proceed in a manner very similar to that detailed in \textbf{Geltner Return}. We would initially remove the first order autocorrelation, then proceed to eliminate the second order autocorrelation through the iteration process. 
In general, to remove any order, m, autocorrelations from a given return series we would make the following transformation to returns: + +\begin{equation} +r_{m,t}=\frac{r_{m-1,t}-c_{m}r_{m-1,t-m}}{1-c_{m}} +\end{equation} + +Where \(r_{m-1,t}\) is the series return with the first (m-1) order autocorrelation coefficient's removed.The general form for all the autocorrelations given by the process is : +\begin{equation} +a_{m,n}=\frac{a_{m-1,n}(1+c_{m}^2)-c_{m}(1+a_{m-1,2m})}{1+c_{m}^2 -2c_{m}a_{m-1,n}} +\end{equation} + +Once a solution is found for \(c_{m}\) to create \(r_{m,t}\) , one will need to iterate back to remove the first 'm'autocorrelations again. One will then need to once again remove the mth autocorrelation using the adjustment in equation (3). It would continue the process until the first m autocorrelations are sufficiently close to zero. + +\section{Time Series Characteristics} + +Given a series of historical returns \((R_1,R_2, . . .,R_T)\) from \textbf{January-1997} to \textbf{January-2008}, create a wealth index chart, bars for per-period performance, and underwater chart for drawdown of the Hedge Funds Indiciesfrom EDHEC Database. + +\subsection{ Performance Summary} +<>= +data(edhec) + +charts.PerformanceSummary(edhec[1:132,],colorset = rich6equal, lwd = 2, ylog = TRUE) +@ + +After applying the \textbf{Okunev White Model} to remove the serial correlation , we get the following Performance Chart. + +<>= +data(edhec) + +charts.PerformanceSummary(Return.Okunev(edhec[1:132,]),colorset = rich6equal, lwd = 2, ylog = TRUE) +@ + +\subsection{Autocorrelation UnSmoothing Impact} +One promiment feature visible by the summary chart is the removal of \textbf{serial autocorrelation} and \textbf{unsoomthing} of the return series.The significant drop in autocorrelation, is visible by the following chart based on indicies of the CTA global ,Distressed Securities and Ememrging Markets which had the highest autocorrelation . 
+ +<>= +data(edhec) +chart.Autocorrelation(edhec[,1:3]) +@ + +The change can be evidently seen by the following chart : + + +<>= +data(edhec) +chart.Autocorrelation(Return.Okunev(edhec[,1:3])) +@ + + +\subsection{Comparing Distributions} + +In this example we use edhec database, to compute true Hedge Fund Returns. + +<>= +library(PerformanceAnalytics) +data(edhec) +Returns = Return.Okunev(edhec[,1]) +skewness(edhec[,1]) +skewness(Returns) +# Right Shift of Returns Ditribution for a negative skewed distribution +kurtosis(edhec[,1]) +kurtosis(Returns) +# Reduction in "peakedness" around the mean +layout(rbind(c(1, 2), c(3, 4))) + chart.Histogram(Returns, main = "Plain", methods = NULL) + chart.Histogram(Returns, main = "Density", breaks = 40, + methods = c("add.density", "add.normal")) + chart.Histogram(Returns, main = "Skew and Kurt", + methods = c("add.centered", "add.rug")) +chart.Histogram(Returns, main = "Risk Measures", + methods = c("add.risk")) +@ + +The above figure shows the behaviour of the distribution tending to a normal IID distribution.For comparitive purpose, one can observe the change in the charateristics of return as compared to the orignal. +<>= +library(PerformanceAnalytics) +data(edhec) +Returns = Return.Okunev(edhec[,1]) +layout(rbind(c(1, 2), c(3, 4))) + chart.Histogram(edhec[,1], main = "Plain", methods = NULL) + chart.Histogram(edhec[,1], main = "Density", breaks = 40, + methods = c("add.density", "add.normal")) + chart.Histogram(edhec[,1], main = "Skew and Kurt", + methods = c("add.centered", "add.rug")) +chart.Histogram(edhec[,1], main = "Risk Measures", + methods = c("add.risk")) + +@ + +\section{Risk Measure} + +\subsection{Mean absolute deviation} + +To calculate Mean absolute deviation we take the sum of the absolute value of the difference between the returns and the mean of the returns and we divide it by the number of returns. 
+ + \deqn{MeanAbsoluteDeviation = \frac{\sum^{n}_{i=1}\mid r_i - \overline{r}\mid}{n}}{MeanAbsoluteDeviation = sum(|r-mean(r)|)/n } + +where \eqn{n} is the number of observations of the entire series, \eqn{r_i} is the return in month i and \eqn{\overline{r}} is the mean return + +<>= +data(edhec) +t1=MeanAbsoluteDeviation(edhec[,1:3]) +t2=MeanAbsoluteDeviation(Return.Okunev(edhec[,1:3])) +((t2-t1)*100)/(t1) # % Change +@ + +We can observe than due to the spurious serial autocorrelation, the true \textbf{volatility} was hidden, which is \textbf{more than 100 \% } in case of Distressed Securities to the one apparent to the investor.\textbf{CTA Global}, has the lowerst change, which is consistent,with the fact with it has the lowerst autocorreration. + +\subsection{Frequency (p.64)} + +Gives the period of the return distribution (ie 12 if monthly return, 4 if quarterly return) + +<<>>= +data(portfolio_bacon) +print(Frequency(portfolio_bacon[,1])) #expected 12 +@ + +\subsection{Sharpe Ratio (p.64)} + +The Sharpe ratio is simply the return per unit of risk (represented by variability). In the classic case, the unit of risk is the standard deviation of the returns. + +\deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} + +<<>>= +data(managers) +SharpeRatio(managers[,1,drop=FALSE], Rf=.035/12, FUN="StdDev") +@ + +\subsection{Risk-adjusted return: MSquared (p.67)} + +\eqn{M^2} is a risk adjusted return useful to judge the size of relative performance between differents portfolios. With it you can compare portfolios with different levels of risk. + +\deqn{M^2 = r_P + SR * (\sigma_M - \sigma_P) = (r_P - r_F) * \frac{\sigma_M}{\sigma_P} + r_F}{M squared = Rp + SR * (Market risk - Portfolio risk) = (Rp - Rf) * Market risk / Portfolio risk + Rf} + +where \eqn{r_P}. is the portfolio return annualized, \eqn{\sigma_M}. 
is the market risk and \eqn{\sigma_P} is the portfolio risk + +<<>>= +data(portfolio_bacon) +print(MSquared(portfolio_bacon[,1], portfolio_bacon[,2])) #expected 0.1068 +@ + +\subsection{MSquared Excess (p.68)} + +\eqn{M^2} excess is the quantity above the standard M. There is a geometric excess return which is better for Bacon and an arithmetic excess return + +\deqn{M^2 excess (geometric) = \frac{1 + M^2}{1 + b} - 1}{MSquared excess (geometric) = (1+M^2)/(1+b) - 1} +\deqn{M^2 excess (arithmetic) = M^2 - b}{MSquared excess (arithmetic) = M^2 - b} + +where \eqn{M^2}. is MSquared and \eqn{b}. is the benchmark annualised return. + +<<>>= +data(portfolio_bacon) +print(MSquaredExcess(portfolio_bacon[,1], portfolio_bacon[,2])) #expected -0.00998 +print(MSquaredExcess(portfolio_bacon[,1], portfolio_bacon[,2], Method="arithmetic")) #expected -0.011 +@ + + +\section{Downside Risk} +As we have obtained the true hedge fund returns, what is the actual \textbf{VaR,drawdown and downside potential} of the indices, can be illustrated by the following example, where we CTA Global and Distressed Securities indicies have been as sample sata sets. + +The following table, shows the change in \textbf{absolute value} in terms of percentage, when the Okunev White Return model has been implemented as compared to the Orginal model. We can observe, that for the given period , before the 2008 financial crisis, the hedge fund returns have a \textbf{100} \% increase in exposure.The result is consistent , when tested on other indicies, which show that true risk was camouflaged under the haze of smoothing in the hedge fund industry. 
+ + +<>= +data(edhec) +table1 = table.DownsideRisk(edhec[,2:3]) +table2 = table.DownsideRisk(Return.Okunev(edhec[,2:3])) +((abs(table2)-abs(table1))/(abs(table1)))*100 +@ + +\section{Impact on Performance Ratios} + + +\end{document} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.pdf =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.pdf ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf 2013-09-09 21:27:22 UTC (rev 3037) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf 2013-09-09 21:31:35 UTC (rev 3038) @@ -1,10 +1,10 @@ %PDF-1.5 %???? 1 0 obj << -/Length 187 +/Length 191 >> stream -concordance:OkunevWhite.tex:OkunevWhite.Rnw:1 44 1 1 6 44 1 1 2 1 0 3 1 5 0 1 1 5 0 1 2 6 0 1 1 5 0 1 2 1 0 1 1 1 2 1 0 1 2 1 0 1 2 5 0 1 2 1 1 1 2 1 0 4 1 1 2 1 0 1 2 1 0 1 2 6 0 1 3 1 1 +concordance:OkunevWhite.tex:OkunevWhite.Rnw:1 44 1 1 5 1 4 44 1 1 2 1 0 3 1 5 0 1 1 5 0 1 2 6 0 1 1 5 0 1 2 1 0 1 1 1 2 1 0 1 2 1 0 1 2 5 0 1 2 1 1 1 2 1 0 4 1 1 2 1 0 1 2 1 0 1 2 6 0 1 3 1 1 endstream endobj 4 0 obj << @@ -13,23 +13,35 @@ >> stream x??X[o??~??B?K(4b?W???M?A??@ -????hK??iP?O???\??D?i? q??3E?]?????d?q???????+?s??E???j?q!?U?R??6d??a?>??????w=???????;?`??G??r9??g?{?-??uZ??u???2?C?J ??Y????V????t?_??0?o?&F?2E^?A????? ???;U?l?4?????Ln;??PP6&?.z?uyl;i?| -bX?e?q?i???m?}???8&??m?(2??m?8~h?T??H?%{? }??0!;{/?4Y?b???e?=o?????,?t=J?? -;???x at C[?e??c?????4?????e?????????=xdK????E2??-?-<]??Y?%k?d????",a,??yT?<-??\?c?vD???+?5??P?'????n?+??H? -qS1?*{_/?8?y7?v??? 
-???4?G?;??"?-%% ?Quk56h?Q?9i?bH,?a???hC??;E2U(?@DsM???/B???*??T??ZJ??4q?TYPS?????S7,0?y??*?? -??E??>??S??-?n???? -????N?C?Lp3??K??y?v9??/?&=?????U?A?z+ -?{?@V=_ ?H??V??@???#E&??;u?*??????a,?4] ??E_>??q??S?O?;?????/Q??? ?cn?8?}W????w????7??D?=????S?J;???Y??s???T??~2?uH??A??????B?{=??(?peR17]?z!?????}An?^z??k?O?????cm^Q"I???T??v" -?????0??? j?)x???%??[AH????C??'?W}0(?1?@?Z?P?8?????ZOw1#???=??xM} D?Q??1Js?'????????F_M????1?[Q???:???????^?^?=W4m??R[Nc?JX??A???I??L??J??6?$)?B/??i???}? XJQ???'?]??w???$??F??]??(?}6?V?:???grm$?? C?7?^ty?Wj????H?k??C???e`??F?I???c?9$5Sg??+?3?\?w?mz???????.t![?K?/?%y?i?p'??"??????D??b= -?W?d?w {?N???=q?7`?S?????6&)][??)[a7??C??'??K??????eZ??g?ZY?????????"' +????hK??iP?O???\??D?i?w>Zk@??Vr_?,??nC??6????;??}????(> +?????-???o??_??y?-??uZ??u???2?C?J ??Y????V?????????-`l????hV????6h??y0`??y???z???? +??c?qcB??2?bEn????{??q?}?% +?*?o???N???????????BG??;??*??????y??Y????gC??;H????K??Gd??ZP??gw??h|??>?Ln;??PP6&?.z?uyl;i?| +bX?e?q?i???m?}??qL?q??Q d???/p??>?b??@K??z;aBv4"?^i?>?b???e?=o?????,?t=J?? +;???x at C[?e??c?????4?????e?????????4z0????%o?d>?4?[:[x4?~???K?&d?f?7?EX?X?C???yZr:??1????t+?Vk?{?O?O????W???v??b6U??0^?qx?n??????9?i???w<-?E?[JJ????jl????s?D??Xt?x????????+?B??@g +V??1? ???Y?3??Q|M\??W~?KT~'????fZF&>t_???GT??? m??5?]?f*???mv?<%?z?M???NA?l?Y??A[?????P???]??????d????=?P??xXs??nA?5??sI?????J??L-???]z??j$?q??? ]?V????/AI?nD)?Il?v?F?b?5??j??X??B??3???@??S3??GO? +?? ????IJ??.f?V?M???*?????j"*vsm??b???V?&??w?}?_? "/ endstream endobj 20 0 obj << -/Length 2159 +/Length 2157 /Filter /FlateDecode >> stream @@ -51,7 +63,7 @@ 8??>2??"?8p????????u????Z?yGT?!?H????? ???KFej?U?AI??,\???5%??????8a??fi???[%d9?xx???_?H?wQ??CKM??xe??z?&???5f?l?@W?}}????Y?? j9???/K#? '??? {oVQCHx?????XTH?@???{?m????`??BNV?????N?e?%?m???>?l???t*??2?WX???1?G.?C?K ?[?C?????????O??????;????? {?'??@~!???? $JN???J????3??P?O??????fd???Hp?OA?z????-K??;E??? -?a???17??r??$(?t?[cA:=\?k?D????*=|.{??W???_=???>\9???????????!}?h?!?Y~C???.S4#S.??c?~???=c.???e(??b?????.?izC??9??w???? 
??>:?????@????q?|??t?/3Z: m???????D?V??t?s? ?Y?C!iV?$??"_?Po?>??x)?/?t? +?a???17??r??$(?t?[cA:=\?k?D????*=|.{??W???_=???>\9???????????!}?h?!?Y~C???.S4#S.??c?~???=c.???e(??b?????.?izC??9??w???? ??>:?????@????q?|??t?/3Z: m???????D?V??t?????5?H?K?.??0?m2???B?_?Is? endstream endobj 26 0 obj << @@ -61,11 +73,13 @@ stream x??UKo?0 ??W???U?,;>l??+??0????!??6Hj??a???D?t????m??E??Q??lr|?PLJ?j?l??\E1???'z???]???:T? ???`tt ?8?9n????v? -7?D?@PT??4n?????????&V}???mWn)0?InE X?U?????O\sw??v=*?e?? 6??"????T??~M|Z???m?_]??$???J ??(&\??1?K?.U c?p#H??0?Rt??\$???`>?s2????#??N??????l{z????]?N???V+29V??? M?As???????X?? -q???P???t??pw>??Zw??sc??-^B?T'?:=??Dc??Qb?\c??|????7%?? -uo???????lE?:??z??g???X??????3H?}Dz?????\???4?}???op?`???X??>???b???:"?!????????f?hZ?{????G????v?2?/ ?&?4?W??{? -???H???????;?'r[j?w??qW?^b?m=> -????*?l???'????7'o???c%F +7?D?@PT??4n?????????&V}???mWn)0?InE X?U?????O\sw??v=*?e?? 6??"????T??~M|Z???m?_]??$???J ??(&\??1?K?.U c?p#H??0?Rt??\$???`>?s2????u +???Q)?????l??0???woJ???????Y???/??^up}:?4????Mo?(?cu??g?|??? +?A?? +? +i?%??#?/??.??@?/??q}$?5?x9?uD?C????????????~6?!???{???7d?_ at fM?i?????:&}5?6?? +???w?O???????????T???z| +?3 C?U??\?O??2??????%E endstream endobj 29 0 obj << @@ -99,57 +113,76 @@ >>/ColorSpace << /sRGB 33 0 R >>>> -/Length 8573 +/Length 8958 /Filter /FlateDecode >> stream -x???K?%?q???)???h???????8??$$ ? ????R`???oV?S?$??ye??f????t?,?????[?????W?q?????-???_???????W???????_~q???????6???????_?q?o???b??6?+???a??^????????7z??????}?xO?S??????????o?|q?2??????w?o~?_k???a???[:?=U+??~x??????????*??R???B??gY??2i?????M3???W?d??????6N?r??????v???*?~=???+?7??x?v?m\???]?-?{?;??U)??d;???m??>??xo?3[Ua???????????????mm<_xf+??R?????'??x???V?Y?\3????{/nL???8???(?????? !??V?????rk?j??imZU???{???x???-?Sp?x??E?t/~n:?a}?q????????????????????L???P?^??`?p??????gN???i?Z???u?h?????>???????N?5?????_n_?8Z????Wp??X?9??????????????~???q???????co??h?p???????;7???j? 
-??p???s??8???_??iT???^??5?K?~j??$=?????????u bz??/1??3??%^G??#F??tx>"?????|?0 ?#??(?u??,??wb9???0??-????t????g?18???}?hvc????? -d??>???f?I?T[???^????"?? ?-n?????????` -????w?2??R??????J?,?'????f?d?? SG??,??\????f\u??np}q????F~/?Zh?`?v?q????)??Q?g?j?b???P?@w?K?h??n????r?`????%??-?i????E????lL????8g?B?\y?????9??\n*[?? -?i$?\?]?;??|?8?? -??-?;X???G????????? ???'mh?m?]?gp?L???????????Fe?{??? --&?8?????.Dy??`?R???P??DayKW??dp??(????????|???,??:G{?;????'???;?fd???v??????XH??U.??Sg+??K????0???????r8'+??? -?o??3?????S?j???4???^6??0FY???????^??a????????e?z????L?????l?8????(#?#????! '??(????h?????????}??????5?g???q?fG?;[????\??5?????????*9[??G?Q?/d????W4 -????5 -d?N -?' -??WT5j??!?8?Eg\?~s????????????S???????r??kZ?-?OZ??q??V?????zgYY????)?iu?U???U7???W????^??%?^??%?^M??W???W??O?+???^??+s?????U??vy?E???W k?6?j?5K?-???w?kp_??V???_mN??]/??[??!??Vv???9{??\???g?g&?9??K[?-??S/?$+??r_?{??sN+????W??e?z?,SnY??2?:?z??d49?^???N.}e?E>?????~9z?31?"'O??L???S?v?????qm&?^?????E????U?????z?8???u2?I??Y?W'?^`?????m?~,???lih?kb??/????J??w?????N??q?`?? -?8?W`y???HM?????????1N????h??G?? ???:/8?????F?<???/???ip?C9s?#???&p|?^>^?W???????>??W`?'????W?N??*????c??^h??"?????3??>U??? ?i;?d???>?p?G??????=?~??G3??d???d??&???????h??a??=????;?,?d?9???z9??d?????i?g???g?X=??^A???5?X?}???ZP?m?M????d?????????}l??C5?????6;?? ?:O?kk?:O?k?w:?????p??l?v?p??d?6?p??|?k? ???g^?^??y~?k? ?8O?k? w8O?k? ?7?V??+???Ol?????????o -[?????????Ol6?~l???'6K?>~lj~|f?????4??:w? -?n??mg>?N??r??/?g????N?q'?u??nd????1?-?6fu?c?|??? -??elg?tY????f?d? ;YhN?????v?dGaO?U???;??O???s?}???3=?}?j?? ?Wx?h?>a? -O.?`??6?[?Y?:?-??????ge]]??=+?Rtj??m?????,?1Q -3???????W?`?d??fj??,)6Q?3?6S??????(?L2?o??F?F?}V??H?N4)Q?Y ?U'???aV ??,1??R???diVI??d*Y???yV?Z?a?Y??'???J,>Q-?0?????R -'????u???????????f?F?O???D%HTBI??IT?T?L?*?W??d??g?W??? RN3?>???X?`?eZ$Q '(???DP? J8ah*T?rI?>?T?LP?>??P??NP$?????e??}?????+?AT?$(?????$*A???Zl???????SB??J????#???fE#U? 7?5m? s???`?O?,?4?*?5!?K??f %%+|?`??J?R% -2? ->Q0/?4e?,d?$?R5???`@?R??????? ->?=QHM????V???+?J%lU???k\Z??B? 
????#??^*asWM???? Y??Y???:l?J?.R??3b?D???Z?6;?????^?#?.X??*a?@?vT??Y??????? ??m?7?uK???u?D?{@?M?F?[9????H2? -]?????? ?n?@b?????<@?? -??u?+?????mD?UC???rN)E?K?J$UBX2??`n???*,???d??H#VA??I???-???h?1AQ{?5?B6??K?"??*?0?????d}?? -G?:????yM%????? -?l/??P):???}???,C.~ Hb???M????*=??J??L?x??M?a4?6$???2?h?2%(?????pNO?$C?h??W(,???1E??F?C????8?????a????a??p-????p?????1??8*?vO???8?KQ?a??????w??????e`??C?9E?+?U.???y? -=]Q?/?y?<*?0"???*NC?UTq??@Q?ihM??????????y?????.?E?(????7 -?9q?`?????8>?y?/W???3?!?/?V??l??l??/fV?????G??Q?li?T??.?E?"??%?9?8e#)???? r? I?te@k^??1?)?#??9E?8e??tu?AtXpJX+Nk?vL???????V,}?)m?3?!,???5/???V_ ?????/??B -?91_? ?R&l.?c?K^?NKi?)-X??m???????-X??}*$6q????r,??B6??s_p??E??3????????,8???"????B?U???c??????-?x?'0???????P?V?e?t?i?VK??km??+?N?m?????n?v?3?8??\v??N????_?Y??{I????~N]??Z|?-t????l??i'.vY???m=?e?l??u?????b?i?K^???l??e?\L5?k_??/vD|?=qJy??j??????????k?WY?u????/{?m!?l/?]?b?U???_??m?9?k???????-?u??+n&??|??V????e??f?m?????j??}?Ov??V?M??_???????k?????????a????n????}??????????6???m???I???V??W????>??}?6?4?S??y?~????O???~d}????e??n_?????/%O????e??no??m?T{*?=????k^?5o????j_????q??????????????W?^??^??u}Y????jo?????????????Z??}?q????~?????????????i}?i??;??=???W?~????????????X?rl????????w????lO?f???y???b??????/??-r???}?e?e????#]1?e???O???z??????^?fo?bo?f?k??^??^????l?e??M????n_????_um???~??;wL???&N?5??{|e???=????foi??????m???????}??}|??v?q??d???%?????fo???z??}o{???????????l???{Z?=????}???i'?????????8????'{\? 
?d?y?????i?????Hy??????/?m????|p??k????1a??i?????-?'?b???????3:?}m?r?/n'Nv??????^???8???E??%,?Q??????x??kr??z?}?d?y???~?}???8#8?6V?????????7?????????,{(?}?~?????????b5O[??k????d??l?s?#[{???????V???????????????C?g????-??.???c?)??????d??m??????/???b??????{b???x,?'6?o??kb?-?O??CN???r,pk~???????\???n,?%6????Sb?5???k7oy???m???????J~E???>?V?o?=??=?????J~@??3??aO?k???/?z?a???j??????Hw0?'?|????mg%??)[+???O?Z?o?=?Z???????a?????~>??????X???_T8???n?i?uy>B?????????C?~x>?uq=??~????V,O??o??T?????V,????G,?O?????.???0VA?s??k??????67??,????`?tVC/???9?#??{T1^?i?x ????n???????ay??W???gWK???j???}??x??Q??? +x??]??%?q??W????????$??2 C??? ? A???!?CofFd?{Nw?v??????+*?|?<n????n|???????q?b?????w??????o????_???????N>n??????w??y???o?.v??g???#??8????y???????????v???_ ?S???%?~??W???/????Xn??????}???Z?~?KV??C?I?1???w??w????? e%w#????-?p?z??%;-'-????qb????]0??????'?r??w????q???p???????_?+??????\??{K?x/M??????1-??/??w??G}f?? ?m5???k?z/???????V????????uy??p??????z??4??7??Cu??m??1?jl?????77?un?9/?V????????Vh??????=?u\g?rW??-??????6O,??79?{????????????????nh6n???????????SB[O??;?vOe=?|h?h?{??4??! +}[?}???.?z?O;?dO???e????i??????#??z??G?j??{?G?|??4?? ????s?J?7?;?k/vn:?????.??FH????T??go????X?|,???E^_8Ka??>`????????,???B??b????a{>???eg??'??y?X????x????.=5?]?S??wc~?=5????2???'?6?T???{x????X?????yfl;??1???sn?Q?s?4 y??4?????????O[;????j?{?m????t??s?b~?m?????s[??8?????  ??^?|??Zv?gc?9??f??s??q?x6???o?,?is?????.;??#?????????>e([????{??nW??q?3??q?????????#?c?#_GP*???#??O?????????????D%?a???>;Q?????J??y??a?g"?I????????????st??#??#??>???????:?j?G??X???Yxn:*???????[?u;??????|????y?s???'.?x6??"??????D?#?? <.???I95??U?^T?A?ZY???j??? +W?vW?????Gw?3.Z???l?Z??p?A??]?????W??bIB +3???????8??[d5aQb'\7?4??p?????@?p~7???]?X??'?C +L??^??? +b????!_?Ma?# ????=???bX?8b????q???}?a9 ???o?WML????m??r?Cf?[????@?_?hC?}?|#??????DG???????ts?C????00?g?Hf???????????U???)??????????T?@?2/????????5?1\??t?R?6?????,?????M??>??*;?%G?b???? +Oe???? +?? ??%?{?V???????F?`??????tR?28{???|?f?????:Y{F\7?k? +?R?;??8>? 
+?h??Mv}??m??l?6;???z???0~F??x???-,P??a????i;~dP#??n}??^i?n???O4?????'5??B??{?x??T??????]?????????>????S?n?b????l?Fq?0c?K???IGa?kr????H6|?????????d?z????'?~??4#F?D? 7????????Xk????hE??5????????wR7?X?????K??'?????_??w????-??=???=?[?Qq???DZqv?????o?3? ?w???T???o????F?%????????s???k?5O?G?$W|?GGO??_4JK??o??'?^?O?u?????zW6z?T??8????.????~x??(+^?x?n&??8a?<+^??i?m??j ???????/?????\W\???y ?W???U6?d?K6?d?K6???W???_u??!????W??j??.?7????<??t??+_:??/|?M_??????/??a?k?????u[?? _m?????? _m?W?S?x?k???????K?=n?+^?u at K\????KD^????/ +???W?6}?q>??|m???????o??-yEX???U?R?????/?c????????B]??W??5p_???<$?K_?k?q9?r?|9f^}b? ???c??;_??r???c???X?????v??8??c?}io??:?e~Qq-???|??|???????????/?u??/??|??|??|1??????{]?1??8Aq???:1K?? ?S_???z!f?~b????eq?qNW0O>?ub??7??????'? ?C?pP_?y???????????90?$?k`????~?}??????}i??+_=?n4??)F??%??G?? ?????v?7w[%?|?D=$????R?:,?&`?k??v???O?6??+B_#??????.??\?????}??l|U???4lA?c`????&?K?!?:??-?X?nx8_&,??u????H?m?V1??T}???uK????d2?Czs_????V7=d+h???W??B ?ZW?:y6!*f???n?I?y!???? +???-w?mQ'?Y;???@?|??ub???E??m`KX? +?| +???|a??V?S ?????x????u?luE[?????:# +??K$??0???????"C_?;V?67?X?R7V???????/???g????X???i??^?V???*?W ?#[???6b?k`?7???p????????#?+?/???????m)?A?|)v??}?+???? 8?j?????Q???=??u?K?'???,?W?x?@_???Ru????y?+?|?W<??w1??z????????V6?>+_?_?g????/???M??X??y? ?*?OL???2????Y???? +??:V?????BX ??@c|?????2}]??Z?G??x?Z`??L?y*?'k|?y,Kl?,??3???????2qg|?????_????????yT??5c??????4???|?:???G??3??|^?b)???n/?G????|???'??5?f|??????9^T?L?????h??V+?????bQ??D???y?????6??mu#??G+?T?'?2????????>*??kG=??Q??e?N??y?v?? ??}i~P???A_???s?zm\???g?? 
[TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3038 From noreply at r-forge.r-project.org Tue Sep 10 00:33:05 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 00:33:05 +0200 (CEST) Subject: [Returnanalytics-commits] r3039 - in pkg/PerformanceAnalytics/sandbox/pulkit: R man week1/code Message-ID: <20130909223305.B97AA183BB0@r-forge.r-project.org> Author: pulkit Date: 2013-09-10 00:33:05 +0200 (Tue, 10 Sep 2013) New Revision: 3039 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBetaMulti.R pkg/PerformanceAnalytics/sandbox/pulkit/man/BenchmarkSR.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/MaxDD.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/MinTrackRecord.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/ProbSharpeRatio.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/REDDCOPS.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/TuW.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.SRIndifference.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/golden_section.Rd pkg/PerformanceAnalytics/sandbox/pulkit/week1/code/PSROpt.py Log: na handling in drawdown beta and multi path drawdown beta Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBeta.R 2013-09-09 22:33:05 UTC (rev 3039) @@ -69,10 +69,10 @@ # The Drawdown beta is given as the output. 
- R = na.omit(R) - Rm = na.omit(Rm) x = checkData(R) xm = checkData(Rm) + x = na.omit(x) + xm = na.omit(xm) if(nrow(x) != nrow(xm)){ stop("The number of rows of the return series and the optimal portfolio should be equal") } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBetaMulti.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBetaMulti.R 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/DrawdownBetaMulti.R 2013-09-09 22:33:05 UTC (rev 3039) @@ -59,9 +59,14 @@ # OUTPUT: # The Drawdown beta for multiple sample path is given as the output. - x = checkData(R) - xm = checkData(Rm) + xm = checkData(Rm) + x = na.omit(x) + xm = na.omit(xm) + if(nrow(R) != nrow(Rm)){ + stop("The length of the return series with the optimal portfolio should be equal") + } + columnnames = colnames(R) columns = ncol(R) drawdowns_m = Drawdowns(Rm) Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/BenchmarkSR.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/BenchmarkSR.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/BenchmarkSR.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -24,6 +24,8 @@ \examples{ data(edhec) BenchmarkSR(edhec) #expected 0.393797 +data(managers) +BenchmarkSR(managers) # expected 0.8110536 } \author{ Pulkit Mehrotra Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/MaxDD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/MaxDD.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/MaxDD.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -2,7 +2,8 @@ \alias{MaxDD} \title{Triple Penance Rule} \usage{ - MaxDD(R, confidence, type = c("ar", "normal"), ...) + MaxDD(R, confidence = 0.95, type = c("ar", "normal"), + ...) 
} \arguments{ \item{R}{Returns} Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/MinTrackRecord.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/MinTrackRecord.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/MinTrackRecord.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -69,6 +69,9 @@ MinTrackRecord(edhec[,1],refSR=0.1,Rf = 0.04/12) MinTrackRecord(refSR = 1/12^0.5,Rf = 0,p=0.95,sr = 2/12^0.5,sk=-0.72,kr=5.78) MinTrackRecord(edhec[,1:2],refSR = c(0.28,0.24)) + +data(managers) +MinTrackRecord(managers,refSR = 0) } \author{ Pulkit Mehrotra Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/ProbSharpeRatio.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/ProbSharpeRatio.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/ProbSharpeRatio.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -61,6 +61,9 @@ ProbSharpeRatio(edhec[,1],refSR = 0.23) ProbSharpeRatio(refSR = 1/12^0.5,Rf = 0,p=0.95,sr = 2/12^0.5,sk=-0.72,kr=5.78,n=59) ProbSharpeRatio(edhec[,1:2],refSR = c(0.28,0.24)) + +data(managers) +ProbSharpeRatio(managers,0) } \author{ Pulkit Mehrotra Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/REDDCOPS.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/REDDCOPS.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/REDDCOPS.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -12,9 +12,6 @@ \item{delta}{Drawdown limit} - \item{sharpe}{If you want to use a constant Sharpe Ratio - please specify here else the return series will be used} - \item{Rf}{risk free rate can be vector such as government security rate of return.} @@ -29,6 +26,9 @@ \item{asset}{The number of risky assets in the portfolio} \item{type}{The type of portfolio optimization} + + \item{sharpe}{If you want to use a 
constant Sharpe Ratio + please specify here else the return series will be used} } \description{ The Rolling Economic Drawdown Controlled Optimal Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/TuW.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/TuW.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/TuW.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -2,7 +2,7 @@ \alias{TuW} \title{Maximum Time Under Water} \usage{ - TuW(R, confidence, type = c("ar", "normal"), ...) + TuW(R, confidence = 0.95, type = c("ar", "normal"), ...) } \arguments{ \item{R}{return series} Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.BenchmarkSR.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -78,6 +78,9 @@ data(edhec) chart.BenchmarkSR(edhec,vs="strategies") chart.BenchmarkSR(edhec,vs="sharpe") + +data(managers) +chart.BenchmarkSR(managers,vs="strategies") } \author{ Pulkit Mehrotra Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.SRIndifference.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.SRIndifference.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.SRIndifference.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -73,6 +73,8 @@ \examples{ data(edhec) chart.SRIndifference(edhec) +data(managers) +chart.SRIndifference(managers) } \author{ Pulkit Mehrotra Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/golden_section.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/golden_section.Rd 2013-09-09 21:31:35 UTC (rev 3038) +++ 
pkg/PerformanceAnalytics/sandbox/pulkit/man/golden_section.Rd 2013-09-09 22:33:05 UTC (rev 3039) @@ -2,7 +2,7 @@ \alias{golden_section} \title{Golden Section Algorithm} \usage{ - golden_section(a, b, minimum = TRUE, function_name, ...) + golden_section(a, b, function_name, minimum = TRUE, ...) } \arguments{ \item{a}{initial point} Modified: pkg/PerformanceAnalytics/sandbox/pulkit/week1/code/PSROpt.py =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/week1/code/PSROpt.py 2013-09-09 21:31:35 UTC (rev 3038) +++ pkg/PerformanceAnalytics/sandbox/pulkit/week1/code/PSROpt.py 2013-09-09 22:33:05 UTC (rev 3039) @@ -133,8 +133,8 @@ #------------------------------------------- def main(): #1) Inputs (path to csv file with returns series) - path='data.csv' - maxIter=10000 # Maximum number of iterations + path='ham_data.csv' + maxIter=1000 # Maximum number of iterations delta=.005 # Delta Z (attempted gain per interation) #2) Load data, set seed From noreply at r-forge.r-project.org Tue Sep 10 02:56:38 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 02:56:38 +0200 (CEST) Subject: [Returnanalytics-commits] r3040 - pkg/PerformanceAnalytics/sandbox/pulkit/R Message-ID: <20130910005638.B7F38184C9A@r-forge.r-project.org> Author: pulkit Date: 2013-09-10 02:56:37 +0200 (Tue, 10 Sep 2013) New Revision: 3040 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R Log: Annualized SR in Benchmark SR Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-09-09 22:33:05 UTC (rev 3039) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkPlots.R 2013-09-10 00:56:37 UTC (rev 3040) @@ -72,7 +72,7 @@ 
if(!is.null(R)){ x = checkData(R) columns = ncol(x) - avgSR = mean(SharpeRatio(R,FUN="StdDev")) + avgSR = mean(SharpeRatio.annualized(R)) } else{ if(is.null(avgSR) | is.null(S)){ Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R 2013-09-09 22:33:05 UTC (rev 3039) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/BenchmarkSR.R 2013-09-10 00:56:37 UTC (rev 3040) @@ -2,7 +2,7 @@ #'Benchmark Sharpe Ratio #' #'@description -#'The benchmark SR is a linear function of the average +#'The benchmark SR is a linear function of the average annualized #' SR of the individual strategies, and a decreasing #' convex function of the number of strategies and the #' average pairwise correlation. The Returns are given as @@ -10,8 +10,9 @@ #' #'\deqn{SR_B = \bar{SR}\sqrt{\frac{S}{1+(S-1)\bar{\rho}}}} #' -#'Here \eqn{\bar{SR}} is the average SR of the portfolio and \eqn{\bar{\rho}} -#'is the average correlation across off-diagonal elements +#'Here \eqn{\bar{SR}} is the average annualized Sharpe Ratio of the portfolio and \eqn{\bar{\rho}} +#'is the average correlation across off-diagonal elements. 
+#' #' #'@param R a vector, matrix, data frame,timeseries or zoo object of asset returns #' @@ -46,7 +47,7 @@ if(columns == 1){ stop("The number of return series should be greater than 1") } - SR = SharpeRatio(x,FUN="StdDev") + SR = SharpeRatio.annualized(x) sr_avg = mean(SR) corr = table.Correlation(R,R) corr_avg = 0 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R 2013-09-09 22:33:05 UTC (rev 3039) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/SRIndifferenceCurve.R 2013-09-10 00:56:37 UTC (rev 3040) @@ -75,7 +75,7 @@ if(columns == 1){ stop("The number of return series should be greater 1 ") } - SR = SharpeRatio(x,FUN="StdDev") + SR = SharpeRatio.annualized(x) sr_avg = mean(SR) corr = table.Correlation(R,R) corr_avg = 0 From noreply at r-forge.r-project.org Tue Sep 10 16:51:47 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 16:51:47 +0200 (CEST) Subject: [Returnanalytics-commits] r3041 - pkg/PortfolioAnalytics/R Message-ID: <20130910145147.192821848A0@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-10 16:51:46 +0200 (Tue, 10 Sep 2013) New Revision: 3041 Modified: pkg/PortfolioAnalytics/R/optFUN.R Log: Adding function to optFUN for proportional cost. 
Modified: pkg/PortfolioAnalytics/R/optFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/optFUN.R 2013-09-10 00:56:37 UTC (rev 3040) +++ pkg/PortfolioAnalytics/R/optFUN.R 2013-09-10 14:51:46 UTC (rev 3041) @@ -587,3 +587,116 @@ # roi.result <- ROI_solve(x=opt.prob, solver="quadprog") } +# proportional transaction cost constraint +gmv_opt_ptc <- function(R, constraints, moments, lambda, target, init_weights){ + # function for minimum variance or max quadratic utility problems + # modifying ProportionalCostOpt function from MPO package + + # Modify the returns matrix. This is done because there are 3 sets of + # variables 1) w.initial, 2) w.buy, and 3) w.sell + returns <- cbind(R, R, R) + V <- cov(returns) + + # number of assets + N <- ncol(R) + + # initial weights for solver + if(is.null(init_weights)) init_weights <- rep(1/ N, N) + + # Amat for initial weights + Amat <- cbind(diag(N), matrix(0, nrow=N, ncol=N*2)) + rhs <- init_weights + dir <- rep("==", N) + meq <- 4 + + # check for a target return constraint + if(!is.na(target)) { + # If var is the only objective specified, then moments$mean won't be calculated + if(all(moments$mean==0)){ + tmp_means <- colMeans(R) + } else { + tmp_means <- moments$mean + } + Amat <- rbind(Amat, rep((1+tmp_means), 3)) + dir <- c(dir, "==") + rhs <- c(rhs, (1+target)) + meq <- 5 + } + + # Amat for positive weights for w.buy and w.sell + weights.positive <- rbind(matrix(0,ncol=2*N,nrow=N),diag(2*N)) + temp.index <- (N*3-N+1):(N*3) + weights.positive[temp.index,] <- -1*weights.positive[temp.index,] + Amat <- rbind(Amat, t(weights.positive)) + rhs <- c(rhs, rep(0, 2*N)) + + # Amat for full investment constraint + ptc <- constraints$ptc + Amat <- rbind(Amat, rbind(c(rep(1, N), (1+ptc), (1-ptc)), -c(rep(1, N), (1+ptc), (1-ptc)))) + rhs <- c(rhs, constraints$min_sum, -constraints$max_sum) + dir <- c(dir, ">=", ">=") + + # Amat for lower box constraints + Amat <- rbind(Amat, 
cbind(diag(N), diag(N), diag(N))) + rhs <- c(rhs, constraints$min) + dir <- c(dir, rep(">=", N)) + + # Amat for upper box constraints + Amat <- rbind(Amat, cbind(-diag(N), -diag(N), -diag(N))) + rhs <- c(rhs, -constraints$max) + dir <- c(dir, rep(">=", N)) + + # include group constraints + if(try(!is.null(constraints$groups), silent=TRUE)){ + n.groups <- length(constraints$groups) + Amat.group <- matrix(0, nrow=n.groups, ncol=N) + for(i in 1:n.groups){ + Amat.group[i, constraints$groups[[i]]] <- 1 + } + if(is.null(constraints$cLO)) cLO <- rep(-Inf, n.groups) + if(is.null(constraints$cUP)) cUP <- rep(Inf, n.groups) + Amat <- rbind(Amat, cbind(Amat.group, Amat.group, Amat.group)) + Amat <- rbind(Amat, cbind(-Amat.group, -Amat.group, -Amat.group)) + dir <- c(dir, rep(">=", (n.groups + n.groups))) + rhs <- c(rhs, constraints$cLO, -constraints$cUP) + } + + # Add the factor exposures to Amat, dir, and rhs + if(!is.null(constraints$B)){ + t.B <- t(constraints$B) + Amat <- rbind(Amat, cbind(t.B, t.B, t.B)) + Amat <- rbind(Amat, cbind(-t.B, -t.B, -t.B)) + dir <- c(dir, rep(">=", 2 * nrow(t.B))) + rhs <- c(rhs, constraints$lower, -constraints$upper) + } + + d <- rep(-moments$mean, 3) + + qp.result <- try(solve.QP(Dmat=corpcor:::make.positive.definite(2*lambda*V), + dvec=d, Amat=t(Amat), bvec=rhs, meq=meq), silent=TRUE) + if(inherits(qp.result, "try-error")) stop("No solution found, consider adjusting constraints.") + + wts <- qp.result$solution + w.buy <- qp.result$solution[(N+1):(2*N)] + w.sell <- qp.result$solution[(2*N+1):(3*N)] + w.total <- w.initial + w.buy + w.sell + # wts.final <- wts[(1:N)] + wts[(1+N):(2*N)] + wts[(2*N+1):(3*N)] + + weights <- w.total + names(weights) <- colnames(R) + out <- list() + out$weights <- weights + out$out <- qp.result$val + return(out) + + # TODO + # Get this working with ROI + + # Not getting solution using ROI + # set up the quadratic objective + # ROI_objective <- Q_objective(Q=make.positive.definite(2*lambda*V), L=rep(-moments$mean, 
3)) + + # opt.prob <- OP(objective=ROI_objective, + # constraints=L_constraint(L=Amat, dir=dir, rhs=rhs)) + # roi.result <- ROI_solve(x=opt.prob, solver="quadprog") +} From noreply at r-forge.r-project.org Tue Sep 10 17:34:35 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 17:34:35 +0200 (CEST) Subject: [Returnanalytics-commits] r3042 - pkg/PortfolioAnalytics/R Message-ID: <20130910153435.F0DDC1854CD@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-10 17:34:34 +0200 (Tue, 10 Sep 2013) New Revision: 3042 Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R Log: Adding code to optimize.portfolio for proportional transaction cost constraints. Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-10 14:51:46 UTC (rev 3041) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-10 15:34:34 UTC (rev 3042) @@ -733,11 +733,23 @@ if("var" %in% names(moments)){ # Minimize variance if the only objective specified is variance # Maximize Quadratic Utility if var and mean are specified as objectives - if(!is.null(constraints$turnover_target)){ - qp_result <- gmv_opt_toc(R=R, constraints=constraints, moments=moments, lambda=lambda, target=target, init_weights=portfolio$assets) - weights <- qp_result$weights - obj_vals <- constrained_objective(w=weights, R=R, portfolio, trace=TRUE, normalize=FALSE)$objective_measures - out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=roi_result$out, call=call) + if(!is.null(constraints$turnover_target) | !is.null(constraints$ptc)){ + if(!is.null(constraints$turnover_target) & !is.null(constraints$ptc)){ + warning("Turnover and proportional transaction cost constraints detected, only running optimization for turnover constraint.") + constraints$ptc <- NULL + } + if(!is.null(constraints$turnover_target) & 
is.null(constraints$ptc)){ + qp_result <- gmv_opt_toc(R=R, constraints=constraints, moments=moments, lambda=lambda, target=target, init_weights=portfolio$assets) + weights <- qp_result$weights + obj_vals <- constrained_objective(w=weights, R=R, portfolio, trace=TRUE, normalize=FALSE)$objective_measures + out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=roi_result$out, call=call) + } + if(!is.null(constraints$ptc) & is.null(constraints$turnover_target)){ + qp_result <- gmv_opt_ptc(R=R, constraints=constraints, moments=moments, lambda=lambda, target=target, init_weights=portfolio$assets) + weights <- qp_result$weights + obj_vals <- constrained_objective(w=weights, R=R, portfolio, trace=TRUE, normalize=FALSE)$objective_measures + out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=roi_result$out, call=call) + } } else { roi_result <- gmv_opt(R=R, constraints=constraints, moments=moments, lambda=lambda, target=target, lambda_hhi=lambda_hhi, conc_groups=conc_groups) weights <- roi_result$weights From noreply at r-forge.r-project.org Tue Sep 10 18:49:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 18:49:13 +0200 (CEST) Subject: [Returnanalytics-commits] r3043 - in pkg/PortfolioAnalytics: R demo Message-ID: <20130910164913.F2FE918112F@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-10 18:49:13 +0200 (Tue, 10 Sep 2013) New Revision: 3043 Added: pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R Modified: pkg/PortfolioAnalytics/R/constraints.R pkg/PortfolioAnalytics/R/optFUN.R pkg/PortfolioAnalytics/R/optimize.portfolio.R pkg/PortfolioAnalytics/demo/00Index Log: Added transaction cost as a constraint type. Added demo script for proportional cost constraint. 
Modified: pkg/PortfolioAnalytics/R/constraints.R =================================================================== --- pkg/PortfolioAnalytics/R/constraints.R 2013-09-10 15:34:34 UTC (rev 3042) +++ pkg/PortfolioAnalytics/R/constraints.R 2013-09-10 16:49:13 UTC (rev 3043) @@ -345,6 +345,13 @@ message=message, ...=...) }, + # transaction cost constraint + transaction=, transaction_cost = {tmp_constraint <- transaction_cost_constraint(assets=assets, + type=type, + enabled=enabled, + message=message, + ...=...) + }, # Do nothing and return the portfolio object if type is NULL null = {return(portfolio)} ) @@ -718,6 +725,9 @@ out$lower <- constraint$lower out$upper <- constraint$upper } + if(inherits(constraint, "transaction_cost_constraint")){ + out$ptc <- constraint$ptc + } } } @@ -963,6 +973,39 @@ return(Constraint) } +#' constructor for transaction_cost_constraint +#' +#' The transaction cost constraint specifies a proportional cost value. +#' This function is called by add.constraint when type="transaction_cost" is specified, see \code{\link{add.constraint}}. +#' +#' Note that with the ROI solvers, proportional transaction cost constraint is +#' currently only supported for the global minimum variance and quadratic +#' utility problems with ROI quadprog plugin. +#' +#' @param type character type of the constraint +#' @param ptc proportional transaction cost value +#' @param enabled TRUE/FALSE +#' @param message TRUE/FALSE. The default is message=FALSE. Display messages if TRUE. 
+#' @param \dots any other passthru parameters to specify box and/or group constraints +#' @author Ross Bennett +#' @seealso \code{\link{add.constraint}} +#' @examples +#' data(edhec) +#' ret <- edhec[, 1:4] +#' +#' pspec <- portfolio.spec(assets=colnames(ret)) +#' +#' pspec <- add.constraint(portfolio=pspec, type="transaction_cost", ptc=0.01) +#' @export +transaction_cost_constraint <- function(type="transaction_cost", assets, ptc, enabled=TRUE, message=FALSE, ...){ + nassets <- length(assets) + if(length(ptc) == 1) ptc <- rep(ptc, nassets) + if(length(ptc) != nassets) stop("length of ptc must be equal to number of assets") + Constraint <- constraint_v2(type, enabled=enabled, constrclass="transaction_cost_constraint", ...) + Constraint$ptc <- ptc + return(Constraint) +} + #' function for updating constrints, not well tested, may be broken #' #' can we use the generic update.default function? Modified: pkg/PortfolioAnalytics/R/optFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/optFUN.R 2013-09-10 15:34:34 UTC (rev 3042) +++ pkg/PortfolioAnalytics/R/optFUN.R 2013-09-10 16:49:13 UTC (rev 3043) @@ -679,10 +679,10 @@ wts <- qp.result$solution w.buy <- qp.result$solution[(N+1):(2*N)] w.sell <- qp.result$solution[(2*N+1):(3*N)] - w.total <- w.initial + w.buy + w.sell - # wts.final <- wts[(1:N)] + wts[(1+N):(2*N)] + wts[(2*N+1):(3*N)] + w.total <- init_weights + w.buy + w.sell + wts.final <- wts[(1:N)] + wts[(1+N):(2*N)] + wts[(2*N+1):(3*N)] - weights <- w.total + weights <- wts.final names(weights) <- colnames(R) out <- list() out$weights <- weights Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-10 15:34:34 UTC (rev 3042) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-10 16:49:13 UTC (rev 3043) @@ -742,13 +742,13 @@ qp_result <- gmv_opt_toc(R=R, constraints=constraints, 
moments=moments, lambda=lambda, target=target, init_weights=portfolio$assets) weights <- qp_result$weights obj_vals <- constrained_objective(w=weights, R=R, portfolio, trace=TRUE, normalize=FALSE)$objective_measures - out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=roi_result$out, call=call) + out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=qp_result$out, call=call) } if(!is.null(constraints$ptc) & is.null(constraints$turnover_target)){ qp_result <- gmv_opt_ptc(R=R, constraints=constraints, moments=moments, lambda=lambda, target=target, init_weights=portfolio$assets) weights <- qp_result$weights obj_vals <- constrained_objective(w=weights, R=R, portfolio, trace=TRUE, normalize=FALSE)$objective_measures - out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=roi_result$out, call=call) + out <- list(weights=weights, objective_measures=obj_vals, opt_values=obj_vals, out=qp_result$out, call=call) } } else { roi_result <- gmv_opt(R=R, constraints=constraints, moments=moments, lambda=lambda, target=target, lambda_hhi=lambda_hhi, conc_groups=conc_groups) Modified: pkg/PortfolioAnalytics/demo/00Index =================================================================== --- pkg/PortfolioAnalytics/demo/00Index 2013-09-10 15:34:34 UTC (rev 3042) +++ pkg/PortfolioAnalytics/demo/00Index 2013-09-10 16:49:13 UTC (rev 3043) @@ -13,4 +13,5 @@ demo_weight_concentration Demonstrate how to use the weight concentration objective. backwards_compat Demonstrate how to solve optimization problems using v1 specification with a v1_constraint object. 
demo_random_portfolios Demonstrate examples from script.workshop2012.R using random portfolios +demo_proportional_cost_ROI Demonstrate how to use proportional transaction cost constraint with quadprog solver Added: pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R =================================================================== --- pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R 2013-09-10 16:49:13 UTC (rev 3043) @@ -0,0 +1,32 @@ +library(PortfolioAnalytics) +library(quadprog) + +data(edhec) +N <- 4 +R <- edhec[, 1:N] +colnames(R) <- c("CA", "CTAG", "DS", "EM") +funds <- colnames(R) + +# set up initial portfolio specification object +pspec <- portfolio.spec(assets=funds) +pspec <- add.constraint(portfolio=pspec, type="full_investment") +pspec <- add.constraint(portfolio=pspec, type="long_only") +pspec <- add.constraint(portfolio=pspec, type="transaction", ptc=0.01) + +# add var objective to minimize portfolio variance +minvar <- add.objective(portfolio=pspec, type="risk", name="var") + +# Note that if a return target is not specified, the results may not make sense +optimize.portfolio(R=R, portfolio=minvar, optimize_method="ROI") + +# Add a target return constraint +minvar <- add.constraint(portfolio=minvar, type="return", return_target=0.007) +optimize.portfolio(R=R, portfolio=minvar, optimize_method="ROI") + +# Add return and risk objective for quadratic utility +# Note that target return can be specified as a constraint or in the return +# objective as shown below +qu <- add.objective(portfolio=pspec, type="risk", name="var", risk_aversion=0.3) +qu <- add.objective(portfolio=qu, type="return", name="mean", target=0.007) +optimize.portfolio(R=R, portfolio=qu, optimize_method="ROI") + From noreply at r-forge.r-project.org Tue Sep 10 19:02:44 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 19:02:44 +0200 (CEST) Subject: 
[Returnanalytics-commits] r3044 - pkg/PortfolioAnalytics/R Message-ID: <20130910170244.80EBC1853AD@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-10 19:02:44 +0200 (Tue, 10 Sep 2013) New Revision: 3044 Modified: pkg/PortfolioAnalytics/R/optFUN.R Log: Modifying gmv_opt so that risk aversion parameter, lambda, is multiplied through the quadratic objective per Doug's feedback. Modified: pkg/PortfolioAnalytics/R/optFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/optFUN.R 2013-09-10 16:49:13 UTC (rev 3043) +++ pkg/PortfolioAnalytics/R/optFUN.R 2013-09-10 17:02:44 UTC (rev 3044) @@ -62,7 +62,7 @@ # set up the quadratic objective if(!is.null(lambda_hhi)){ if(length(lambda_hhi) == 1 & is.null(conc_groups)){ - ROI_objective <- Q_objective(Q=2*lambda*moments$var + lambda_hhi * diag(N), L=-moments$mean) + ROI_objective <- Q_objective(Q=2*lambda*(moments$var + lambda_hhi * diag(N)), L=-moments$mean) } else if(!is.null(conc_groups)){ # construct the matrix with concentration aversion values by group hhi_mat <- matrix(0, nrow=N, ncol=N) @@ -76,7 +76,7 @@ } hhi_mat <- hhi_mat + lambda_hhi[i] * tmpI } - ROI_objective <- Q_objective(Q=2*lambda*moments$var + hhi_mat, L=-moments$mean) + ROI_objective <- Q_objective(Q=2*lambda*(moments$var + hhi_mat), L=-moments$mean) } } else { ROI_objective <- Q_objective(Q=2*lambda*moments$var, L=-moments$mean) From noreply at r-forge.r-project.org Tue Sep 10 19:47:18 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 19:47:18 +0200 (CEST) Subject: [Returnanalytics-commits] r3045 - in pkg/FactorAnalytics: R man Message-ID: <20130910174718.63F711858EA@r-forge.r-project.org> Author: chenyian Date: 2013-09-10 19:47:18 +0200 (Tue, 10 Sep 2013) New Revision: 3045 Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd Log: debug. 
Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-10 17:02:44 UTC (rev 3044) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-10 17:47:18 UTC (rev 3045) @@ -27,16 +27,16 @@ #' @export #' @examples #' -#' +#' \dontrun{ #' data(managers.df) #' fit.ts <- fitTimeSeriesFactorModel(assets.names=colnames(managers.df[,(1:6)]), #' factors.names=c("EDHEC.LS.EQ","SP500.TR"), #' data=managers.df,fit.method="OLS") #' # withoud benchmark #' fm.attr <- factorModelPerformanceAttribution(fit.ts) +#' } #' #' -#' factorModelPerformanceAttribution <- function(fit,...) { Modified: pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd 2013-09-10 17:02:44 UTC (rev 3044) +++ pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd 2013-09-10 17:47:18 UTC (rev 3045) @@ -34,6 +34,7 @@ \eqn{u_t}. } \examples{ +\dontrun{ data(managers.df) fit.ts <- fitTimeSeriesFactorModel(assets.names=colnames(managers.df[,(1:6)]), factors.names=c("EDHEC.LS.EQ","SP500.TR"), @@ -41,6 +42,7 @@ # withoud benchmark fm.attr <- factorModelPerformanceAttribution(fit.ts) } +} \author{ Yi-An Chen. } From noreply at r-forge.r-project.org Tue Sep 10 21:29:51 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 21:29:51 +0200 (CEST) Subject: [Returnanalytics-commits] r3046 - pkg/PortfolioAnalytics/R Message-ID: <20130910192951.97F93185D5D@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-10 21:29:51 +0200 (Tue, 10 Sep 2013) New Revision: 3046 Modified: pkg/PortfolioAnalytics/R/constrained_objective.R Log: Adding penalty term for transaction costs to constrained_objective. 
Modified: pkg/PortfolioAnalytics/R/constrained_objective.R =================================================================== --- pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-10 17:47:18 UTC (rev 3045) +++ pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-10 19:29:51 UTC (rev 3046) @@ -495,6 +495,15 @@ } } } # End factor exposure constraint penalty + + # Add penalty for transaction costs + if(!is.null(constraints$ptc)){ + # calculate total transaction cost using portfolio$assets as initial set of weights + tc <- sum(abs(w - portfolio$assets) * constraints$ptc) + # for now use a multiplier of 1, may need to adjust this later + mult <- 1 + out <- out + penalty * mult * tc + } # End transaction cost penalty nargs <- list(...) if(length(nargs)==0) nargs <- NULL From noreply at r-forge.r-project.org Tue Sep 10 22:01:31 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 22:01:31 +0200 (CEST) Subject: [Returnanalytics-commits] r3047 - pkg/PortfolioAnalytics/demo Message-ID: <20130910200131.C326C185C4D@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-10 22:01:31 +0200 (Tue, 10 Sep 2013) New Revision: 3047 Added: pkg/PortfolioAnalytics/demo/demo_proportional_cost.R Removed: pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R Log: Added random portfolios example to demo_proportional_cost.R. Renamed demo file to drop the _ROI suffix. 
Copied: pkg/PortfolioAnalytics/demo/demo_proportional_cost.R (from rev 3043, pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R) =================================================================== --- pkg/PortfolioAnalytics/demo/demo_proportional_cost.R (rev 0) +++ pkg/PortfolioAnalytics/demo/demo_proportional_cost.R 2013-09-10 20:01:31 UTC (rev 3047) @@ -0,0 +1,76 @@ +library(PortfolioAnalytics) +library(quadprog) + +data(edhec) +N <- 4 +R <- edhec[, 1:N] +colnames(R) <- c("CA", "CTAG", "DS", "EM") +funds <- colnames(R) + +# Transaction costs are calculated using the optimal weights and initial set of weights. +# The initial set of weights is specified in the portfolio object. In the case +# below, the initial set of weights is an equally-weighted portfolio. + +# set up initial portfolio specification object +pspec <- portfolio.spec(assets=funds) +pspec <- add.constraint(portfolio=pspec, type="full_investment") +pspec <- add.constraint(portfolio=pspec, type="long_only") +pspec <- add.constraint(portfolio=pspec, type="transaction", ptc=0.01) + +# add var objective to minimize portfolio variance +minvar <- add.objective(portfolio=pspec, type="risk", name="var") + +# Note that if a return target is not specified, the results may not make sense +optimize.portfolio(R=R, portfolio=minvar, optimize_method="ROI") + +# Add a target return constraint +minvar <- add.constraint(portfolio=minvar, type="return", return_target=0.007) +optimize.portfolio(R=R, portfolio=minvar, optimize_method="ROI") + +# Add return and risk objective for quadratic utility +# Note that target return can be specified as a constraint or in the return +# objective as shown below +qu <- add.objective(portfolio=pspec, type="risk", name="var", risk_aversion=0.3) +qu <- add.objective(portfolio=qu, type="return", name="mean", target=0.007) +optimize.portfolio(R=R, portfolio=qu, optimize_method="ROI") + +# Now use random portfolios +# set up portfolio with equally weighted portfolio for initial 
weights +pspec <- portfolio.spec(assets=funds) +pspec <- add.constraint(portfolio=pspec, type="full_investment") +pspec <- add.constraint(portfolio=pspec, type="long_only") +pspec <- add.constraint(portfolio=pspec, type="transaction", ptc=0.01) + +# There is no transaction cost, the penalty should be 0. +# constrained_objective(w=rep(1/4, 4), R=R, portfolio=pspec) +# wts <- c(0.2, 0.3, 0.25, 0.25) +# 10000 * sum(abs(wts - pspec$assets)*pspec$constraints[[3]]$ptc) +# constrained_objective(w=wts, R=R, portfolio=pspec) + +# add objective to minimize standard deviation +pspec <- add.objective(portfolio=pspec, type="risk", name="StdDev") + +# This pushes the optimal portfolio to the initial weights +opt_rp <- optimize.portfolio(R=R, portfolio=pspec, optimize_method="random", search_size=2000) +opt_rp + + +# Now use random portfolios +# set up portfolio with initial weights +pspec <- portfolio.spec(assets=c(0.15, 0.15, 0.2, 0.5)) +pspec <- add.constraint(portfolio=pspec, type="full_investment") +pspec <- add.constraint(portfolio=pspec, type="long_only") +pspec <- add.constraint(portfolio=pspec, type="transaction", ptc=0.01) + +# There is no transaction cost, the penalty should be 0. 
+# constrained_objective(w=rep(1/4, 4), R=R, portfolio=pspec) +# wts <- c(0.2, 0.3, 0.25, 0.25) +# 10000 * sum(abs(wts - pspec$assets)*pspec$constraints[[3]]$ptc) +# constrained_objective(w=wts, R=R, portfolio=pspec) + +# add objective to minimize standard deviation +pspec <- add.objective(portfolio=pspec, type="risk", name="StdDev") + +# This pushes the optimal portfolio to the initial weights +opt_rp <- optimize.portfolio(R=R, portfolio=pspec, optimize_method="random", search_size=2000) +opt_rp Deleted: pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R =================================================================== --- pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R 2013-09-10 19:29:51 UTC (rev 3046) +++ pkg/PortfolioAnalytics/demo/demo_proportional_cost_ROI.R 2013-09-10 20:01:31 UTC (rev 3047) @@ -1,32 +0,0 @@ -library(PortfolioAnalytics) -library(quadprog) - -data(edhec) -N <- 4 -R <- edhec[, 1:N] -colnames(R) <- c("CA", "CTAG", "DS", "EM") -funds <- colnames(R) - -# set up initial portfolio specification object -pspec <- portfolio.spec(assets=funds) -pspec <- add.constraint(portfolio=pspec, type="full_investment") -pspec <- add.constraint(portfolio=pspec, type="long_only") -pspec <- add.constraint(portfolio=pspec, type="transaction", ptc=0.01) - -# add var objective to minimize portfolio variance -minvar <- add.objective(portfolio=pspec, type="risk", name="var") - -# Note that if a return target is not specified, the results may not make sense -optimize.portfolio(R=R, portfolio=minvar, optimize_method="ROI") - -# Add a target return constraint -minvar <- add.constraint(portfolio=minvar, type="return", return_target=0.007) -optimize.portfolio(R=R, portfolio=minvar, optimize_method="ROI") - -# Add return and risk objective for quadratic utility -# Note that target return can be specified as a constraint or in the return -# objective as shown below -qu <- add.objective(portfolio=pspec, type="risk", name="var", risk_aversion=0.3) -qu <- 
add.objective(portfolio=qu, type="return", name="mean", target=0.007) -optimize.portfolio(R=R, portfolio=qu, optimize_method="ROI") - From noreply at r-forge.r-project.org Tue Sep 10 22:23:51 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 10 Sep 2013 22:23:51 +0200 (CEST) Subject: [Returnanalytics-commits] r3048 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . R man Message-ID: <20130910202351.75DD11853AD@r-forge.r-project.org> Author: shubhanm Date: 2013-09-10 22:23:51 +0200 (Tue, 10 Sep 2013) New Revision: 3048 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION Log: Addition of support HAC/HC functions for glm and lm regression model Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-10 20:01:31 UTC (rev 3047) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-10 20:23:51 UTC (rev 3048) @@ -36,3 +36,5 @@ 'LoSharpe.R' 'se.LoSharpe.R' 'table.Sharpe.R' + 'glmi.R' + 'lmi.R' Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R 2013-09-10 20:23:51 UTC (rev 3048) @@ -0,0 +1,92 @@ +glmi <- function (formula, family = gaussian, data,vcov = NULL, weights, subset, + na.action, start = NULL, etastart, mustart, offset, control = list(...), + model = TRUE, method = "glm.fit", x = FALSE, y = TRUE, contrasts = NULL, + ...) 
+{ + call <- match.call() + if (is.character(family)) + family <- get(family, mode = "function", envir = parent.frame()) + if (is.function(family)) + family <- family() + if (is.null(family$family)) { + print(family) + stop("'family' not recognized") + } + if (missing(data)) + data <- environment(formula) + mf <- match.call(expand.dots = FALSE) + m <- match(c("formula", "data", "subset", "weights", "na.action", + "etastart", "mustart", "offset"), names(mf), 0L) + mf <- mf[c(1L, m)] + mf$drop.unused.levels <- TRUE + mf[[1L]] <- as.name("model.frame") + mf <- eval(mf, parent.frame()) + if (identical(method, "model.frame")) + return(mf) + if (!is.character(method) && !is.function(method)) + stop("invalid 'method' argument") + if (identical(method, "glm.fit")) + control <- do.call("glm.control", control) + mt <- attr(mf, "terms") + Y <- model.response(mf, "any") + if (length(dim(Y)) == 1L) { + nm <- rownames(Y) + dim(Y) <- NULL + if (!is.null(nm)) + names(Y) <- nm + } + X <- if (!is.empty.model(mt)) + model.matrix(mt, mf, contrasts) + else matrix(, NROW(Y), 0L) + weights <- as.vector(model.weights(mf)) + if (!is.null(weights) && !is.numeric(weights)) + stop("'weights' must be a numeric vector") + if (!is.null(weights) && any(weights < 0)) + stop("negative weights not allowed") + offset <- as.vector(model.offset(mf)) + if (!is.null(offset)) { + if (length(offset) != NROW(Y)) + stop(gettextf("number of offsets is %d should equal %d (number of observations)", + length(offset), NROW(Y)), domain = NA) + } + mustart <- model.extract(mf, "mustart") + etastart <- model.extract(mf, "etastart") + fit <- eval(call(if (is.function(method)) "method" else method, + x = X, y = Y, weights = weights, start = start, etastart = etastart, + mustart = mustart, offset = offset, family = family, + control = control, intercept = attr(mt, "intercept") > + 0L)) + if (length(offset) && attr(mt, "intercept") > 0L) { + fit2 <- eval(call(if (is.function(method)) "method" else method, + x = X[, 
"(Intercept)", drop = FALSE], y = Y, weights = weights, + offset = offset, family = family, control = control, + intercept = TRUE)) + if (!fit2$converged) + warning("fitting to calculate the null deviance did not converge -- increase 'maxit'?") + fit$null.deviance <- fit2$deviance + } + if (model) + fit$model <- mf + fit$na.action <- attr(mf, "na.action") + if (x) + fit$x <- X + if (!y) + fit$y <- NULL + fit <- c(fit, list(call = call, formula = formula, terms = mt, + data = data, offset = offset, control = control, method = method, + contrasts = attr(X, "contrasts"), xlevels = .getXlevels(mt, + mf))) + class(fit) <- c(fit$class, c("glm", "lm")) + fit + if(is.null(vcov)) { + se <- vcov(fit) + } else { + if (is.function(vcov)) + se <- vcov(fit) + else + se <- vcov + } + fit = list(fit,vHaC = se) + fit + +} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 2013-09-10 20:23:51 UTC (rev 3048) @@ -0,0 +1,77 @@ + +lmi <- function (formula, data,vcov = NULL, subset, weights, na.action, method = "qr", + model = TRUE, x = FALSE, y = FALSE, qr = TRUE, singular.ok = TRUE, + contrasts = NULL, offset, ...) +{ + ret.x <- x + ret.y <- y + cl <- match.call() + mf <- match.call(expand.dots = FALSE) + m <- match(c("formula", "data", "subset", "weights", "na.action", + "offset"), names(mf), 0L) + mf <- mf[c(1L, m)] + mf$drop.unused.levels <- TRUE + mf[[1L]] <- as.name("model.frame") + mf <- eval(mf, parent.frame()) + if (method == "model.frame") + return(mf) + else if (method != "qr") + warning(gettextf("method = '%s' is not supported. 
Using 'qr'", + method), domain = NA) + mt <- attr(mf, "terms") + y <- model.response(mf, "numeric") + w <- as.vector(model.weights(mf)) + if (!is.null(w) && !is.numeric(w)) + stop("'weights' must be a numeric vector") + offset <- as.vector(model.offset(mf)) + if (!is.null(offset)) { + if (length(offset) != NROW(y)) + stop(gettextf("number of offsets is %d, should equal %d (number of observations)", + length(offset), NROW(y)), domain = NA) + } + if (is.empty.model(mt)) { + x <- NULL + z <- list(coefficients = if (is.matrix(y)) matrix(, 0, + 3) else numeric(), residuals = y, fitted.values = 0 * + y, weights = w, rank = 0L, df.residual = if (!is.null(w)) sum(w != + 0) else if (is.matrix(y)) nrow(y) else length(y)) + if (!is.null(offset)) { + z$fitted.values <- offset + z$residuals <- y - offset + } + } + else { + x <- model.matrix(mt, mf, contrasts) + z <- if (is.null(w)) + lm.fit(x, y, offset = offset, singular.ok = singular.ok, + ...) + else lm.wfit(x, y, w, offset = offset, singular.ok = singular.ok, + ...) 
+ } + class(z) <- c(if (is.matrix(y)) "mlm", "lm") + z$na.action <- attr(mf, "na.action") + z$offset <- offset + z$contrasts <- attr(x, "contrasts") + z$xlevels <- .getXlevels(mt, mf) + z$call <- cl + z$terms <- mt + if (model) + z$model <- mf + if (ret.x) + z$x <- x + if (ret.y) + z$y <- y + if (!qr) + z$qr <- NULL + #z + if(is.null(vcov)) { + se <- vcov(z) + } else { + if (is.function(vcov)) + se <- vcov(z) + else + se <- vcov + } + z = list(z,vHaC = se) + z +} Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-10 20:23:51 UTC (rev 3048) @@ -0,0 +1,18 @@ +\name{glmi} +\alias{glmi} +\title{Support of HAC methods within lm regression model} +\usage{ + glmi(formula, family = gaussian, data, vcov = NULL, + weights, subset, na.action, start = NULL, etastart, + mustart, offset, control = list(...), model = TRUE, + method = "glm.fit", x = FALSE, y = TRUE, + contrasts = NULL, ...) +} +\description{ + Support of HAC methods within lm regression model +} +\seealso{ + \code{\link{glm} +} +} + Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-10 20:23:51 UTC (rev 3048) @@ -0,0 +1,17 @@ +\name{lmi} +\alias{lmi} +\title{Support of HAC methods within glm regression model} +\usage{ + lmi(formula, data, vcov = NULL, subset, weights, + na.action, method = "qr", model = TRUE, x = FALSE, + y = FALSE, qr = TRUE, singular.ok = TRUE, + contrasts = NULL, offset, ...) 
+} +\description{ + Support of HAC methods within glm regression model +} +\seealso{ + \code{\link{lm} +} +} + From noreply at r-forge.r-project.org Wed Sep 11 00:14:53 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 00:14:53 +0200 (CEST) Subject: [Returnanalytics-commits] r3049 - pkg/PerformanceAnalytics/sandbox/pulkit/R Message-ID: <20130910221453.CC406184D71@r-forge.r-project.org> Author: pulkit Date: 2013-09-11 00:14:53 +0200 (Wed, 11 Sep 2013) New Revision: 3049 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R pkg/PerformanceAnalytics/sandbox/pulkit/R/TuW.R pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R Log: na handling in Triple penance Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R 2013-09-10 20:23:51 UTC (rev 3048) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R 2013-09-10 22:14:53 UTC (rev 3049) @@ -65,7 +65,6 @@ # FUNCTION: x = checkData(R) - x = na.omit(x) if(ncol(x)==1 || is.null(R) || is.vector(R)){ calcul = FALSE for(i in (1:length(x))){ Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R 2013-09-10 20:23:51 UTC (rev 3048) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R 2013-09-10 22:14:53 UTC (rev 3049) @@ -58,6 +58,7 @@ # # confidence: The confidence interval. 
x = checkData(R) + x = na.omit(x) mu = mean(x, na.rm = TRUE) sigma_infinity = StdDev(x) phi = cov(x[-1],x[-length(x)])/(cov(x[-length(x)])) @@ -79,6 +80,7 @@ } if(mu<0){ warning(paste("NaN produced because mu < 0 ",colnames(x))) + } minQ = list(value=NaN,x=NaN) } @@ -124,6 +126,7 @@ x = checkData(R) + x = na.omit(x) mu = mean(x, na.rm = TRUE) sigma_infinity = StdDev(x) phi = cov(x[-1],x[-length(x)])/(cov(x[-length(x)])) Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/TuW.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/TuW.R 2013-09-10 20:23:51 UTC (rev 3048) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/TuW.R 2013-09-10 22:14:53 UTC (rev 3049) @@ -39,7 +39,6 @@ TuW<-function(R,confidence=0.95,type=c("ar","normal"),...){ x = checkData(R) - x = na.omit(x) if(ncol(x)==1 || is.null(R) || is.vector(R)){ calcul = FALSE for(i in (1:length(x))){ Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R 2013-09-10 20:23:51 UTC (rev 3048) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R 2013-09-10 22:14:53 UTC (rev 3049) @@ -14,7 +14,7 @@ #' @references Bailey, David H. and Lopez de Prado, Marcos, Drawdown-Based Stop-Outs and the "Triple Penance" Rule(January 1, 2013). 
#' @export -table.Penance<-function(R,confidence){ +table.Penance<-function(R,confidence=0.95){ # DESCRIPTION: # Maximum Drawdown and Time under Water considering first-order serial correlation # @@ -26,15 +26,15 @@ # # Function: x = checkData(R) - x = na.omit(x) columns = ncol(x) columnnames = colnames(x) rownames = c("mean","stdDev","phi","sigma","MaxDD(in %)","t*","MaxTuW","Penance") for(column in 1:columns){ - phi = cov(x[,column][-1],x[,column][-length(x[,column])])/(cov(x[,column][-length(x[,column])])) - sigma_infinity = StdDev(x[,column]) + col_val = na.omit(x[,column]) + phi = cov(col_val[-1],col_val[-length(col_val)])/(cov(col_val[-length(col_val)])) + sigma_infinity = StdDev(col_val) sigma = sigma_infinity*((1-phi^2)^0.5) - column_MinQ<-c(mean(x[,column]),sigma_infinity,phi,sigma) + column_MinQ<-c(mean(col_val),sigma_infinity,phi,sigma) column_MinQ <- c(column_MinQ,get_minq(x[,column],confidence)) column_TuW = get_TuW(x[,column],confidence) v = c(column_MinQ,column_TuW,column_MinQ[5]/column_TuW) From noreply at r-forge.r-project.org Wed Sep 11 01:24:02 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 01:24:02 +0200 (CEST) Subject: [Returnanalytics-commits] r3050 - pkg/PerformanceAnalytics/sandbox/pulkit/R Message-ID: <20130910232402.746191845FD@r-forge.r-project.org> Author: pulkit Date: 2013-09-11 01:24:02 +0200 (Wed, 11 Sep 2013) New Revision: 3050 Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R Log: Correction in penance formula Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R 2013-09-10 22:14:53 UTC (rev 3049) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R 2013-09-10 23:24:02 UTC (rev 3050) @@ -37,7 +37,7 @@ column_MinQ<-c(mean(col_val),sigma_infinity,phi,sigma) column_MinQ <- 
c(column_MinQ,get_minq(x[,column],confidence)) column_TuW = get_TuW(x[,column],confidence) - v = c(column_MinQ,column_TuW,column_MinQ[5]/column_TuW) + v = c(column_MinQ,column_TuW,(column_TuW/column_MinQ[5])-1) if(column == 1){ result = data.frame(Value = v, row.names = rownames) } From noreply at r-forge.r-project.org Wed Sep 11 01:57:44 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 01:57:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3051 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . R man Message-ID: <20130910235744.EFD41184DE2@r-forge.r-project.org> Author: shubhanm Date: 2013-09-11 01:57:44 +0200 (Wed, 11 Sep 2013) New Revision: 3051 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.EMaxDDGBM.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd Log: Output format change,+ addition of examples Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-10 23:24:02 UTC (rev 3050) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-10 23:57:44 UTC (rev 3051) @@ -3,7 +3,7 @@ export(CDrawdown) export(chart.AcarSim) export(chart.Autocorrelation) -export(EMaxDDGBM) +export(EmaxDDGBM) export(GLMSmoothIndex) export(LoSharpe) export(QP.Norm) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R 2013-09-10 23:24:02 UTC (rev 3050) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R 
2013-09-10 23:57:44 UTC (rev 3051) @@ -1,17 +1,31 @@ -#' Expected Drawdown using Brownian Motion Assumptions +#' @title Summary of Expected Drawdown using Brownian Motion Assumptions and Return-Volatility #' -#' Works on the model specified by Maddon-Ismail -#' -#' -#' -#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of -#' asset returns +#' @title Expected Maximum Drawdown Using Brownian Motion Assumptions +#' @description Works on the model specified by Maddon-Ismail which investigates the behavior of this statistic for a Brownian motion +#' with drift. +#' @details If X(t) is a random process on [0, T ], the maximum drawdown at time T , D(T), is defined by +#' where \deqn{D(T) = sup [X(s) - X(t)]} where s belongs to [0,t] and s belongs to [0,T] +#'Informally, this is the largest drop from a peak to a bottom. In this paper, we investigate the +#'behavior of this statistic for a Brownian motion with drift. In particular, we give an infinite +#'series representation of its distribution, and consider its expected value. When the drift is zero, +#'we give an analytic expression for the expected value, and for non-zero drift, we give an infinite +#'series representation. For all cases, we compute the limiting \bold{(\eqn{T tends to \infty})} behavior, which can be +#'logarithmic (\eqn{\mu} > 0), square root (\eqn{\mu} = 0), or linear (\eqn{\mu} < 0). +#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns #' @param digits significant number -#' @author Shubhankit +#' @author Shubhankit Mohan #' @keywords Expected Drawdown Using Brownian Motion Assumptions -#' +#' @references Magdon-Ismail, M., Atiya, A., Pratap, A., and Yaser S. Abu-Mostafa: On the Maximum Drawdown of a Browninan Motion, Journal of Applied Probability 41, pp. 
147-161, 2004 \url{http://alumnus.caltech.edu/~amir/drawdown-jrnl.pdf} +#' @keywords Drawdown models Brownian Motion Assumptions +#' @examples +#' +#'library(PerformanceAnalytics) +#' data(edhec) +#' EmaxDDGBM(edhec) +#' @rdname EMaxDDGBM +#' @export #' @export -EMaxDDGBM <- +EmaxDDGBM <- function (R,digits =4) {# @author @@ -153,13 +167,26 @@ Ed<-(2*sig^2/mu)*(-Qn) } - } - return(Ed[1]*100) + z = c((Ed*100)) + znames = c("Expected Drawdown in % using Brownian Motion Assumptions") + if(column == 1) { + resultingtable = data.frame(Value = z, row.names = znames) + } + else { + nextcolumn = data.frame(Value = z, row.names = znames) + resultingtable = cbind(resultingtable, nextcolumn) + } + } + colnames(resultingtable) = columnnames + ans = base::round(resultingtable, digits) + ans + - } + + } ############################################################################### # R (http://r-project.org/) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.EMaxDDGBM.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.EMaxDDGBM.R 2013-09-10 23:24:02 UTC (rev 3050) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.EMaxDDGBM.R 2013-09-10 23:57:44 UTC (rev 3051) @@ -1,4 +1,4 @@ -#' @title Expected Drawdown using Brownian Motion Assumptions +#' @title Summary of Expected Drawdown using Brownian Motion Assumptions and Return-Volatility #' #' @description Works on the model specified by Maddon-Ismail which investigates the behavior of this statistic for a Brownian motion #' with drift. 
Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd 2013-09-10 23:24:02 UTC (rev 3050) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd 2013-09-10 23:57:44 UTC (rev 3051) @@ -1,8 +1,8 @@ -\name{EMaxDDGBM} -\alias{EMaxDDGBM} -\title{Expected Drawdown using Brownian Motion Assumptions} +\name{EmaxDDGBM} +\alias{EmaxDDGBM} +\title{Summary of Expected Drawdown using Brownian Motion Assumptions and Return-Volatility} \usage{ - EMaxDDGBM(R, digits = 4) + EmaxDDGBM(R, digits = 4) } \arguments{ \item{R}{an xts, vector, matrix, data frame, timeSeries @@ -11,15 +11,47 @@ \item{digits}{significant number} } \description{ - Works on the model specified by Maddon-Ismail + Works on the model specified by Maddon-Ismail which + investigates the behavior of this statistic for a + Brownian motion with drift. } +\details{ + If X(t) is a random process on [0, T ], the maximum + drawdown at time T , D(T), is defined by where \deqn{D(T) + = sup [X(s) - X(t)]} where s belongs to [0,t] and s + belongs to [0,T] Informally, this is the largest drop + from a peak to a bottom. In this paper, we investigate + the behavior of this statistic for a Brownian motion with + drift. In particular, we give an infinite series + representation of its distribution, and consider its + expected value. When the drift is zero, we give an + analytic expression for the expected value, and for + non-zero drift, we give an infinite series + representation. For all cases, we compute the limiting + \bold{(\eqn{T tends to \infty})} behavior, which can be + logarithmic (\eqn{\mu} > 0), square root (\eqn{\mu} = 0), + or linear (\eqn{\mu} < 0). 
+} +\examples{ +library(PerformanceAnalytics) +data(edhec) +EmaxDDGBM(edhec) +} \author{ - Shubhankit + Shubhankit Mohan } +\references{ + Magdon-Ismail, M., Atiya, A., Pratap, A., and Yaser S. + Abu-Mostafa: On the Maximum Drawdown of a Browninan + Motion, Journal of Applied Probability 41, pp. 147-161, + 2004 + \url{http://alumnus.caltech.edu/~amir/drawdown-jrnl.pdf} +} \keyword{Assumptions} \keyword{Brownian} \keyword{Drawdown} \keyword{Expected} +\keyword{models} \keyword{Motion} \keyword{Using} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-10 23:24:02 UTC (rev 3050) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-10 23:57:44 UTC (rev 3051) @@ -1,57 +1,57 @@ -\name{table.EMaxDDGBM} -\alias{table.EMaxDDGBM} -\title{Expected Drawdown using Brownian Motion Assumptions} -\usage{ - table.EMaxDDGBM(R, digits = 4) -} -\arguments{ - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of asset returns} - - \item{digits}{significant number} -} -\description{ - Works on the model specified by Maddon-Ismail which - investigates the behavior of this statistic for a - Brownian motion with drift. -} -\details{ - If X(t) is a random process on [0, T ], the maximum - drawdown at time T , D(T), is defined by where \deqn{D(T) - = sup [X(s) - X(t)]} where s belongs to [0,t] and s - belongs to [0,T] Informally, this is the largest drop - from a peak to a bottom. In this paper, we investigate - the behavior of this statistic for a Brownian motion with - drift. In particular, we give an infinite series - representation of its distribution, and consider its - expected value. When the drift is zero, we give an - analytic expression for the expected value, and for - non-zero drift, we give an infinite series - representation. 
For all cases, we compute the limiting - \bold{(\eqn{T tends to \infty})} behavior, which can be - logarithmic (\eqn{\mu} > 0), square root (\eqn{\mu} = 0), - or linear (\eqn{\mu} < 0). -} -\examples{ -library(PerformanceAnalytics) -data(edhec) -table.EMaxDDGBM(edhec) -} -\author{ - Shubhankit Mohan -} -\references{ - Magdon-Ismail, M., Atiya, A., Pratap, A., and Yaser S. - Abu-Mostafa: On the Maximum Drawdown of a Browninan - Motion, Journal of Applied Probability 41, pp. 147-161, - 2004 - \url{http://alumnus.caltech.edu/~amir/drawdown-jrnl.pdf} -} -\keyword{Assumptions} -\keyword{Brownian} -\keyword{Drawdown} -\keyword{Expected} -\keyword{models} -\keyword{Motion} -\keyword{Using} - +\name{table.EMaxDDGBM} +\alias{table.EMaxDDGBM} +\title{Summary of Expected Drawdown using Brownian Motion Assumptions and Return-Volatility} +\usage{ + table.EMaxDDGBM(R, digits = 4) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{digits}{significant number} +} +\description{ + Works on the model specified by Maddon-Ismail which + investigates the behavior of this statistic for a + Brownian motion with drift. +} +\details{ + If X(t) is a random process on [0, T ], the maximum + drawdown at time T , D(T), is defined by where \deqn{D(T) + = sup [X(s) - X(t)]} where s belongs to [0,t] and s + belongs to [0,T] Informally, this is the largest drop + from a peak to a bottom. In this paper, we investigate + the behavior of this statistic for a Brownian motion with + drift. In particular, we give an infinite series + representation of its distribution, and consider its + expected value. When the drift is zero, we give an + analytic expression for the expected value, and for + non-zero drift, we give an infinite series + representation. For all cases, we compute the limiting + \bold{(\eqn{T tends to \infty})} behavior, which can be + logarithmic (\eqn{\mu} > 0), square root (\eqn{\mu} = 0), + or linear (\eqn{\mu} < 0). 
+} +\examples{ +library(PerformanceAnalytics) +data(edhec) +table.EMaxDDGBM(edhec) +} +\author{ + Shubhankit Mohan +} +\references{ + Magdon-Ismail, M., Atiya, A., Pratap, A., and Yaser S. + Abu-Mostafa: On the Maximum Drawdown of a Browninan + Motion, Journal of Applied Probability 41, pp. 147-161, + 2004 + \url{http://alumnus.caltech.edu/~amir/drawdown-jrnl.pdf} +} +\keyword{Assumptions} +\keyword{Brownian} +\keyword{Drawdown} +\keyword{Expected} +\keyword{models} +\keyword{Motion} +\keyword{Using} + From noreply at r-forge.r-project.org Wed Sep 11 03:11:07 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 03:11:07 +0200 (CEST) Subject: [Returnanalytics-commits] r3052 - in pkg/PerformanceAnalytics/sandbox/pulkit: . R man Message-ID: <20130911011107.31140185D82@r-forge.r-project.org> Author: pulkit Date: 2013-09-11 03:11:06 +0200 (Wed, 11 Sep 2013) New Revision: 3052 Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/Penance.R pkg/PerformanceAnalytics/sandbox/pulkit/man/Penance.Rd Modified: pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION pkg/PerformanceAnalytics/sandbox/pulkit/NAMESPACE pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R pkg/PerformanceAnalytics/sandbox/pulkit/man/BenchmarkSR.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/table.Penance.Rd Log: Added Penance.R plus some correction and examples Modified: pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION 2013-09-10 23:57:44 UTC (rev 3051) +++ pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION 2013-09-11 01:11:06 UTC (rev 3052) @@ -48,3 +48,4 @@ 'capm_aorda.R' 'psr_python.R' 'ret.R' + 
'Penance.R' Modified: pkg/PerformanceAnalytics/sandbox/pulkit/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/NAMESPACE 2013-09-10 23:57:44 UTC (rev 3051) +++ pkg/PerformanceAnalytics/sandbox/pulkit/NAMESPACE 2013-09-11 01:11:06 UTC (rev 3052) @@ -14,6 +14,7 @@ export(MinTrackRecord) export(MonteSimulTriplePenance) export(MultiBetaDrawdown) +export(Penance) export(ProbSharpeRatio) export(PsrPortfolio) export(REDDCOPS) Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R 2013-09-10 23:57:44 UTC (rev 3051) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/MaxDD.R 2013-09-11 01:11:06 UTC (rev 3052) @@ -91,6 +91,7 @@ } if(type[1]=="normal"){ result = apply(x,MARGIN = 2,dd_norm,confidence) + print(result) } result = round(result,3) rownames(result) = c("MaxDD(in %)","t*") Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/Penance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/Penance.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/Penance.R 2013-09-11 01:11:06 UTC (rev 3052) @@ -0,0 +1,77 @@ +#' @title +#' Penance +#' +#'@description +#'A plot for Penance vs phi for the given portfolio +#'The relationship between penance and phi is given by +#' +#'\deqn{penance = \frac{Maximum Time under water}{t_\alpha^{*}-1}} +#' +#'Penance Measures how long it takes to recover from the maximum drawdown +#'as a multiple of the time it took to reach the bottom. Penance is smaller, +#'the higher the value of \eqn{\phi(Phi)} and the higher the ratio \eqn{\frac{\mu}{\sigma}}. +#'Positive serial autocorrelation leads to smaller Penance due to greater periods under +#'water. 
+#' @param R Returns +#' @param confidence the confidence interval +#' @param type The type of distribution "normal" or "ar"."ar" stands for Autoregressive. +#' @param \dots any other passthru variable +#' @author Pulkit Mehrotra +#' @seealso \code{\link{chart.Penance}} \code{\link{table.Penance}} \code{\link{TuW}} +#' @references Bailey, David H. and Lopez de Prado, Marcos, Drawdown-Based Stop-Outs and the "Triple Penance" Rule(January 1, 2013). +#' +#' @examples +#' data(edhec) +#' Penance(edhec,0.95,"ar") +#' Penance(edhec[,1],0.95,"normal") +#'@export +Penance<-function(R,confidence=0.95,type=c("ar","normal"),...) +{ + + # DESCRIPTION: + # Calculates the maximum drawdown for the return series based on the given + # distribution normal or autoregressive. + + # INPUT: + # The Return Series of the portfolio is taken as the input. The Return + # Series can be an xts, vector, matrix, data frame, timeSeries or zoo object of + # asset returns. The type of distribution , "normal" or non-normal "ar", The confidence + # level + + # FUNCTION: + x = checkData(R) + if(ncol(x)==1 || is.null(R) || is.vector(R)){ + calcul = FALSE + for(i in (1:length(x))){ + if(!is.na(x[i])){ + calcul = TRUE + } + } + if(!calcul){ + result = NA + } + else{ + if(type[1]=="ar"){ + result = get_penance(x,confidence) + } + if(type[1]=="normal"){ + result = penance_norm(x,confidence) + } + } + + return(result) + } + if(type[1]=="ar"){ + result = apply(x,MARGIN = 2,get_penance,confidence) + } + if(type[1]=="normal"){ + result = apply(x,MARGIN = 2,penance_norm,confidence) + } + result = round(result,3) + result = as.data.frame(result) + result = t(result) + rownames(result) = paste("Penance") + return(result) +} + + Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R 2013-09-10 23:57:44 UTC (rev 3051) +++ 
pkg/PerformanceAnalytics/sandbox/pulkit/R/TriplePenance.R 2013-09-11 01:11:06 UTC (rev 3052) @@ -14,6 +14,19 @@ ## Bailey, David H. and Lopez de Prado, Marcos, Drawdown-Based Stop-Outs ## and the ?Triple Penance? Rule(January 1, 2013). + +penance_norm<-function(x,confidence){ + # DESCRIPTION: + # A function to return the Penance for a normal distribution + + # Inputs: + # R: The Return Series + # + # confidence: The confidence Level + penance = (tuw_norm(x,confidence)/dd_norm(x,confidence)[2])-1 + return(penance) +} + dd_norm<-function(x,confidence){ # DESCRIPTION: # A function to return the maximum drawdown for a normal distribution @@ -22,6 +35,7 @@ # R: The Return Series # # confidence: The confidence Level + x = na.omit(x) sd = StdDev(x) mu = mean(x, na.rm = TRUE) dd = max(0,((qnorm(1-confidence)*sd)^2)/(4*mu)) @@ -37,6 +51,7 @@ # Inputs: # R: Return series # confidence: The confidence level + x = na.omit(x) sd = StdDev(x) mu = mean(x,na.rm = TRUE) tuw = ((qnorm(1-confidence)*sd)/mu)^2 @@ -45,7 +60,17 @@ } +get_penance<-function(x,confidence){ + # DESCRIPTION: + # A function to return the Penance for a first order serially autocorrelated distribution + # Inputs: + # R: The Return Series + # + # confidence: The confidence Level + penance = (get_TuW(x,confidence)/get_minq(x,confidence)[2])-1 + return(penance) +} get_minq<-function(R,confidence){ # DESCRIPTION: Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R 2013-09-10 23:57:44 UTC (rev 3051) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R 2013-09-11 01:11:06 UTC (rev 3052) @@ -2,11 +2,10 @@ #'Penance vs phi plot #' #'@description -#' #'A plot for Penance vs phi for the given portfolio #'The relationship between penance and phi is given by #' -#'\deqn{penance = \frac{Maximum Drawdown}{Maximum Time Under Water}} +#'\deqn{penance = \frac{Maximum Time 
under water}{t_\alpha^{*}-1}} #' #'Penance Measures how long it takes to recover from the maximum drawdown #'as a multiple of the time it took to reach the bottom. Penance is smaller, @@ -68,8 +67,9 @@ phi = 1:columns penance = 1:columns for(column in 1:columns){ - phi[column] = cov(x[,column][-1],x[,column][-length(x[,column])])/(cov(x[,column][-length(x[,column])])) - penance[column]<-MaxDD(x[,column],confidence,type = type)[1]/TuW(x[,column],confidence,type = type) + col_val = na.omit(x[,column]) + phi[column] = cov(col_val[-1],col_val[-length(col_val)])/(cov(col_val[-length(col_val)])) + penance[column]<-Penance(x[,column],confidence,type=type[1]) } if(is.null(ylab)){ ylab = "Penance" Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R 2013-09-10 23:57:44 UTC (rev 3051) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/table.Penance.R 2013-09-11 01:11:06 UTC (rev 3052) @@ -8,13 +8,13 @@ #' #' @param R Returns #' @param confidence the confidence interval -#' +#' @param type The type of distribution "normal" or "ar"."ar" stands for Autoregressive. #' @author Pulkit Mehrotra #' @seealso \code{\link{chart.Penance}} \code{\link{MaxDD}} \code{\link{TuW}} #' @references Bailey, David H. and Lopez de Prado, Marcos, Drawdown-Based Stop-Outs and the "Triple Penance" Rule(January 1, 2013). 
#' @export -table.Penance<-function(R,confidence=0.95){ +table.Penance<-function(R,confidence=0.95,type=c("ar","norm")){ # DESCRIPTION: # Maximum Drawdown and Time under Water considering first-order serial correlation # @@ -35,9 +35,10 @@ sigma_infinity = StdDev(col_val) sigma = sigma_infinity*((1-phi^2)^0.5) column_MinQ<-c(mean(col_val),sigma_infinity,phi,sigma) - column_MinQ <- c(column_MinQ,get_minq(x[,column],confidence)) - column_TuW = get_TuW(x[,column],confidence) - v = c(column_MinQ,column_TuW,(column_TuW/column_MinQ[5])-1) + column_MinQ <- c(column_MinQ,MaxDD(x[,column],confidence,type=type[1])) + column_TuW = TuW(x[,column],confidence,type=type[1]) + v = c(column_MinQ,column_TuW,Penance(x[,column],confidence,type=type[1])) + v = round(v,4) if(column == 1){ result = data.frame(Value = v, row.names = rownames) } Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/BenchmarkSR.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/BenchmarkSR.Rd 2013-09-10 23:57:44 UTC (rev 3051) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/BenchmarkSR.Rd 2013-09-11 01:11:06 UTC (rev 3052) @@ -9,17 +9,18 @@ object of asset returns} } \description{ - The benchmark SR is a linear function of the average SR - of the individual strategies, and a decreasing convex - function of the number of strategies and the average - pairwise correlation. The Returns are given as the input - with the benchmark Sharpe Ratio as the output. + The benchmark SR is a linear function of the average + annualized SR of the individual strategies, and a + decreasing convex function of the number of strategies + and the average pairwise correlation. The Returns are + given as the input with the benchmark Sharpe Ratio as the + output. 
\deqn{SR_B = \bar{SR}\sqrt{\frac{S}{1+(S-1)\bar{\rho}}}} - Here \eqn{\bar{SR}} is the average SR of the portfolio - and \eqn{\bar{\rho}} is the average correlation across - off-diagonal elements + Here \eqn{\bar{SR}} is the average annualized Sharpe + Ratio of the portfolio and \eqn{\bar{\rho}} is the + average correlation across off-diagonal elements. } \examples{ data(edhec) Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/Penance.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/Penance.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/Penance.Rd 2013-09-11 01:11:06 UTC (rev 3052) @@ -0,0 +1,50 @@ +\name{Penance} +\alias{Penance} +\title{Penance} +\usage{ + Penance(R, confidence = 0.95, type = c("ar", "normal"), + ...) +} +\arguments{ + \item{R}{Returns} + + \item{confidence}{the confidence interval} + + \item{type}{The type of distribution "normal" or + "ar"."ar" stands for Autoregressive.} + + \item{\dots}{any other passthru variable} +} +\description{ + A plot for Penance vs phi for the given portfolio The + relationship between penance and phi is given by + + \deqn{penance = \frac{Maximum Time under + water}{t_\alpha^{*}-1}} + + Penance Measures how long it takes to recover from the + maximum drawdown as a multiple of the time it took to + reach the bottom. Penance is smaller, the higher the + value of \eqn{\phi(Phi)} and the higher the ratio + \eqn{\frac{\mu}{\sigma}}. Positive serial autocorrelation + leads to smaller Penance due to greater periods under + water. +} +\examples{ +data(edhec) +Penance(edhec,0.95,"ar") +Penance(edhec[,1],0.95,"normal") +} +\author{ + Pulkit Mehrotra +} +\references{ + Bailey, David H. and Lopez de Prado, Marcos, + Drawdown-Based Stop-Outs and the "Triple Penance" + Rule(January 1, 2013). 
+} +\seealso{ + \code{\link{chart.Penance}} \code{\link{table.Penance}} + \code{\link{TuW}} +} + Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd 2013-09-10 23:57:44 UTC (rev 3051) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd 2013-09-11 01:11:06 UTC (rev 3052) @@ -58,8 +58,8 @@ A plot for Penance vs phi for the given portfolio The relationship between penance and phi is given by - \deqn{penance = \frac{Maximum Drawdown}{Maximum Time - Under Water}} + \deqn{penance = \frac{Maximum Time under + water}{t_\alpha^{*}-1}} Penance Measures how long it takes to recover from the maximum drawdown as a multiple of the time it took to Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/table.Penance.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/table.Penance.Rd 2013-09-10 23:57:44 UTC (rev 3051) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/table.Penance.Rd 2013-09-11 01:11:06 UTC (rev 3052) @@ -2,12 +2,16 @@ \alias{table.Penance} \title{Table for displaying the Mximum Drawdown and the Time under Water} \usage{ - table.Penance(R, confidence) + table.Penance(R, confidence = 0.95, + type = c("ar", "norm")) } \arguments{ \item{R}{Returns} \item{confidence}{the confidence interval} + + \item{type}{The type of distribution "normal" or + "ar"."ar" stands for Autoregressive.} } \description{ \code{table.Penance} Displays the table showing From noreply at r-forge.r-project.org Wed Sep 11 04:39:37 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 04:39:37 +0200 (CEST) Subject: [Returnanalytics-commits] r3053 - in pkg/PortfolioAnalytics: R demo Message-ID: <20130911023937.376581851F6@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-11 04:39:36 +0200 (Wed, 11 Sep 2013) New 
Revision: 3053 Modified: pkg/PortfolioAnalytics/R/constrained_objective.R pkg/PortfolioAnalytics/demo/demo_proportional_cost.R Log: Modified penalty and demo script for transaction costs. Modified: pkg/PortfolioAnalytics/R/constrained_objective.R =================================================================== --- pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-11 01:11:06 UTC (rev 3052) +++ pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-11 02:39:36 UTC (rev 3053) @@ -502,7 +502,7 @@ tc <- sum(abs(w - portfolio$assets) * constraints$ptc) # for now use a multiplier of 1, may need to adjust this later mult <- 1 - out <- out + penalty * mult * tc + out <- out + mult * tc } # End transaction cost penalty nargs <- list(...) Modified: pkg/PortfolioAnalytics/demo/demo_proportional_cost.R =================================================================== --- pkg/PortfolioAnalytics/demo/demo_proportional_cost.R 2013-09-11 01:11:06 UTC (rev 3052) +++ pkg/PortfolioAnalytics/demo/demo_proportional_cost.R 2013-09-11 02:39:36 UTC (rev 3053) @@ -37,40 +37,39 @@ # Now use random portfolios # set up portfolio with equally weighted portfolio for initial weights pspec <- portfolio.spec(assets=funds) -pspec <- add.constraint(portfolio=pspec, type="full_investment") +pspec <- add.constraint(portfolio=pspec, type="leverage", min_sum=0.99, max_sum=1.01) pspec <- add.constraint(portfolio=pspec, type="long_only") pspec <- add.constraint(portfolio=pspec, type="transaction", ptc=0.01) # There is no transaction cost, the penalty should be 0. 
# constrained_objective(w=rep(1/4, 4), R=R, portfolio=pspec) # wts <- c(0.2, 0.3, 0.25, 0.25) -# 10000 * sum(abs(wts - pspec$assets)*pspec$constraints[[3]]$ptc) +# sum(abs(wts - pspec$assets)*pspec$constraints[[3]]$ptc) # constrained_objective(w=wts, R=R, portfolio=pspec) # add objective to minimize standard deviation pspec <- add.objective(portfolio=pspec, type="risk", name="StdDev") # This pushes the optimal portfolio to the initial weights -opt_rp <- optimize.portfolio(R=R, portfolio=pspec, optimize_method="random", search_size=2000) +opt_rp <- optimize.portfolio(R=R, portfolio=pspec, optimize_method="random", search_size=2000, trace=TRUE) opt_rp - # Now use random portfolios # set up portfolio with initial weights pspec <- portfolio.spec(assets=c(0.15, 0.15, 0.2, 0.5)) -pspec <- add.constraint(portfolio=pspec, type="full_investment") +pspec <- add.constraint(portfolio=pspec, type="leverage", min_sum=0.99, max_sum=1.01) pspec <- add.constraint(portfolio=pspec, type="long_only") pspec <- add.constraint(portfolio=pspec, type="transaction", ptc=0.01) # There is no transaction cost, the penalty should be 0. 
# constrained_objective(w=rep(1/4, 4), R=R, portfolio=pspec) # wts <- c(0.2, 0.3, 0.25, 0.25) -# 10000 * sum(abs(wts - pspec$assets)*pspec$constraints[[3]]$ptc) +# sum(abs(wts - pspec$assets)*pspec$constraints[[3]]$ptc) # constrained_objective(w=wts, R=R, portfolio=pspec) # add objective to minimize standard deviation pspec <- add.objective(portfolio=pspec, type="risk", name="StdDev") # This pushes the optimal portfolio to the initial weights -opt_rp <- optimize.portfolio(R=R, portfolio=pspec, optimize_method="random", search_size=2000) +opt_rp <- optimize.portfolio(R=R, portfolio=pspec, optimize_method="random", search_size=2000, trace=TRUE) opt_rp From noreply at r-forge.r-project.org Wed Sep 11 07:34:29 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 07:34:29 +0200 (CEST) Subject: [Returnanalytics-commits] r3054 - pkg/PortfolioAnalytics/R Message-ID: <20130911053429.9782B185CF3@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-11 07:34:29 +0200 (Wed, 11 Sep 2013) New Revision: 3054 Modified: pkg/PortfolioAnalytics/R/extractstats.R Log: Making extractObjectiveMeasures and S3 generic function and adding function to extract objective measures for opt.list object. 
Modified: pkg/PortfolioAnalytics/R/extractstats.R =================================================================== --- pkg/PortfolioAnalytics/R/extractstats.R 2013-09-11 02:39:36 UTC (rev 3053) +++ pkg/PortfolioAnalytics/R/extractstats.R 2013-09-11 05:34:29 UTC (rev 3054) @@ -307,6 +307,12 @@ #' @author Ross Bennett #' @export extractObjectiveMeasures <- function(object){ + UseMethod("extractObjectiveMeasures") +} + +#' @method extractObjectiveMeasures optimize.portfolio +#' @S3method extractObjectiveMeasures optimize.portfolio +extractObjectiveMeasures.optimize.portfolio <- function(object){ if(!inherits(object, "optimize.portfolio")) stop("object must be of class 'optimize.portfolio'") # objective measures returned as $objective_measures for all other solvers out <- object$objective_measures @@ -395,3 +401,32 @@ } return(weights_mat) } + +#' @method extractObjectiveMeasures opt.list +#' @S3method extractObjectiveMeasures opt.list +extractObjectiveMeasures.opt.list <- function(object){ + if(!inherits(object, "opt.list")) stop("object must be of class 'opt.list'") + # get/set the names in the object + opt_names <- names(object) + if(is.null(opt_names)) opt_names <- paste("opt", 1:length(object)) + + obj_list <- list() + for(i in 1:length(object)){ + tmp <- unlist(object[[i]]$objective_measures) + names(tmp) <- PortfolioAnalytics:::name.replace(names(tmp)) + obj_list[[opt_names[i]]] <- tmp + } + obj_list + + obj_names <- unique(unlist(lapply(obj_list, names))) + + obj_mat <- matrix(NA, nrow=length(obj_list), ncol=length(obj_names), + dimnames=list(opt_names, obj_names)) + + for(i in 1:length(obj_list)){ + pm <- pmatch(x=names(obj_list[[i]]), table=obj_names) + obj_mat[i, pm] <- obj_list[[i]] + } + return(obj_mat) +} + From noreply at r-forge.r-project.org Wed Sep 11 12:09:49 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 12:09:49 +0200 (CEST) Subject: [Returnanalytics-commits] r3055 - in pkg/Meucci: R data 
demo Message-ID: <20130911100949.C7DF2183D80@r-forge.r-project.org> Author: xavierv Date: 2013-09-11 12:09:49 +0200 (Wed, 11 Sep 2013) New Revision: 3055 Added: pkg/Meucci/data/returnsDistribution.rda Modified: pkg/Meucci/R/PlotCompositionEfficientFrontier.R pkg/Meucci/R/PlotDistributions.R pkg/Meucci/R/Prior2Posterior.R pkg/Meucci/R/RankingInformation.R pkg/Meucci/demo/AnalyticalvsNumerical.R pkg/Meucci/demo/RankingInformation.R Log: -fixed the errors from the Ranking information example for the Historical Scenarios paper Modified: pkg/Meucci/R/PlotCompositionEfficientFrontier.R =================================================================== --- pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2013-09-11 05:34:29 UTC (rev 3054) +++ pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2013-09-11 10:09:49 UTC (rev 3055) @@ -10,7 +10,7 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -PlotCompositionEfficientFrontier = function(Portfolios) +PlotCompositionEfficientFrontier = function( Portfolios, s, e ) { dev.new(); Modified: pkg/Meucci/R/PlotDistributions.R =================================================================== --- pkg/Meucci/R/PlotDistributions.R 2013-09-11 05:34:29 UTC (rev 3054) +++ pkg/Meucci/R/PlotDistributions.R 2013-09-11 10:09:49 UTC (rev 3055) @@ -7,8 +7,14 @@ #' @param p_ a vector containing the posterior probability values #' @param Mu_ a vector containing the posterior means #' @param Sigma_ a vector containing the posterior standard deviations +#' +#' @references +#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "PlotDistributions.m" +#' #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} #' @export + PlotDistributions = function( X , p , Mu , Sigma , p_ , Mu_ , Sigma_ ) { J = nrow( X ) @@ -21,7 +27,7 @@ # set ranges xl = min( X[ , n ] ) xh = max( X[ , n ] ) - x = as.matrix(seq(from=xl, to=xh, by=(xh-xl)/100)) + x = as.matrix( seq( from=xl, to=xh, by=(xh-xl) / 100 ) ) # posterior numerical # h3 = pHist(X[ ,n] , p_ , NBins ) @@ -31,11 +37,11 @@ h4 = plot( x , y1, type='l', col='red', xlab='', ylab='' ) # prior analytical - par(new = TRUE) + par( new = TRUE ) y2 = dnorm( x , Mu[n] ,sqrt( Sigma[n,n] ) ) h2 = plot( x , y2, type='l', col='blue', xlab='', ylab='' ) # xlim( cbind( xl , xh ) ) - legend(x = 1.5, y =0.4 ,legend=c("analytical","prior"), lwd=c(0.2,0.2), lty=c(1,1), col=c("red", "blue")) + legend( x = 1.5, y =0.4 , legend=c("analytical","prior"), lwd=c(0.2,0.2), lty=c(1,1), col=c("red", "blue") ) } } \ No newline at end of file Modified: pkg/Meucci/R/Prior2Posterior.R =================================================================== --- pkg/Meucci/R/Prior2Posterior.R 2013-09-11 05:34:29 UTC (rev 3054) +++ pkg/Meucci/R/Prior2Posterior.R 2013-09-11 10:09:49 UTC (rev 3055) @@ -15,7 +15,7 @@ #' @return S_ a covariance matrix with the full-confidence posterior distribution of Sigma #' #' @references -#' \url{http://www.symmys.com} +#' \url{http://www.symmys.com/node/158} #' \url{http://ssrn.com/abstract=1213325} #' A. Meucci - "Fully Flexible Views: Theory and Practice". 
See formula (21) and (22) on page 7 #' See Meucci script Prior2Posterior.m attached to Entropy Pooling Paper Modified: pkg/Meucci/R/RankingInformation.R =================================================================== --- pkg/Meucci/R/RankingInformation.R 2013-09-11 05:34:29 UTC (rev 3054) +++ pkg/Meucci/R/RankingInformation.R 2013-09-11 10:09:49 UTC (rev 3055) @@ -1,30 +1,90 @@ -# TODO: translate PlotResults function -# TODO: update plot of efficient frontier to show maximum return case # TODO: add max weights constraint to EfficientFrontier() # TODO: add computeCVaR to EfficientFrontier() - # TODO: confirm QuadProg does not have a bug (i.e. it can optimize expected returns without use dvec by adding an equality constraint) -#' Generate a Stacked Bar Chart based on the frontier weights matrix +#' Plots the efficient frontier, as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +#' October 2008, p 100-106. #' -#' @param weightsMatrix a matrix of weights where rows are efficient portfolios summing to one, and columns are assets -StackedBarChart = function( weightsMatrix ) +#' @param e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier +#' @param s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier +#' @param w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier +#' +#' @references +#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/PlotFrontier.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @export + + +PlotFrontier = function( e, s, w ) { - data = as.data.frame( weightsMatrix ) - data$aspect = 1:nrow(data) - data2 = reshape2:::melt( data , id.vars = "aspect" ) - p <- ggplot(data2, aes( x = factor(aspect), y = value, fill = factor( variable ) ) ) + geom_bar() #+ opts( title = expression( "Efficient Frontier Weights" )) - return( p ) + xx = dim( w )[ 1 ]; + N = dim( w )[ 2 ]; + Data = t( apply( w, 1, cumsum ) ); + + plot( c(min(s), 0), xlim = c( min(s) , max(s) ), ylim = c( 0, max(Data) ), + main= "frontier", xlab = " Portfolio # risk propensity", ylab = "Portfolio composition" ); + + for( n in 1 : N ) + { + x = rbind( min(s), s, max(s) ); + y = rbind( 0, matrix( Data[ , N-n+1 ] ), 0 ); + polygon( x, y, col = rgb( 0.9 - mod(n,3)*0.2, 0.9 - mod(n,3)*0.2, 0.9 - mod(n,3)*0.2) ); + } } -#' view the rankings +#' Plots the results of computing the efficient frontier (Expected returns and frontier), as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +#' October 2008, p 100-106. #' +#' @param e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier +#' @param s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier +#' @param w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier +#' @param M the NumPortf x 1 vector of expected returns for each asset +#' @param Lower constraints +#' @param Upper constraints +#' +#' @references +#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/PlotResults.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} + +PlotResults = function( e, s, w, M, Lower = NULL , Upper = NULL) +{ + N = length( M ); + dev.new(); + par( mfrow = c( 1, 2 ) ); + h1 = hist( M*100, plot = F ) + barplot( h$density, horiz = T, main = "expected returns", xlab = "", ylab = "" ); + if(length(Lower) || length(Upper)) + { + Changed = array( 0, N ); + Changed[ union( Lower, Upper ) ] = M[ union( Lower, Upper ) ] * 100; + h2 = hist(Changed, plot = F ); + barplot( h2$density, horiz = T, col = "red", add = T ); + } + + PlotFrontier( e*100, s*100, w ); +} + + + +#' Computes posterior probabilities to view the rankings, as it appears in A. Meucci, +#' "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, p 100-106. +#' #' @param X a vector containing returns for all the asset classes #' @param p a vector containing the prior probability values #' @param Lower a vector of indexes indicating which column is lower than the corresponding column number in Upper #' @param Upper a vector of indexes indicating which column is lower than the corresponding column number in Upper +#' +#' @references +#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/ViewRanking.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} #' @export EntropyProg -# @example ViewRanking( X , p , Lower = c(3,4) , Upper = c(4,5) ) # two inequality views: asset 3 < asset 4 returns, and asset 4 < asset 5 returns +#' @example ViewRanking( X , p , Lower = c(3,4) , Upper = c(4,5) ) # two inequality views: asset 3 < asset 4 returns, and asset 4 < asset 5 returns ViewRanking = function( X , p , Lower , Upper ) { @@ -53,19 +113,37 @@ return( p_ ) } -#' Generates an efficient frontier based on Meucci's Ranking Information version with the following inputs +#' Generates an efficient frontier based on Meucci's Ranking Information version and returns a A list with +#' NumPortf efficient portfolios whos returns are equally spaced along the whole range of the efficient frontier, +#' as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, +#' p 100-106. 
+#' +#' Most recent version of article and MATLAB code available at +#' http://www.symmys.com/node/158 +#' #' @param X a matrix with the joint-scenario probabilities by asset (rows are joint-scenarios, columns are assets) #' @param p a vector of probabilities associated with each scenario in matrix X #' @param Options a list of options....TBD -#' @return A list with NumPortf efficient portfolios whos returns are equally spaced along the whole range of the efficient frontier -#' Exps the NumPortf x 1 vector of expected returns for each asset -#' Covs the NumPortf x N vector of security volatilities along the efficient frontier -#' w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier -#' e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier -#' s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier +#' +#' @return Exps the NumPortf x 1 vector of expected returns for each asset +#' @return Covs the NumPortf x N vector of security volatilities along the efficient frontier +#' @return w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier +#' @return e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier +#' @return s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier +#' +#' @references +#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/EfficientFrontier.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} #' @export + RIEfficientFrontier = function( X , p , Options) -{ +{ + + if( !require("limSolve") ) stop("This script requieres the limSolve package installed") + + library( matlab ) J = nrow( X ) # number of scenarios @@ -73,38 +151,36 @@ Exps = t(X) %*% p # probability-weighted expected return of each asset - Scnd_Mom = t(X) %*% (X * ( p %*% ones( 1 , N ) ) ) + Scnd_Mom = t(X) %*% (X * ( p %*% matrix( 1, 1 , N ) ) ) Scnd_Mom = ( Scnd_Mom + t(Scnd_Mom) ) / 2 # an N*N matrix Covs = Scnd_Mom - Exps %*% t( Exps ) Constr = list() # constrain the sum of weights to 1 - Constr$Aeq = ones( 1 , N ) + Constr$Aeq = matrix( 1, 1 , N ) Constr$beq = 1 # constrain the weight of any security to between 0 and 1 - Constr$Aleq = rbind( eye(N) , -eye(N) ) # linear coefficients matrix A in the inequality constraint A*x <= b - Constr$bleq = rbind( ones(N,1) , 0*ones(N,1) ) # constraint vector b in the inequality constraint A*x <= b + Constr$Aleq = rbind( diag( 1, N ) , - diag( 1, N ) ) # linear coefficients matrix A in the inequality constraint A*x <= b + Constr$bleq = rbind( matrix( 1, N, 1 ) , matrix( 0, N, 1 ) ) # constraint vector b in the inequality constraint A*x <= b Amat = rbind( Constr$Aeq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints bvec = rbind( Constr$beq , Constr$bleq ) # stack the equality constraints on top of the inequality constraints ############################################################################################ # determine return of minimum-risk portfolio - FirstDegree = zeros( N , 1 ) # TODO: assumes that securities have zero expected returns when computing efficient frontier? 
+ FirstDegree = matrix( 0, N , 1 ) # TODO: assumes that securities have zero expected returns when computing efficient frontier? SecondDegree = Covs - library( quadprog ) # Why is FirstDegree "expected returns" set to 0? # We capture the equality view in the equality constraints matrix # In other words, we have a constraint that the Expected Returns by Asset %*% Weights = Target Return MinVol_Weights = solve.QP( Dmat = SecondDegree , dvec = -1*FirstDegree , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( Constr$beq ) ) - MinSDev_Exp = t( MinVol_Weights$solution ) %*% Exps + MinSDev_Exp = t( MinVol_Weights$solution ) %*% Exps ############################################################################################ # determine return of maximum-return portfolio FirstDegree = -Exps - library( limSolve ) MaxRet_Weights = linp( E = Constr$Aeq , F = Constr$beq , G = -1*Constr$Aleq , H = -1*Constr$bleq , Cost = FirstDegree , ispos = FALSE )$X MaxExp_Exp = t( MaxRet_Weights) %*% Exps @@ -120,7 +196,7 @@ ############################################################################################ # compute the NumPortf compositions and risk-return coordinates - FirstDegree = zeros( N , 1 ) + FirstDegree = matrix( 0, N , 1 ) w = matrix( , ncol = N , nrow = 0 ) e = matrix( , ncol = 1 , nrow = 0 ) Added: pkg/Meucci/data/returnsDistribution.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/returnsDistribution.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/Meucci/demo/AnalyticalvsNumerical.R =================================================================== --- pkg/Meucci/demo/AnalyticalvsNumerical.R 2013-09-11 05:34:29 UTC (rev 3054) +++ pkg/Meucci/demo/AnalyticalvsNumerical.R 2013-09-11 10:09:49 UTC (rev 3055) @@ -5,8 +5,8 @@ #' http://www.symmys.com/node/158 #' #' @references -#' A. 
Meucci, Fully Flexible Views: Theory and Practice \url{http://www.symmys.com/node/158} -#' See Meucci script for "S_MAIN.m" +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "AnalyticalvsNumerical/S_MAIN.m" #' #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/RankingInformation.R =================================================================== --- pkg/Meucci/demo/RankingInformation.R 2013-09-11 05:34:29 UTC (rev 3054) +++ pkg/Meucci/demo/RankingInformation.R 2013-09-11 10:09:49 UTC (rev 3055) @@ -1,101 +1,50 @@ #' Entropy Pooling Example - Ranking Information script #' -#' This script performs ranking allocation using the -#' Entropy-Pooling approach by Attilio Meucci, as it appears in -#' "A. Meucci - Fully Flexible Views: Theory and Practice - -#' The Risk Magazine, October 2008, p 100-106" -#' available at www.symmys.com > Research > Working Papers +#' This script performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci, +#' as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +#' October 2008, p 100-106. +#' +#' Most recent version of article and MATLAB code available at +#' http://www.symmys.com/node/158 +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/S_MAIN.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} -#' Code by A. 
Meucci, September 2008 -#' Last version available at www.symmys.com > Teaching > MATLAB - ############################################################################# # Load panel X of joint returns realizations and vector p of respective probabilities # In real life, these are provided by the estimation process ############################################################################# -data("ReturnsDistribution") +load("../data/returnsDistribution.rda"); -############################################################################# +########################################################################################################### # compute and plot efficient frontier based on prior market distribution -############################################################################# -Options = list() -Options$NumPortf = 20 # number of portfolios in efficient frontier -Options$FrontierSpan = c( .3 , .9 ) # range of normalized exp.vals. spanned by efficient frontier +########################################################################################################### +Options = list(); +Options$NumPortf = 20; # number of portfolios in efficient frontier +Options$FrontierSpan = c( 0.3, 0.9 ); # range of normalized exp.vals. spanned by efficient frontier -frontierPrior = RIEfficientFrontier( X , P , Options ) # Frontier Plot Data contains [e,s,w,M,S] +EF = RIEfficientFrontier( returnsDistribution$X, returnsDistribution$p, Options ); +PlotResults( EF$e, EF$Sdev, EF$Composition, EF$Exps ); -# PlotResults( frontierPrior$e , frontierPrior$Sdev , frontierPrior$Composition , frontierPrior$Exps ) -plot( x = (frontierPrior$Sdev)^2 , y = frontierPrior$e , xlab = "Variance" , ylab = "Expected Return" , main = "Prior" , type = "l" , ylim = c( .03 , .1 ) ) -# create stacked bar chart. each bar is a row (20 rows). each row sums to one. add legend. 
-options( warn = 0 ) -library( ggplot2 ) -plotStackedBar <- StackedBarChart( frontierPrior$Composition ) -plotStackedBar -options( warn = 2 ) -############################################################################# -# process ordering information (this is the core of the Entropy Pooling approach -############################################################################# +########################################################################################################### +# process ordering information (this is the core of the Entropy Pooling approach) +########################################################################################################### -# print expected returns of assets 3 and 4 -frontierPrior$Exps[3] -frontierPrior$Exps[4] # note that asset 4 has a higher expected return assuming the prior distribution - # the expected return of each entry of Lower is supposed to be smaller than respective entry in Upper -Lower = as.numeric( c( 4 ) ) -Upper = as.numeric( c( 3 ) ) -P_ = ViewRanking( X , P , Lower , Upper )$p_ +Lower = 4; +Upper = 3; +p_ = ViewRanking( returnsDistribution$X, returnsDistribution$p, Lower, Upper )$p_; -# confidence -c = .5 -blendedProbability = (1-c) * P + c * P_ +#confidence +c = 0.5; +p_ = ( 1 - c ) * returnsDistribution$p + c * p_ ; -############################################################################# +########################################################################################################### # compute and plot efficient frontier based on posterior market distribution -############################################################################# +########################################################################################################### -frontierFullConfidencePosterior = RIEfficientFrontier( X , P_ , Options ) -# print expected returns of assets 3 and 4 -frontierFullConfidencePosterior$Exps[3] -frontierFullConfidencePosterior$Exps[4] # note that asset 3 and asset 4 have 
equal expected returns - -# bar chart of portfolios on frontier -- note asset 3 has substantially more weight vs. asset 4 -options( warn = 0 ) -library( ggplot2 ) -plotStackedBar <- StackedBarChart( frontierFullConfidencePosterior$Composition ) -plotStackedBar -options( warn = 2 ) - -frontierPosterior = RIEfficientFrontier( X , blendedProbability , Options ) -# print expected returns of assets 3 and 4 -frontierPosterior$Exps[3] -frontierPosterior$Exps[4] # note that asset 4 still has a higher expected return, but less so - -plot( x = (frontierPosterior$Sdev)^2 , y = frontierPosterior$e , xlab = "Variance" , ylab = "Expected Return" , main = "Posterior" , type = "l" , ylim = c( .03 , .1 ) ) -# PlotResults( frontierPosterior$e , frontierPosterior$Sdev , frontierPosterior$Composition , frontierPosterior$Exps , Lower , Upper ) - -# bar chart of portfolios on frontier -options( warn = 0 ) -library( ggplot2 ) -plotStackedBar <- StackedBarChart( frontierPosterior$Composition ) -plotStackedBar -options( warn = 2 ) - -# Tests -# Test1 - views that are already in the prior return no revision -result = ViewRanking( X , P , c(3,3) , c(4,4) ) # none of the probabilities are revised from 1e-05. Why? 
Because the expectation that asset 3 is lower than expected return of asset 4 is already satisfied in prior -result2 = ViewRanking( X , P , c(3) , c(4) ) # none of the probabilities are revised from 1e-05 - -# Test2 - indentical (repeated) views return the same probabilities -result3 = ViewRanking( X , P , c(4) , c(3) ) # returns revised probability distribution -result4 = ViewRanking( X , P , c(4,4) , c(3,3) ) # returns identical probability distribution as in result3 - -# Test3 - indentical (repeated) views return the same probabilities -result3 = ViewRanking( X , P , c(4) , c(3) ) # returns revised probability distribution -result4 = ViewRanking( X , P , c(4,4) , c(3,3) ) # returns identical probability distribution as in result3 - -# Test4 - indentical (repeated) views return the same probabilities -result5 = ViewRanking( X , P , c(4) , c(3) ) # returns revised probability distribution -result6 = ViewRanking( X , P , c(4,1) , c(3,2) ) # the second view is non-binding since it is already reflected in prior, so p_ matches result 5 - -# Test5 -result7 = ViewRanking( X , P , c(4,2) , c(3,1) ) # the second view is non-binding since it is already reflected in prior, so p_ matches result 5 \ No newline at end of file +EF_ = RIEfficientFrontier( returnsDistribution$X, p_, Options ); +PlotResults( EF_$e, EF_$Sdev, EF_$Composition, EF_$Exps, Lower, Upper ); From noreply at r-forge.r-project.org Wed Sep 11 12:11:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 12:11:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3056 - in pkg/Meucci: . 
R man Message-ID: <20130911101109.45273183D80@r-forge.r-project.org> Author: xavierv Date: 2013-09-11 12:11:08 +0200 (Wed, 11 Sep 2013) New Revision: 3056 Added: pkg/Meucci/man/PlotFrontier.Rd pkg/Meucci/man/PlotResults.Rd Modified: pkg/Meucci/NAMESPACE pkg/Meucci/R/RankingInformation.R pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd pkg/Meucci/man/PlotDistributions.Rd pkg/Meucci/man/Prior2Posterior.Rd pkg/Meucci/man/RIEfficientFrontier.Rd pkg/Meucci/man/ViewRanking.Rd Log: -updated documentation for the Ranking information example from the Historical Scenarios paper Modified: pkg/Meucci/NAMESPACE =================================================================== --- pkg/Meucci/NAMESPACE 2013-09-11 10:09:49 UTC (rev 3055) +++ pkg/Meucci/NAMESPACE 2013-09-11 10:11:08 UTC (rev 3056) @@ -45,6 +45,7 @@ export(PerformIidAnalysis) export(PlotCompositionEfficientFrontier) export(PlotDistributions) +export(PlotFrontier) export(PlotMarginalsNormalInverseWishart) export(PlotVolVsCompositionEfficientFrontier) export(Prior2Posterior) Modified: pkg/Meucci/R/RankingInformation.R =================================================================== --- pkg/Meucci/R/RankingInformation.R 2013-09-11 10:09:49 UTC (rev 3055) +++ pkg/Meucci/R/RankingInformation.R 2013-09-11 10:11:08 UTC (rev 3056) @@ -84,8 +84,9 @@ #' #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} #' @export EntropyProg -#' @example ViewRanking( X , p , Lower = c(3,4) , Upper = c(4,5) ) # two inequality views: asset 3 < asset 4 returns, and asset 4 < asset 5 returns +# example ViewRanking( X , p , Lower = c(3,4) , Upper = c(4,5) ) # two inequality views: asset 3 < asset 4 returns, and asset 4 < asset 5 returns + ViewRanking = function( X , p , Lower , Upper ) { library( matlab ) Modified: pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd =================================================================== --- pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd 2013-09-11 10:09:49 UTC (rev 3055) +++ 
pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd 2013-09-11 10:11:08 UTC (rev 3056) @@ -3,7 +3,7 @@ \title{Plot the efficient frontier, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005.} \usage{ - PlotCompositionEfficientFrontier(Portfolios) + PlotCompositionEfficientFrontier(Portfolios, s, e) } \arguments{ \item{Portfolios}{: [matrix] (M x N) M portfolios of size Modified: pkg/Meucci/man/PlotDistributions.Rd =================================================================== --- pkg/Meucci/man/PlotDistributions.Rd 2013-09-11 10:09:49 UTC (rev 3055) +++ pkg/Meucci/man/PlotDistributions.Rd 2013-09-11 10:11:08 UTC (rev 3056) @@ -29,4 +29,9 @@ \author{ Ram Ahluwalia \email{ram at wingedfootcapital.com} } +\references{ + A. Meucci, "Fully Flexible Views: Theory and Practice" + \url{http://www.symmys.com/node/158} See Meucci script + for "PlotDistributions.m" +} Added: pkg/Meucci/man/PlotFrontier.Rd =================================================================== --- pkg/Meucci/man/PlotFrontier.Rd (rev 0) +++ pkg/Meucci/man/PlotFrontier.Rd 2013-09-11 10:11:08 UTC (rev 3056) @@ -0,0 +1,32 @@ +\name{PlotFrontier} +\alias{PlotFrontier} +\title{Plots the efficient frontier, as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, + October 2008, p 100-106.} +\usage{ + PlotFrontier(e, s, w) +} +\arguments{ + \item{e}{the NumPortf x 1 matrix of expected returns for + each portfolio along the efficient frontier} + + \item{s}{the NumPortf x 1 matrix of standard deviation of + returns for each portfolio along the efficient frontier} + + \item{w}{the NumPortf x N matrix of compositions + (security weights) for each portfolio along the efficient + frontier} +} +\description{ + Plots the efficient frontier, as it appears in A. Meucci, + "Fully Flexible Views: Theory and Practice", The Risk + Magazine, October 2008, p 100-106. +} +\author{ + Xavier Valls \email{flamejat at gmail.com} +} +\references{ + A. 
Meucci, "Fully Flexible Views: Theory and Practice" + \url{http://www.symmys.com/node/158} See Meucci script + for "RankingInformation/PlotFrontier.m" +} + Added: pkg/Meucci/man/PlotResults.Rd =================================================================== --- pkg/Meucci/man/PlotResults.Rd (rev 0) +++ pkg/Meucci/man/PlotResults.Rd 2013-09-11 10:11:08 UTC (rev 3056) @@ -0,0 +1,40 @@ +\name{PlotResults} +\alias{PlotResults} +\title{Plots the results of computing the efficient frontier (Expected returns and frontier), as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +October 2008, p 100-106.} +\usage{ + PlotResults(e, s, w, M, Lower = NULL, Upper = NULL) +} +\arguments{ + \item{e}{the NumPortf x 1 matrix of expected returns for + each portfolio along the efficient frontier} + + \item{s}{the NumPortf x 1 matrix of standard deviation of + returns for each portfolio along the efficient frontier} + + \item{w}{the NumPortf x N matrix of compositions + (security weights) for each portfolio along the efficient + frontier} + + \item{M}{the NumPortf x 1 vector of expected returns for + each asset} + + \item{Lower}{constraints} + + \item{Upper}{constraints} +} +\description{ + Plots the results of computing the efficient frontier + (Expected returns and frontier), as it appears in A. + Meucci, "Fully Flexible Views: Theory and Practice", The + Risk Magazine, October 2008, p 100-106. +} +\author{ + Xavier Valls \email{flamejat at gmail.com} +} +\references{ + A. 
Meucci, "Fully Flexible Views: Theory and Practice" + \url{http://www.symmys.com/node/158} See Meucci script + for "RankingInformation/PlotResults.m" +} + Modified: pkg/Meucci/man/Prior2Posterior.Rd =================================================================== --- pkg/Meucci/man/Prior2Posterior.Rd 2013-09-11 10:09:49 UTC (rev 3055) +++ pkg/Meucci/man/Prior2Posterior.Rd 2013-09-11 10:11:08 UTC (rev 3056) @@ -1,53 +1,53 @@ -\name{Prior2Posterior} -\alias{Prior2Posterior} -\title{Calculate the full-confidence posterior distributions of Mu and Sigma} -\usage{ - Prior2Posterior(M, Q, M_Q, S, G, S_G) -} -\arguments{ - \item{M}{a numeric vector with the Mu of the normal - reference model} - - \item{Q}{a numeric vector used to construct a view on - expectation of the linear combination QX} - - \item{M_Q}{a numeric vector with the view of the - expectations of QX} - - \item{S}{a covariance matrix for the normal reference - model} - - \item{G}{a numeric vector used to construct a view on - covariance of the linear combination GX} - - \item{S_G}{a numeric with the expectation associated with - the covariance of the linear combination GX} -} -\value{ - a list with - - M_ a numeric vector with the full-confidence posterior - distribution of Mu - - S_ a covariance matrix with the full-confidence posterior - distribution of Sigma -} -\description{ - \deqn{ \tilde{ \mu } \equiv \mu + \Sigma Q' {\big(Q - \Sigma Q' \big)}^{-1} \big( \tilde{\mu}_{Q} - Q \mu - \big), \\ \tilde{ \Sigma } \equiv \Sigma + \Sigma G' - \big({\big(G \Sigma G' \big)}^{-1} \tilde{ \Sigma }_G - {\big(G \Sigma G' \big)}^{-1} - {\big(G \Sigma G' - \big)}^{-1} \big) G \Sigma } -} -\author{ - Ram Ahluwalia \email{ram at wingedfootcapital.com} -} -\references{ - \url{http://www.symmys.com} - \url{http://ssrn.com/abstract=1213325} A. Meucci - "Fully - Flexible Views: Theory and Practice". 
See formula (21) - and (22) on page 7 See Meucci script Prior2Posterior.m - attached to Entropy Pooling Paper -} - +\name{Prior2Posterior} +\alias{Prior2Posterior} +\title{Calculate the full-confidence posterior distributions of Mu and Sigma} +\usage{ + Prior2Posterior(M, Q, M_Q, S, G, S_G) +} +\arguments{ + \item{M}{a numeric vector with the Mu of the normal + reference model} + + \item{Q}{a numeric vector used to construct a view on + expectation of the linear combination QX} + + \item{M_Q}{a numeric vector with the view of the + expectations of QX} + + \item{S}{a covariance matrix for the normal reference + model} + + \item{G}{a numeric vector used to construct a view on + covariance of the linear combination GX} + + \item{S_G}{a numeric with the expectation associated with + the covariance of the linear combination GX} +} +\value{ + a list with + + M_ a numeric vector with the full-confidence posterior + distribution of Mu + + S_ a covariance matrix with the full-confidence posterior + distribution of Sigma +} +\description{ + \deqn{ \tilde{ \mu } \equiv \mu + \Sigma Q' {\big(Q + \Sigma Q' \big)}^{-1} \big( \tilde{\mu}_{Q} - Q \mu + \big), \\ \tilde{ \Sigma } \equiv \Sigma + \Sigma G' + \big({\big(G \Sigma G' \big)}^{-1} \tilde{ \Sigma }_G + {\big(G \Sigma G' \big)}^{-1} - {\big(G \Sigma G' + \big)}^{-1} \big) G \Sigma } +} +\author{ + Ram Ahluwalia \email{ram at wingedfootcapital.com} +} +\references{ + \url{http://www.symmys.com/node/158} + \url{http://ssrn.com/abstract=1213325} A. Meucci - "Fully + Flexible Views: Theory and Practice". 
See formula (21) + and (22) on page 7 See Meucci script Prior2Posterior.m + attached to Entropy Pooling Paper +} + Modified: pkg/Meucci/man/RIEfficientFrontier.Rd =================================================================== --- pkg/Meucci/man/RIEfficientFrontier.Rd 2013-09-11 10:09:49 UTC (rev 3055) +++ pkg/Meucci/man/RIEfficientFrontier.Rd 2013-09-11 10:11:08 UTC (rev 3056) @@ -1,6 +1,9 @@ \name{RIEfficientFrontier} \alias{RIEfficientFrontier} -\title{Generates an efficient frontier based on Meucci's Ranking Information version with the following inputs} +\title{Generates an efficient frontier based on Meucci's Ranking Information version and returns a A list with +NumPortf efficient portfolios whos returns are equally spaced along the whole range of the efficient frontier, +as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, +p 100-106.} \usage{ RIEfficientFrontier(X, p, Options) } @@ -14,20 +17,32 @@ \item{Options}{a list of options....TBD} } \value{ - A list with NumPortf efficient portfolios whos returns - are equally spaced along the whole range of the efficient - frontier Exps the NumPortf x 1 vector of expected returns - for each asset Covs the NumPortf x N vector of security - volatilities along the efficient frontier w the NumPortf - x N matrix of compositions (security weights) for each - portfolio along the efficient frontier e the NumPortf x 1 - matrix of expected returns for each portfolio along the - efficient frontier s the NumPortf x 1 matrix of standard - deviation of returns for each portfolio along the - efficient frontier + Exps the NumPortf x 1 vector of expected returns for each + asset + + Covs the NumPortf x N vector of security volatilities + along the efficient frontier + + w the NumPortf x N matrix of compositions (security + weights) for each portfolio along the efficient frontier + + e the NumPortf x 1 matrix of expected returns for each + portfolio along the efficient 
frontier + + s the NumPortf x 1 matrix of standard deviation of + returns for each portfolio along the efficient frontier } \description{ - Generates an efficient frontier based on Meucci's Ranking - Information version with the following inputs + Most recent version of article and MATLAB code available + at http://www.symmys.com/node/158 } +\author{ + Ram Ahluwalia \email{ram at wingedfootcapital.com} and + Xavier Valls \email{flamejat at gmail.com} +} +\references{ + A. Meucci, "Fully Flexible Views: Theory and Practice" + \url{http://www.symmys.com/node/158} See Meucci script + for "RankingInformation/EfficientFrontier.m" +} Modified: pkg/Meucci/man/ViewRanking.Rd =================================================================== --- pkg/Meucci/man/ViewRanking.Rd 2013-09-11 10:09:49 UTC (rev 3055) +++ pkg/Meucci/man/ViewRanking.Rd 2013-09-11 10:11:08 UTC (rev 3056) @@ -1,23 +1,35 @@ -\name{ViewRanking} -\alias{ViewRanking} -\title{view the rankings} -\usage{ - ViewRanking(X, p, Lower, Upper) -} -\arguments{ - \item{X}{a vector containing returns for all the asset - classes} - - \item{p}{a vector containing the prior probability - values} - - \item{Lower}{a vector of indexes indicating which column - is lower than the corresponding column number in Upper} - - \item{Upper}{a vector of indexes indicating which column - is lower than the corresponding column number in Upper} -} -\description{ - view the rankings -} - +\name{ViewRanking} +\alias{ViewRanking} +\title{Computes posterior probabilities to view the rankings, as it appears in A. 
Meucci, +"Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, p 100-106.} +\usage{ + ViewRanking(X, p, Lower, Upper) +} +\arguments{ + \item{X}{a vector containing returns for all the asset + classes} + + \item{p}{a vector containing the prior probability + values} + + \item{Lower}{a vector of indexes indicating which column + is lower than the corresponding column number in Upper} + + \item{Upper}{a vector of indexes indicating which column + is lower than the corresponding column number in Upper} +} +\description{ + Computes posterior probabilities to view the rankings, as + it appears in A. Meucci, "Fully Flexible Views: Theory + and Practice", The Risk Magazine, October 2008, p + 100-106. +} +\author{ + Ram Ahluwalia \email{ram at wingedfootcapital.com} +} +\references{ + A. Meucci, "Fully Flexible Views: Theory and Practice" + \url{http://www.symmys.com/node/158} See Meucci script + for "RankingInformation/ViewRanking.m" +} + From noreply at r-forge.r-project.org Wed Sep 11 18:40:17 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 18:40:17 +0200 (CEST) Subject: [Returnanalytics-commits] r3057 - pkg/PortfolioAnalytics/R Message-ID: <20130911164018.0DEBF185167@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-11 18:40:17 +0200 (Wed, 11 Sep 2013) New Revision: 3057 Modified: pkg/PortfolioAnalytics/R/charts.multiple.R Log: Adding chart.RiskReward function to plot multiple optimizations in risk-return space. 
Modified: pkg/PortfolioAnalytics/R/charts.multiple.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.multiple.R 2013-09-11 10:11:08 UTC (rev 3056) +++ pkg/PortfolioAnalytics/R/charts.multiple.R 2013-09-11 16:40:17 UTC (rev 3057) @@ -63,3 +63,49 @@ } box(col=element.color) } + +#' @rdname chart.RiskReward +#' @method chart.RiskReward opt.list +#' @S3method chart.RiskReward opt.list +chart.RiskReward.opt.list <- function(object, ..., risk.col="ES", return.col="mean", main="", ylim=NULL, xlim=NULL, labels.assets=TRUE, pch.assets=1, cex.assets=0.8, cex.axis=0.8, cex.lab=0.8, colorset=NULL, element.color="darkgray"){ + if(!inherits(object, "opt.list")) stop("object must be of class 'opt.list'") + # Get the objective measures + obj <- extractObjectiveMeasures(object) + + # check if risk.col and return.col are valid objective measures + columnnames <- colnames(obj) + if(!(risk.col %in% columnnames)) stop(paste(risk.col, "not in column names")) + if(!(return.col %in% columnnames)) stop(paste(return.col, "not in column names")) + + # data to plot + dat <- na.omit(obj[, c(risk.col, return.col)]) + if(nrow(dat) < 1) stop("No data to plot after na.omit") + dat_names <- rownames(dat) + + # colors to plot + if(is.null(colorset)){ + colorset <- 1:nrow(dat) + } + + # set xlim and ylim + if(is.null(xlim)){ + xlim <- range(dat[, risk.col]) + xlim[1] <- 0 + xlim[2] <- xlim[2] * 1.25 + } + + if(is.null(ylim)){ + ylim <- range(dat[, return.col]) + ylim[1] <- 0 + ylim[2] <- ylim[2] * 1.15 + } + + # plot the points + plot(x=dat[, risk.col], y=dat[, return.col], cex.lab=cex.lab, main=main, ylab=return.col, xlab=risk.col, xlim=xlim, ylim=ylim, pch=pch.assets, col=colorset, ..., axes=FALSE) + if(labels.assets) text(x=dat[, risk.col], y=dat[, return.col], labels=dat_names, pos=4, cex=cex.assets, col=colorset) + + # add the axis + axis(2, cex.axis=cex.axis, col=element.color) + axis(1, cex.axis=cex.axis, col=element.color) + 
box(col=element.color) +} From noreply at r-forge.r-project.org Wed Sep 11 19:20:10 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 19:20:10 +0200 (CEST) Subject: [Returnanalytics-commits] r3058 - in pkg/FactorAnalytics: sandbox sandbox/Man sandbox/R vignettes Message-ID: <20130911172010.3E87E185326@r-forge.r-project.org> Author: chenyian Date: 2013-09-11 19:20:09 +0200 (Wed, 11 Sep 2013) New Revision: 3058 Added: pkg/FactorAnalytics/sandbox/Man/ pkg/FactorAnalytics/sandbox/Man/Style.Rd pkg/FactorAnalytics/sandbox/Man/chart.Style.Rd pkg/FactorAnalytics/sandbox/Man/covEWMA.Rd pkg/FactorAnalytics/sandbox/Man/impliedFactorReturns.Rd pkg/FactorAnalytics/sandbox/Man/modifiedEsReport.Rd pkg/FactorAnalytics/sandbox/Man/modifiedIncrementalES.Rd pkg/FactorAnalytics/sandbox/Man/modifiedIncrementalVaR.Rd pkg/FactorAnalytics/sandbox/Man/modifiedPortfolioEsDecomposition.Rd pkg/FactorAnalytics/sandbox/Man/modifiedPortfolioVaRDecomposition.Rd pkg/FactorAnalytics/sandbox/Man/modifiedVaRReport.Rd pkg/FactorAnalytics/sandbox/Man/nonparametricEsReport.Rd pkg/FactorAnalytics/sandbox/Man/nonparametricIncrementalES.Rd pkg/FactorAnalytics/sandbox/Man/nonparametricIncrementalVaR.Rd pkg/FactorAnalytics/sandbox/Man/nonparametricPortfolioEsDecomposition.Rd pkg/FactorAnalytics/sandbox/Man/nonparametricPortfolioVaRDecomposition.Rd pkg/FactorAnalytics/sandbox/Man/nonparametricVaRReport.Rd pkg/FactorAnalytics/sandbox/Man/normalEsReport.Rd pkg/FactorAnalytics/sandbox/Man/normalIncrementalES.Rd pkg/FactorAnalytics/sandbox/Man/normalIncrementalVaR.Rd pkg/FactorAnalytics/sandbox/Man/normalPortfolioEsDecomposition.Rd pkg/FactorAnalytics/sandbox/Man/normalPortfolioVaRDecomposition.Rd pkg/FactorAnalytics/sandbox/Man/normalVaRReport.Rd pkg/FactorAnalytics/sandbox/Man/portfolioSdDecomposition.Rd pkg/FactorAnalytics/sandbox/Man/scenarioPredictions.Rd pkg/FactorAnalytics/sandbox/Man/scenarioPredictionsPortfolio.Rd pkg/FactorAnalytics/sandbox/R/ 
pkg/FactorAnalytics/sandbox/R/FactorAnalytics-package.R pkg/FactorAnalytics/sandbox/R/bootstrapFactorESdecomposition.r pkg/FactorAnalytics/sandbox/R/bootstrapFactorVaRdecomposition.r pkg/FactorAnalytics/sandbox/R/chart.RollingStyle.R pkg/FactorAnalytics/sandbox/R/chart.Style.R pkg/FactorAnalytics/sandbox/R/covEWMA.R pkg/FactorAnalytics/sandbox/R/factorModelFactorRiskDecomposition.r pkg/FactorAnalytics/sandbox/R/factorModelGroupRiskDecomposition.r pkg/FactorAnalytics/sandbox/R/factorModelPortfolioRiskDecomposition.r pkg/FactorAnalytics/sandbox/R/factorModelRiskAttribution.r pkg/FactorAnalytics/sandbox/R/factorModelRiskDecomposition.r pkg/FactorAnalytics/sandbox/R/factorModelSimulation.r pkg/FactorAnalytics/sandbox/R/impliedFactorReturns.R pkg/FactorAnalytics/sandbox/R/modifiedEsReport.R pkg/FactorAnalytics/sandbox/R/modifiedIncrementalES.R pkg/FactorAnalytics/sandbox/R/modifiedIncrementalVaR.R pkg/FactorAnalytics/sandbox/R/modifiedPortfolioEsDecomposition.R pkg/FactorAnalytics/sandbox/R/modifiedPortfolioVaRDecomposition.R pkg/FactorAnalytics/sandbox/R/modifiedVaRReport.R pkg/FactorAnalytics/sandbox/R/nonparametricEsReport.R pkg/FactorAnalytics/sandbox/R/nonparametricIncrementalES.R pkg/FactorAnalytics/sandbox/R/nonparametricIncrementalVaR.R pkg/FactorAnalytics/sandbox/R/nonparametricPortfolioEsDecomposition.R pkg/FactorAnalytics/sandbox/R/nonparametricPortfolioVaRDecomposition.R pkg/FactorAnalytics/sandbox/R/nonparametricVaRReport.R pkg/FactorAnalytics/sandbox/R/normalEsReport.R pkg/FactorAnalytics/sandbox/R/normalIncrementalES.R pkg/FactorAnalytics/sandbox/R/normalIncrementalVaR.R pkg/FactorAnalytics/sandbox/R/normalPortfolioEsDecomposition.R pkg/FactorAnalytics/sandbox/R/normalPortfolioVaRDecomposition.R pkg/FactorAnalytics/sandbox/R/normalVaRReport.R pkg/FactorAnalytics/sandbox/R/portfolioSdDecomposition.R pkg/FactorAnalytics/sandbox/R/scenarioPredictions.r pkg/FactorAnalytics/sandbox/R/scenarioPredictionsPortfolio.r pkg/FactorAnalytics/sandbox/R/style.QPfit.R 
pkg/FactorAnalytics/sandbox/R/style.fit.R pkg/FactorAnalytics/sandbox/R/table.RollingStyle.R pkg/FactorAnalytics/sandbox/example.menu.plot.r pkg/FactorAnalytics/sandbox/getCommomFactor.r pkg/FactorAnalytics/sandbox/plotFactorModelFit.r pkg/FactorAnalytics/sandbox/test.vignette.r pkg/FactorAnalytics/sandbox/testfile.r Modified: pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw Log: add unused function to sandbox file and upload to r-forge. Added: pkg/FactorAnalytics/sandbox/Man/Style.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/Style.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/Style.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,104 @@ +\name{chart.Style} +\alias{chart.Style} +\alias{chart.RollingStyle} +\alias{table.RollingStyle} +\alias{style.fit} +\alias{style.QPfit} +%- Also NEED an '\alias' for EACH other topic documented here. +\title{ calculate and display effective style weights } +\description{ + Functions that calculate effective style weights and display the results in a bar chart. \code{chart.Style} calculates and displays style weights calculated over a single period. \code{chart.RollingStyle} calculates and displays those weights in rolling windows through time. \code{style.fit} manages the calculation of the weights by method. \code{style.QPfit} calculates the specific constraint case that requires quadratic programming. +} +\usage{ +chart.Style(R.fund, R.style, method = c("constrained", "unconstrained", "normalized"), leverage = FALSE, main = NULL, ylim = NULL, unstacked=TRUE, ...) + +chart.RollingStyle(R.fund, R.style, method = c("constrained","unconstrained","normalized"), leverage = FALSE, width = 12, main = NULL, space = 0, ...) + +style.fit(R.fund, R.style, model=FALSE, method = c("constrained", "unconstrained", "normalized"), leverage = FALSE, selection = c("none", "AIC"), ...) + +style.QPfit(R.fund, R.style, model = FALSE, leverage = FALSE, ...) 
+ +} +%- maybe also 'usage' for other objects documented here. +\arguments{ + \item{R.fund}{ matrix, data frame, or zoo object with fund returns to be analyzed } + \item{R.style}{ matrix, data frame, or zoo object with style index returns. Data object must be of the same length and time-aligned with R.fund } + \item{method}{ specify the method of calculation of style weights as "constrained", "unconstrained", or "normalized". For more information, see \code{\link{style.fit}} } + \item{leverage}{ logical, defaults to 'FALSE'. If 'TRUE', the calculation of weights assumes that leverage may be used. For more information, see \code{\link{style.fit}} } + \item{model}{ logical. If 'model' = TRUE in \code{\link{style.QPfit}}, the full result set is shown from the output of \code{solve.QP}. } + \item{selection}{ either "none" (default) or "AIC". If "AIC", then the function uses a stepwise regression to identify find the model with minimum AIC value. See \code{\link{step}} for more detail.} + \item{unstacked}{ logical. If set to 'TRUE' \emph{and} only one row of data is submitted in 'w', then the chart creates a normal column chart. If more than one row is submitted, then this is ignored. See examples below. } + \item{space}{ the amount of space (as a fraction of the average bar width) left before each bar, as in \code{\link{barplot}}. Default for \code{chart.RollingStyle} is 0; for \code{chart.Style} the default is 0.2. } + \item{main}{ set the chart title, same as in \code{\link{plot}} } + \item{width}{ number of periods or window to apply rolling style analysis over } + \item{ylim}{ set the y-axis limit, same as in \code{\link{plot}} } + \item{\dots}{ for the charting functions, these are arguments to be passed to \code{\link{barplot}}. These can include further arguments (such as 'axes', 'asp' and 'main') and graphical parameters (see 'par') which are passed to 'plot.window()', 'title()' and 'axis'. For the calculation functions, these are ignored. 
} +} +\details{ +These functions calculate style weights using an asset class style model as described in detail in Sharpe (1992). The use of quadratic programming to determine a fund's exposures to the changes in returns of major asset classes is usually refered to as "style analysis". + +The "unconstrained" method implements a simple factor model for style analysis, as in: +\deqn{Ri = bi1*F1+bi2*F2+...+bin*Fn+ei}{R_i = b_{i1}F_1+b_{i2}F_2+\dots+b_{in}F_n +e_i} +where \eqn{Ri}{R_i} represents the return on asset i, \eqn{Fj}{F_j} represents each factor, and \eqn{ei}{e_i} represents the "non-factor" component of the return on i. This is simply a multiple regression analysis with fund returns as the dependent variable and asset class returns as the independent variables. The resulting slope coefficients are then interpreted as the fund's historic exposures to asset class returns. In this case, coefficients do not sum to 1. + +The "normalized" method reports the results of a multiple regression analysis similar to the first, but with one constraint: the coefficients are required to add to 1. Coefficients may be negative, indicating short exposures. To enforce the constraint, coefficients are normalized. + +The "constrained" method includes the constraint that the coefficients sum to 1, but adds +that the coefficients must lie between 0 and 1. These inequality constraints require a +quadratic programming algorithm using \code{\link[quadprog]{solve.QP}} from the 'quadprog' package, +and the implementation is discussed under \code{\link{style.QPfit}}. If set to TRUE, +"leverage" allows the sum of the coefficients to exceed 1. + +According to Sharpe (1992), the calculation for the constrained case is represented as: +\deqn{min var(Rf - sum[wi * R.si]) = min var(F - w*S)}{min \sigma(R_f - \sum{w_i * R_s_i}) = min \sigma(F - w*S)} +\deqn{s.t. sum[wi] = 1; wi > 0}{ s.t. 
\sum{w_i} = 1; w_i > 0} + +Remembering that: + +\deqn{\sigma(aX + bY) = a^2 \sigma(X) + b^2 \sigma(Y) + 2ab cov(X,Y) = \sigma(R.f) + w'*V*w - 2*w'*cov(R.f,R.s)} + +we can drop \eqn{var(Rf)}{\sigma(R_f)} as it isn't a function of weights, multiply both sides by 1/2: + +\deqn{= min (1/2) w'*V*w - C'w}{= min (1/2) w'*V*w - C'w} +\deqn{ s.t. w'*e = 1, w_i > 0}{ s.t. w'*e = 1, w_i > 0} + +Which allows us to use \code{\link[quadprog]{solve.QP}}, which is specified as: +\deqn{min(-d' b + 1/2 b' D b)}{min(-d' b + 1/2 b' D b)} +and the constraints +\deqn{ A' b >= b.0 }{ A' b >= b_0 } + +so: +b is the weight vector, +D is the variance-covariance matrix of the styles +d is the covariance vector between the fund and the styles + +The chart functions then provide a graphical summary of the results. The underlying +function, \code{\link{style.fit}}, provides the outputs of the analysis and more +information about fit, including an R-squared value. + +Styles identified in this analysis may be interpreted as an average of potentially +changing exposures over the period covered. The function \code{\link{chart.RollingStyle}} +may be useful for examining the behavior of a manager's average exposures to asset classes over time, using a rolling-window analysis. + + The chart functions plot a column chart or stacked column chart of the resulting style weights to the current device. Both \code{style.fit} and \code{style.QPfit} produce a list of data frames containing 'weights' and 'R.squared' results. If 'model' = TRUE in \code{style.QPfit}, the full result set is shown from the output of \code{solve.QP}. +} +\references{ +Sharpe, W. Asset Allocation: Management Style and Performance Measurement Journal of Portfolio Management, 1992, 7-19. See \url{ http://www.stanford.edu/~wfsharpe/art/sa/sa.htm} + } +\author{ Peter Carl } +\note{ + None of the functions \code{chart.Style}, \code{style.fit}, and \code{style.QPfit} make any attempt to align the two input data series. 
The \code{chart.RollingStyle}, on the other hand, does merge the two series and manages the calculation over common periods. +} +\seealso{ \code{\link{barplot}}, \code{\link{par}} } +\examples{ +data(edhec) +data(managers) +style.fit(managers[97:132,2,drop=FALSE],edhec[85:120,], method="constrained", leverage=FALSE) +chart.Style(managers[97:132,2,drop=FALSE],edhec[85:120,], method="constrained", leverage=FALSE, unstack=TRUE, las=3) +chart.RollingStyle(managers[,2,drop=FALSE],edhec[,1:11], method="constrained", leverage=FALSE, width=36, cex.legend = .7, colorset=rainbow12equal, las=1) +} +% Add one or more standard keywords, see file 'KEYWORDS' in the +% R documentation directory. +\keyword{ ts } +\keyword{ multivariate } +\keyword{ hplot } Added: pkg/FactorAnalytics/sandbox/Man/chart.Style.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/chart.Style.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/chart.Style.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,191 @@ +\name{chart.Style} +\alias{chart.RollingStyle} +\alias{chart.Style} +\alias{style.fit} +\alias{style.QPfit} +\alias{table.RollingStyle} +\title{calculate and display effective style weights} +\usage{ + chart.Style(R.fund, R.style, + method = c("constrained", "unconstrained", "normalized"), + leverage = FALSE, main = NULL, ylim = NULL, + unstacked = TRUE, ...) +} +\arguments{ + \item{R.fund}{matrix, data frame, or zoo object with fund + returns to be analyzed} + + \item{R.style}{matrix, data frame, or zoo object with + style index returns. Data object must be of the same + length and time-aligned with R.fund} + + \item{method}{specify the method of calculation of style + weights as "constrained", "unconstrained", or + "normalized". For more information, see + \code{\link{style.fit}}} + + \item{leverage}{logical, defaults to 'FALSE'. If 'TRUE', + the calculation of weights assumes that leverage may be + used. 
For more information, see \code{\link{style.fit}}} + + \item{model}{logical. If 'model' = TRUE in + \code{\link{style.QPfit}}, the full result set is shown + from the output of \code{solve.QP}.} + + \item{selection}{either "none" (default) or "AIC". If + "AIC", then the function uses a stepwise regression to + identify find the model with minimum AIC value. See + \code{\link{step}} for more detail.} + + \item{unstacked}{logical. If set to 'TRUE' \emph{and} + only one row of data is submitted in 'w', then the chart + creates a normal column chart. If more than one row is + submitted, then this is ignored. See examples below.} + + \item{space}{the amount of space (as a fraction of the + average bar width) left before each bar, as in + \code{\link{barplot}}. Default for + \code{chart.RollingStyle} is 0; for \code{chart.Style} + the default is 0.2.} + + \item{main}{set the chart title, same as in + \code{\link{plot}}} + + \item{width}{number of periods or window to apply rolling + style analysis over} + + \item{ylim}{set the y-axis limit, same as in + \code{\link{plot}}} + + \item{\dots}{for the charting functions, these are + arguments to be passed to \code{\link{barplot}}. These + can include further arguments (such as 'axes', 'asp' and + 'main') and graphical parameters (see 'par') which are + passed to 'plot.window()', 'title()' and 'axis'. For the + calculation functions, these are ignored.} +} +\description{ + Functions that calculate effective style weights and + display the results in a bar chart. \code{chart.Style} + calculates and displays style weights calculated over a + single period. \code{chart.RollingStyle} calculates and + displays those weights in rolling windows through time. + \code{style.fit} manages the calculation of the weights + by method. \code{style.QPfit} calculates the specific + constraint case that requires quadratic programming. 
+} +\details{ + These functions calculate style weights using an asset + class style model as described in detail in Sharpe + (1992). The use of quadratic programming to determine a + fund's exposures to the changes in returns of major asset + classes is usually refered to as "style analysis". + + The "unconstrained" method implements a simple factor + model for style analysis, as in: \deqn{Ri = + bi1*F1+bi2*F2+...+bin*Fn+ei}{R_i = + b_{i1}F_1+b_{i2}F_2+\dots+b_{in}F_n +e_i} where + \eqn{Ri}{R_i} represents the return on asset i, + \eqn{Fj}{F_j} represents each factor, and \eqn{ei}{e_i} + represents the "non-factor" component of the return on i. + This is simply a multiple regression analysis with fund + returns as the dependent variable and asset class returns + as the independent variables. The resulting slope + coefficients are then interpreted as the fund's historic + exposures to asset class returns. In this case, + coefficients do not sum to 1. + + The "normalized" method reports the results of a multiple + regression analysis similar to the first, but with one + constraint: the coefficients are required to add to 1. + Coefficients may be negative, indicating short exposures. + To enforce the constraint, coefficients are normalized. + + The "constrained" method includes the constraint that the + coefficients sum to 1, but adds that the coefficients + must lie between 0 and 1. These inequality constraints + require a quadratic programming algorithm using + \code{\link[quadprog]{solve.QP}} from the 'quadprog' + package, and the implementation is discussed under + \code{\link{style.QPfit}}. If set to TRUE, "leverage" + allows the sum of the coefficients to exceed 1. + + According to Sharpe (1992), the calculation for the + constrained case is represented as: \deqn{min var(Rf - + sum[wi * R.si]) = min var(F - w*S)}{min \sigma(R_f - + \sum{w_i * R_s_i}) = min \sigma(F - w*S)} \deqn{s.t. + sum[wi] = 1; wi > 0}{ s.t. 
\sum{w_i} = 1; w_i > 0} + + Remembering that: + + \deqn{\sigma(aX + bY) = a^2 \sigma(X) + b^2 \sigma(Y) + + 2ab cov(X,Y) = \sigma(R.f) + w'*V*w - 2*w'*cov(R.f,R.s)} + + we can drop \eqn{var(Rf)}{\sigma(R_f)} as it isn't a + function of weights, multiply both sides by 1/2: + + \deqn{= min (1/2) w'*V*w - C'w}{= min (1/2) w'*V*w - C'w} + \deqn{ s.t. w'*e = 1, w_i > 0}{ s.t. w'*e = 1, w_i > 0} + + Which allows us to use \code{\link[quadprog]{solve.QP}}, + which is specified as: \deqn{min(-d' b + 1/2 b' D + b)}{min(-d' b + 1/2 b' D b)} and the constraints \deqn{ + A' b >= b.0 }{ A' b >= b_0 } + + so: b is the weight vector, D is the variance-covariance + matrix of the styles d is the covariance vector between + the fund and the styles + + The chart functions then provide a graphical summary of + the results. The underlying function, + \code{\link{style.fit}}, provides the outputs of the + analysis and more information about fit, including an + R-squared value. + + Styles identified in this analysis may be interpreted as + an average of potentially changing exposures over the + period covered. The function + \code{\link{chart.RollingStyle}} may be useful for + examining the behavior of a manager's average exposures + to asset classes over time, using a rolling-window + analysis. + + The chart functions plot a column chart or stacked column + chart of the resulting style weights to the current + device. Both \code{style.fit} and \code{style.QPfit} + produce a list of data frames containing 'weights' and + 'R.squared' results. If 'model' = TRUE in + \code{style.QPfit}, the full result set is shown from the + output of \code{solve.QP}. +} +\note{ + None of the functions \code{chart.Style}, + \code{style.fit}, and \code{style.QPfit} make any attempt + to align the two input data series. The + \code{chart.RollingStyle}, on the other hand, does merge + the two series and manages the calculation over common + periods. 
+} +\examples{ +data(edhec) +data(managers) +style.fit(managers[97:132,2,drop=FALSE],edhec[85:120,], method="constrained", leverage=FALSE) +chart.Style(managers[97:132,2,drop=FALSE],edhec[85:120,], method="constrained", leverage=FALSE, unstack=TRUE, las=3) +chart.RollingStyle(managers[,2,drop=FALSE],edhec[,1:11], method="constrained", leverage=FALSE, width=36, cex.legend = .7, colorset=rainbow12equal, las=1) +} +\author{ + Peter Carl +} +\references{ + Sharpe, W. Asset Allocation: Management Style and + Performance Measurement Journal of Portfolio Management, + 1992, 7-19. See \url{ + http://www.stanford.edu/~wfsharpe/art/sa/sa.htm} +} +\seealso{ + \code{\link{barplot}}, \code{\link{par}} +} +\keyword{hplot} +\keyword{multivariate} +\keyword{ts} + Added: pkg/FactorAnalytics/sandbox/Man/covEWMA.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/covEWMA.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/covEWMA.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,49 @@ +\name{covEWMA} +\alias{covEWMA} +\title{Compute RiskMetrics-type EWMA Covariance Matrix} +\usage{ + covEWMA(factors, lambda = 0.96, return.cor = FALSE) +} +\arguments{ + \item{factors}{\code{T x K} data.frame containing asset + returns, where \code{T} is the number of time periods and + \code{K} is the number of assets.} + + \item{lambda}{Scalar exponential decay factor. Must lie + between between 0 and 1.} + + \item{return.cor}{Logical, if TRUE then return EWMA + correlation matrices.} +} +\value{ + \code{T x K x K} array giving the time series of EWMA + covariance matrices if \code{return.cor=FALSE} and EWMA + correlation matrices if \code{return.cor=TRUE}. +} +\description{ + Compute time series of RiskMetrics-type EWMA covariance + matrices of returns. Initial covariance matrix is assumed + to be the unconditional covariance matrix. 
+} +\details{ + The EWMA covariance matrix at time \code{t} is compute as + \cr \code{Sigma(t) = lambda*Sigma(t-1) + + (1-lambda)*R(t)t(R(t))} \cr where \code{R(t)} is the + \code{K x 1} vector of returns at time \code{t}. +} +\examples{ +# compute time vaying covariance of factors. +data(managers.df) +factors = managers.df[,(7:9)] +cov.f.ewma <- covEWMA(factors) +cov.f.ewma[120,,] +} +\author{ + Eric Zivot and Yi-An Chen. +} +\references{ + Zivot, E. and J. Wang (2006), \emph{Modeling Financial + Time Series with S-PLUS, Second Edition}, + Springer-Verlag. +} + Added: pkg/FactorAnalytics/sandbox/Man/impliedFactorReturns.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/impliedFactorReturns.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/impliedFactorReturns.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,53 @@ +\name{impliedFactorReturns} +\alias{impliedFactorReturns} +\title{Compute Implied Factor Returns Using Covariance Matrix Approach} +\usage{ + impliedFactorReturns(factor.scenarios, mu.factors, + cov.factors) +} +\arguments{ + \item{factor.scenarios}{m x 1 vector of scenario values + for a subset of the n > m risk factors} + + \item{mu.factors}{\code{n x 1} vector of factor mean + returns.} + + \item{cov.factors}{\code{n x n} factor covariance + matrix.} +} +\value{ + \code{(n - m) x 1} vector of implied factor returns +} +\description{ + Compute risk factor conditional mean returns for a one + group of risk factors given specified returns for another + group of risk factors based on the assumption that all + risk factor returns are multivariately normally + distributed. +} +\details{ + Let \code{y} denote the \code{m x 1} vector of factor + scenarios and \code{x} denote the \code{(n-m) x 1} vector + of other factors. Assume that \code{(y', x')'} has a + multivariate normal distribution with mean \code{(mu.y', + mu.x')'} and covariance matrix partitioned as + \code{(cov.yy, cov.yx, cov.xy, cov.xx)}. 
Then the implied + factor scenarios are computed as \code{E[x|y] = mu.x + + cov.xy*cov.xx^-1 * (y - mu.y)} +} +\examples{ +# get data +data(managers.df) +factors = managers.df[,(7:9)] +# make up a factor mean returns scenario for factor SP500.TR +factor.scenarios <- 0.1 +names(factor.scenarios) <- "SP500.TR" +mu.factors <- mean(factors) +cov.factors <- var(factors) +# implied factor returns +impliedFactorReturns(factor.scenarios,mu.factors,cov.factors) +} +\author{ + Eric Zivot and Yi-An Chen. +} + Added: pkg/FactorAnalytics/sandbox/Man/modifiedEsReport.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/modifiedEsReport.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/modifiedEsReport.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,71 @@ +\name{modifiedEsReport} +\alias{modifiedEsReport} +\title{compute ES report via Cornish-Fisher expansion for collection of assets in a +portfolio given simulated (bootstrapped) return data.} +\usage{ + modifiedEsReport(bootData, w, delta.w = 0.001, + tail.prob = 0.01, method = c("derivative", "average"), + nav, nav.p, fundStrategy, i1, i2) +} +\arguments{ + \item{bootData}{B x n matrix of B bootstrap returns on + assets in portfolio.} + + \item{w}{n x 1 vector of portfolio weights.} + + \item{delta.w}{scalar, change in portfolio weight for + computing numerical derivative. Default value is 0.010.} + + \item{tail.prob}{scalar tail probability.} + + \item{method}{character, method for computing marginal + ES. 
Valid choices are "derivative" for numerical + computation of the derivative of portfolio ES wrt fund + portfolio weight; "average" for approximating E[Ri | + Rp<=VaR]} + + \item{nav}{n x 1 vector of net asset values in each + fund.} + + \item{nav.p}{scalar, net asset value of portfolio + percentage.} + + \item{fundStrategy}{n x 1 vector of fund strategies.} + + \item{i1,i2}{if ff object is used, the ffapply functions + do apply an EXPRession and provide two indices FROM="i1" + and TO="i2", which mark beginning and end of the batch + and can be used in the applied expression.} +} +\value{ + dataframe with the following columns: Strategy n x 1 + strategy. Net.Asset.value n x 1 net asset values. + Allocation n x 1 vector of asset weights. Mean n x 1 mean + of each funds. Std.Dev n x 1 standard deviation of each + funds. Assets.ES n x 1 vector of asset specific ES + values. cES n x 1 vector of asset specific component ES + values. cES.dollar n x 1 vector of asset specific + component ES values in dollar terms. pcES n x 1 vector of + asset specific percent contribution to ES values. iES n x + 1 vector of asset specific incremental ES values. + iES.dollar n x 1 vector of asset specific component ES + values in dollar terms. mES n x 1 vector of asset + specific marginal ES values. mES.dollar n x 1 vector of + asset specific marginal ES values in dollar terms. +} +\description{ + compute ES report via Cornish-Fisher expansion for + collection of assets in a portfolio given simulated + (bootstrapped) return data. Report format follows that of + Excel VaR report. +} +\examples{ +data(managers.df) +ret.assets = managers.df[,(1:6)] +modifiedEsReport (bootData= ret.assets[,1:3], w=c(1/3,1/3,1/3), delta.w = 0.001, tail.prob = 0.01, + method="derivative",nav=c(100,200,100), nav.p=500, fundStrategy=c("S1","S2","S3")) +} +\author{ + Eric Zivot and Yi-An Chen. 
+} + Added: pkg/FactorAnalytics/sandbox/Man/modifiedIncrementalES.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/modifiedIncrementalES.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/modifiedIncrementalES.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,44 @@ +\name{modifiedIncrementalES} +\alias{modifiedIncrementalES} +\title{Compute incremental ES given bootstrap data and portfolio weights.} +\usage{ + modifiedIncrementalES(bootData, w, tail.prob = 0.01, i1, + i2) +} +\arguments{ + \item{bootData}{B x N matrix of B bootstrap returns on n + assets in portfolio.} + + \item{w}{N x 1 vector of portfolio weights} + + \item{tail.prob}{scalar tail probability.} + + \item{i1,i2}{if ff object is used, the ffapply functions + do apply an EXPRession and provide two indices FROM="i1" + and TO="i2", which mark beginning and end of the batch + and can be used in the applied expression.} +} +\value{ + n x 1 matrix of incremental ES values for each asset. +} +\description{ + Compute incremental ES given bootstrap data and portfolio + weights. Incremental ES is defined as the change in + portfolio ES that occurs when an asset is removed from + the portfolio and allocation is spread equally among + remaining assets. VaR used in ES computation is computed + as an estimated quantile using the Cornish-Fisher + expansion. +} +\examples{ +data(managers.df) +ret.assets = managers.df[,(1:6)] +modifiedIncrementalES(ret.assets[,1:3],w=c(1/3,1/3,1/3),tail.prob = 0.05) +} +\author{ + Eric Zivot and Yi-An Chen. +} +\references{ + Jorian, P. (2007). Value-at-Risk, pg. 168. 
+} + Added: pkg/FactorAnalytics/sandbox/Man/modifiedIncrementalVaR.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/modifiedIncrementalVaR.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/modifiedIncrementalVaR.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,43 @@ +\name{modifiedIncrementalVaR} +\alias{modifiedIncrementalVaR} +\title{Compute incremental VaR given bootstrap data and portfolio weights.} +\usage{ + modifiedIncrementalVaR(bootData, w, tail.prob = 0.01, i1, + i2) +} +\arguments{ + \item{bootData}{B x N matrix of B bootstrap returns on n + assets in portfolio.} + + \item{w}{N x 1 vector of portfolio weights} + + \item{tail.prob}{scalar tail probability.} + + \item{i1,i2}{if ff object is used, the ffapply functions + do apply an EXPRession and provide two indices FROM="i1" + and TO="i2", which mark beginning and end of the batch + and can be used in the applied expression.} +} +\value{ + n x 1 matrix of incremental VaR values for each asset. +} +\description{ + Compute incremental VaR given bootstrap data and + portfolio weights. Incremental VaR is defined as the + change in portfolio VaR that occurs when an asset is + removed from the portfolio and allocation is spread + equally among remaining assets. VaR is computed as an + estimated quantile using the Cornish-Fisher expansion. +} +\examples{ +data(managers.df) +ret.assets = managers.df[,(1:6)] +modifiedIncrementalVaR(ret.assets[,1:3],w=c(1/3,1/3,1/3),tail.prob = 0.05) +} +\author{ + Eric Zivot and Yi-An Chen. +} +\references{ + Jorian, P. (2007). Value-at-Risk, pg. 168. 
+} + Added: pkg/FactorAnalytics/sandbox/Man/modifiedPortfolioEsDecomposition.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/modifiedPortfolioEsDecomposition.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/modifiedPortfolioEsDecomposition.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,54 @@ +\name{modifiedPortfolioEsDecomposition} +\alias{modifiedPortfolioEsDecomposition} +\title{Compute portfolio ES (risk) decomposition by assets.} +\usage{ + modifiedPortfolioEsDecomposition(bootData, w, + delta.w = 0.001, tail.prob = 0.01, + method = c("derivative", "average")) +} +\arguments{ + \item{bootData}{B x N matrix of B bootstrap returns on + assets in portfolio.} + + \item{w}{N x 1 vector of portfolio weights} + + \item{delta.w}{Scalar, change in portfolio weight for + computing numerical derivative.} + + \item{tail.prob}{Scalar, tail probability.} + + \item{method}{Character, method for computing marginal + ES. Valid choices are "derivative" for numerical + computation of the derivative of portfolio ES with + respect to fund portfolio weight; "average" for + approximating E[R_i | R_p<=VaR].} +} +\value{ + an S3 list containing +} +\description{ + Compute portfolio ES decomposition given historical or + simulated data and portfolio weights. Marginal ES is + computed either as the numerical derivative of ES with + respect to portfolio weight or as the expected fund + return given portfolio return is less than or equal to + portfolio VaR VaR is compute as an estimated quantile + using the Cornish-Fisher expansion. +} +\examples{ +data(managers.df) +ret.assets = managers.df[,(1:6)] +modifiedPortfolioEsDecomposition(ret.assets[,1:3], w=c(1/3,1/3,1/3), delta.w = 0.001, + tail.prob = 0.01, method=c("derivative")) +} +\author{ + Eric Zivot and Yi-An Chen. +} +\references{ + 1. Hallerback (2003), "Decomposing Portfolio + Value-at-Risk: A General Analysis", The Journal of Risk + 5/2. 2. Yamai and Yoshiba (2002). 
"Comparative Analyses + of Expected Shortfall and Value-at-Risk: Their Estimation + Error, Decomposition, and Optimization Bank of Japan. +} + Added: pkg/FactorAnalytics/sandbox/Man/modifiedPortfolioVaRDecomposition.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/modifiedPortfolioVaRDecomposition.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/modifiedPortfolioVaRDecomposition.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,57 @@ +\name{modifiedPortfolioVaRDecomposition} +\alias{modifiedPortfolioVaRDecomposition} +\title{Compute portfolio VaR decomposition given historical or simulated data and +portfolio weights.} +\usage{ + modifiedPortfolioVaRDecomposition(bootData, w, + delta.w = 0.001, tail.prob = 0.01, + method = c("derivative", "average")) +} +\arguments{ + \item{bootData}{B x N matrix of B bootstrap returns on + assets in portfolio.} + + \item{w}{N x 1 vector of portfolio weights} + + \item{delta.w}{Scalar, change in portfolio weight for + computing numerical derivative.} + + \item{tail.prob}{Scalar, tail probability.} + + \item{method}{Character, method for computing marginal + ES. Valid choices are "derivative" for numerical + computation of the derivative of portfolio ES with + respect to fund portfolio weight; "average" for + approximating E[R_i | R_p =VaR].} +} +\value{ + an S3 list containing +} +\description{ + Compute portfolio VaR decomposition given historical or + simulated data and portfolio weights. The partial + derivative of VaR wrt factor beta is computed as the + expected factor return given fund return is equal to its + VaR and approximated by kernel estimator. VaR is compute + as an estimated quantile using the Cornish-Fisher + expansion. +} +\examples{ +data(managers.df) +ret.assets = managers.df[,(1:6)] +modifiedPortfolioVaRDecomposition(ret.assets[,1:3], w=c(1/3,1/3,1/3), delta.w = 0.001, + tail.prob = 0.01, method=c("average")) +} +\author{ + Eric Zivot and Yi-An Chen. 
+} +\references{ + 1. Hallerback (2003), "Decomposing Portfolio + Value-at-Risk: A General Analysis", The Journal of Risk + 5/2. 2. Yamai and Yoshiba (2002). "Comparative Analyses + of Expected Shortfall and Value-at-Risk: Their Estimation + Error, Decomposition, and Optimization Bank of Japan. 3. + Epperlein and Smillie (2006) "Cracking VAR with Kernels," + Risk. +} + Added: pkg/FactorAnalytics/sandbox/Man/modifiedVaRReport.Rd =================================================================== --- pkg/FactorAnalytics/sandbox/Man/modifiedVaRReport.Rd (rev 0) +++ pkg/FactorAnalytics/sandbox/Man/modifiedVaRReport.Rd 2013-09-11 17:20:09 UTC (rev 3058) @@ -0,0 +1,73 @@ +\name{modifiedVaRReport} +\alias{modifiedVaRReport} +\title{compute VaR report via Cornish-Fisher expansion for collection of assets in +a portfolio given simulated (bootstrapped) return data.} +\usage{ + modifiedVaRReport(bootData, w, delta.w = 0.001, + tail.prob = 0.01, method = c("derivative", "average"), + nav, nav.p, fundStrategy, i1, i2) +} +\arguments{ + \item{bootData}{B x n matrix of B bootstrap returns on + assets in portfolio.} + + \item{w}{n x 1 vector of portfolio weights.} + + \item{delta.w}{scalar, change in portfolio weight for + computing numerical derivative. 
Default value is 0.010.} + + \item{tail.prob}{scalar tail probability.} + + \item{method}{character, method for computing marginal + VaR Valid choices are "derivative" for numerical + computation of the derivative of portfolio VaR wrt fund + portfolio weight; "average" for approximating E[Ri | Rp + =VaR]} + + \item{nav}{n x 1 vector of net asset values in each + fund.} + + \item{nav.p}{scalar, net asset value of portfolio + percentage.} + + \item{fundStrategy}{n x 1 vector of fund strategies.} + + \item{i1,i2}{if ff object is used, the ffapply functions + do apply an EXPRession and provide two indices FROM="i1" + and TO="i2", which mark beginning and end of the batch + and can be used in the applied expression.} +} +\value{ + dataframe with the following columns: Strategy n x 1 + strategy. Net.Asset.value n x 1 net asset values. + Allocation n x 1 vector of asset weights. Mean n x 1 mean + of each funds. Std.Dev n x 1 standard deviation of each + funds. Assets.VaR n x 1 vector of asset specific VaR + values. cVaR n x 1 vector of asset specific component VaR + values. cVaR.dollar n x 1 vector of asset specific + component VaR values in dollar terms. pcVaR n x 1 vector + of asset specific percent contribution to VaR values. + iVaR n x 1 vector of asset specific incremental VaR + values. iVaR.dollar n x 1 vector of asset specific + component VaR values in dollar terms. mVaR n x 1 vector + of asset specific marginal VaR values. mVaR.dollar n x 1 + vector of asset specific marginal VaR values in dollar + terms. +} +\description{ + compute VaR report via Cornish-Fisher expansion for + collection of assets in a portfolio given simulated + (bootstrapped) return data. Report format follows that of + Excel VaR report. 
+} +\examples{ +data(managers.df) +ret.assets = managers.df[,(1:6)] [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3058 From noreply at r-forge.r-project.org Wed Sep 11 19:27:19 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 19:27:19 +0200 (CEST) Subject: [Returnanalytics-commits] r3059 - in pkg/PortfolioAnalytics: . R man Message-ID: <20130911172719.BA83A185326@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-11 19:27:19 +0200 (Wed, 11 Sep 2013) New Revision: 3059 Added: pkg/PortfolioAnalytics/man/transaction_cost_constraint.Rd Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/chart.RiskReward.R pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/R/charts.GenSA.R pkg/PortfolioAnalytics/R/charts.PSO.R pkg/PortfolioAnalytics/R/charts.ROI.R pkg/PortfolioAnalytics/R/charts.RP.R pkg/PortfolioAnalytics/R/constraints.R pkg/PortfolioAnalytics/man/chart.RiskReward.Rd Log: Making chart.RiskReward a more simple S3 generic. Updating documentation. 
Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-11 17:27:19 UTC (rev 3059) @@ -67,6 +67,7 @@ export(set.portfolio.moments_v2) export(set.portfolio.moments) export(trailingFUN) +export(transaction_cost_constraint) export(turnover_constraint) export(turnover_objective) export(turnover) @@ -81,6 +82,7 @@ S3method(chart.EfficientFrontier,efficient.frontier) S3method(chart.EfficientFrontier,optimize.portfolio.ROI) S3method(chart.EfficientFrontier,optimize.portfolio) +S3method(chart.RiskReward,opt.list) S3method(chart.RiskReward,optimize.portfolio.DEoptim) S3method(chart.RiskReward,optimize.portfolio.GenSA) S3method(chart.RiskReward,optimize.portfolio.pso) @@ -94,6 +96,8 @@ S3method(chart.Weights,optimize.portfolio.ROI) S3method(chart.Weights.EF,efficient.frontier) S3method(chart.Weights.EF,optimize.portfolio) +S3method(extractObjectiveMeasures,opt.list) +S3method(extractObjectiveMeasures,optimize.portfolio) S3method(extractStats,optimize.portfolio.DEoptim) S3method(extractStats,optimize.portfolio.GenSA) S3method(extractStats,optimize.portfolio.parallel) Modified: pkg/PortfolioAnalytics/R/chart.RiskReward.R =================================================================== --- pkg/PortfolioAnalytics/R/chart.RiskReward.R 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/R/chart.RiskReward.R 2013-09-11 17:27:19 UTC (rev 3059) @@ -21,13 +21,17 @@ #' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} #' @param xlim set the x-axis limit, same as in \code{\link{plot}} #' @param ylim set the y-axis limit, same as in \code{\link{plot}} +#' @param rp TRUE/FALSE to generate random portfolios to plot the feasible space +#' @param main a main title for the plot +#' @param labels.assets TRUE/FALSE to include the names in the plot. 
+#' @param pch.assets plotting character of the assets, same as in \code{\link{plot}} +#' @param cex.assets A numerical value giving the amount by which the asset points should be magnified relative to the default. +#' @param cex.lab A numerical value giving the amount by which the labels should be magnified relative to the default. +#' @param colorset color palette or vector of colors to use #' @seealso \code{\link{optimize.portfolio}} #' @rdname chart.RiskReward -#' @aliases chart.RiskReward.optimize.portfolio.DEoptim chart.RiskReward.optimize.portfolio.RP -#' chart.RiskReward.optimize.portfolio.ROI chart.RiskReward.optimize.portfolio.pso -#' chart.RiskReward.optimize.portfolio.GenSA #' @export -chart.RiskReward <- function(object, neighbors, ..., return.col, risk.col, chart.assets, element.color, cex.axis, xlim, ylim){ +chart.RiskReward <- function(object, ...){ UseMethod("chart.RiskReward") } Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-11 17:27:19 UTC (rev 3059) @@ -92,7 +92,7 @@ chart.Weights.optimize.portfolio.DEoptim <- chart.Weights.DE -chart.Scatter.DE <- function(object, neighbors = NULL, ..., return.col='mean', risk.col='ES', chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ +chart.Scatter.DE <- function(object, ..., neighbors = NULL, return.col='mean', risk.col='ES', chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ # more or less specific to the output of the DEoptim portfolio code with constraints # will work to a point with other functions, such as optimize.porfolio.parallel # there's still a lot to do to improve this. 
@@ -289,9 +289,9 @@ box(col = element.color) } +#' @rdname chart.RiskReward #' @method chart.RiskReward optimize.portfolio.DEoptim #' @S3method chart.RiskReward optimize.portfolio.DEoptim -#' @export chart.RiskReward.optimize.portfolio.DEoptim <- chart.Scatter.DE Modified: pkg/PortfolioAnalytics/R/charts.GenSA.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-11 17:27:19 UTC (rev 3059) @@ -78,7 +78,7 @@ #' @S3method chart.Weights optimize.portfolio.GenSA chart.Weights.optimize.portfolio.GenSA <- chart.Weights.GenSA -chart.Scatter.GenSA <- function(object, neighbors=NULL, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, element.color="darkgray", cex.axis=0.8, ylim=NULL, xlim=NULL, rp=FALSE){ +chart.Scatter.GenSA <- function(object, ..., neighbors=NULL, return.col="mean", risk.col="ES", chart.assets=FALSE, element.color="darkgray", cex.axis=0.8, ylim=NULL, xlim=NULL, rp=FALSE){ if(!inherits(object, "optimize.portfolio.GenSA")) stop("object must be of class 'optimize.portfolio.GenSA'") @@ -135,9 +135,9 @@ box(col = element.color) } +#' @rdname chart.RiskReward #' @method chart.RiskReward optimize.portfolio.GenSA #' @S3method chart.RiskReward optimize.portfolio.GenSA -#' @export chart.RiskReward.optimize.portfolio.GenSA <- chart.Scatter.GenSA Modified: pkg/PortfolioAnalytics/R/charts.PSO.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-11 17:27:19 UTC (rev 3059) @@ -78,7 +78,7 @@ #' @S3method chart.Weights optimize.portfolio.pso chart.Weights.optimize.portfolio.pso <- chart.Weights.pso -chart.Scatter.pso <- function(object, neighbors=NULL, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ 
+chart.Scatter.pso <- function(object, ..., neighbors=NULL, return.col="mean", risk.col="ES", chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ if(!inherits(object, "optimize.portfolio.pso")) stop("object must be of class 'optimize.portfolio.pso'") R <- object$R @@ -193,9 +193,9 @@ box(col = element.color) } +#' @rdname chart.RiskReward #' @method chart.RiskReward optimize.portfolio.pso #' @S3method chart.RiskReward optimize.portfolio.pso -#' @export chart.RiskReward.optimize.portfolio.pso <- chart.Scatter.pso Modified: pkg/PortfolioAnalytics/R/charts.ROI.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-11 17:27:19 UTC (rev 3059) @@ -79,7 +79,7 @@ chart.Weights.optimize.portfolio.ROI <- chart.Weights.ROI -chart.Scatter.ROI <- function(object, neighbors=NULL, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL, rp=FALSE){ +chart.Scatter.ROI <- function(object, ..., neighbors=NULL, return.col="mean", risk.col="ES", chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL, rp=FALSE){ if(!inherits(object, "optimize.portfolio.ROI")) stop("object must be of class 'optimize.portfolio.ROI'") @@ -137,9 +137,9 @@ box(col = element.color) } +#' @rdname chart.RiskReward #' @method chart.RiskReward optimize.portfolio.ROI #' @S3method chart.RiskReward optimize.portfolio.ROI -#' @export chart.RiskReward.optimize.portfolio.ROI <- chart.Scatter.ROI Modified: pkg/PortfolioAnalytics/R/charts.RP.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-11 17:27:19 UTC (rev 3059) @@ -93,7 +93,7 @@ #' @S3method chart.Weights optimize.portfolio.random 
chart.Weights.optimize.portfolio.random <- chart.Weights.RP -chart.Scatter.RP <- function(object, neighbors = NULL, ..., return.col='mean', risk.col='ES', chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ +chart.Scatter.RP <- function(object, ..., neighbors = NULL, return.col='mean', risk.col='ES', chart.assets=FALSE, element.color = "darkgray", cex.axis=0.8, xlim=NULL, ylim=NULL){ # more or less specific to the output of the random portfolio code with constraints # will work to a point with other functions, such as optimize.porfolio.parallel # there's still a lot to do to improve this. @@ -246,9 +246,9 @@ box(col = element.color) } +#' @rdname chart.RiskReward #' @method chart.RiskReward optimize.portfolio.random #' @S3method chart.RiskReward optimize.portfolio.random -#' @export chart.RiskReward.optimize.portfolio.random <- chart.Scatter.RP Modified: pkg/PortfolioAnalytics/R/constraints.R =================================================================== --- pkg/PortfolioAnalytics/R/constraints.R 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/R/constraints.R 2013-09-11 17:27:19 UTC (rev 3059) @@ -983,6 +983,7 @@ #' utility problems with ROI quadprog plugin. #' #' @param type character type of the constraint +#' @param assets number of assets, or optionally a named vector of assets specifying initial weights #' @param ptc proportional transaction cost value #' @param enabled TRUE/FALSE #' @param message TRUE/FALSE. The default is message=FALSE. Display messages if TRUE. 
Modified: pkg/PortfolioAnalytics/man/chart.RiskReward.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-11 17:20:09 UTC (rev 3058) +++ pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-11 17:27:19 UTC (rev 3059) @@ -1,15 +1,46 @@ -\name{chart.RiskReward} +\name{chart.RiskReward.optimize.portfolio.DEoptim} \alias{chart.RiskReward} +\alias{chart.RiskReward.opt.list} \alias{chart.RiskReward.optimize.portfolio.DEoptim} \alias{chart.RiskReward.optimize.portfolio.GenSA} \alias{chart.RiskReward.optimize.portfolio.pso} +\alias{chart.RiskReward.optimize.portfolio.random} \alias{chart.RiskReward.optimize.portfolio.ROI} -\alias{chart.RiskReward.optimize.portfolio.RP} \title{classic risk reward scatter} \usage{ - chart.RiskReward(object, neighbors, ..., return.col, - risk.col, chart.assets, element.color, cex.axis, xlim, - ylim) + \method{chart.RiskReward}{optimize.portfolio.DEoptim} (object, ..., neighbors = NULL, return.col = "mean", + risk.col = "ES", chart.assets = FALSE, + element.color = "darkgray", cex.axis = 0.8, + xlim = NULL, ylim = NULL) + + \method{chart.RiskReward}{optimize.portfolio.random} (object, ..., neighbors = NULL, return.col = "mean", + risk.col = "ES", chart.assets = FALSE, + element.color = "darkgray", cex.axis = 0.8, + xlim = NULL, ylim = NULL) + + \method{chart.RiskReward}{optimize.portfolio.ROI} (object, ..., neighbors = NULL, return.col = "mean", + risk.col = "ES", chart.assets = FALSE, + element.color = "darkgray", cex.axis = 0.8, + xlim = NULL, ylim = NULL, rp = FALSE) + + \method{chart.RiskReward}{optimize.portfolio.pso} (object, ..., neighbors = NULL, return.col = "mean", + risk.col = "ES", chart.assets = FALSE, + element.color = "darkgray", cex.axis = 0.8, + xlim = NULL, ylim = NULL) + + \method{chart.RiskReward}{optimize.portfolio.GenSA} (object, ..., neighbors = NULL, return.col = "mean", + risk.col = "ES", chart.assets = FALSE, + element.color = "darkgray", 
cex.axis = 0.8, + ylim = NULL, xlim = NULL, rp = FALSE) + + chart.RiskReward(object, ...) + + \method{chart.RiskReward}{opt.list} (object, ..., + risk.col = "ES", return.col = "mean", main = "", + ylim = NULL, xlim = NULL, labels.assets = TRUE, + pch.assets = 1, cex.assets = 0.8, cex.axis = 0.8, + cex.lab = 0.8, colorset = NULL, + element.color = "darkgray") } \arguments{ \item{object}{optimal portfolio created by @@ -40,6 +71,27 @@ \item{ylim}{set the y-axis limit, same as in \code{\link{plot}}} + + \item{rp}{TRUE/FALSE to generate random portfolios to + plot the feasible space} + + \item{main}{a main title for the plot} + + \item{labels.assets}{TRUE/FALSE to include the names in + the plot.} + + \item{pch.assets}{plotting character of the assets, same + as in \code{\link{plot}}} + + \item{cex.assets}{A numerical value giving the amount by + which the asset points should be magnified relative to + the default.} + + \item{cex.lab}{A numerical value giving the amount by + which the labels should be magnified relative to the + default.} + + \item{colorset}{color palette or vector of colors to use} } \description{ \code{neighbors} may be specified in three ways. The Added: pkg/PortfolioAnalytics/man/transaction_cost_constraint.Rd =================================================================== --- pkg/PortfolioAnalytics/man/transaction_cost_constraint.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/transaction_cost_constraint.Rd 2013-09-11 17:27:19 UTC (rev 3059) @@ -0,0 +1,50 @@ +\name{transaction_cost_constraint} +\alias{transaction_cost_constraint} +\title{constructor for transaction_cost_constraint} +\usage{ + transaction_cost_constraint(type = "transaction_cost", + assets, ptc, enabled = TRUE, message = FALSE, ...) 
+} +\arguments{ + \item{type}{character type of the constraint} + + \item{assets}{number of assets, or optionally a named + vector of assets specifying initial weights} + + \item{ptc}{proportional transaction cost value} + + \item{enabled}{TRUE/FALSE} + + \item{message}{TRUE/FALSE. The default is message=FALSE. + Display messages if TRUE.} + + \item{\dots}{any other passthru parameters to specify box + and/or group constraints} +} +\description{ + The transaction cost constraint specifies a proportional + cost value. This function is called by add.constraint + when type="transaction_cost" is specified, see + \code{\link{add.constraint}}. +} +\details{ + Note that with the ROI solvers, proportional transaction + cost constraint is currently only supported for the + global minimum variance and quadratic utility problems + with ROI quadprog plugin. +} +\examples{ +data(edhec) +ret <- edhec[, 1:4] + +pspec <- portfolio.spec(assets=colnames(ret)) + +pspec <- add.constraint(portfolio=pspec, type="transaction_cost", ptc=0.01) +} +\author{ + Ross Bennett +} +\seealso{ + \code{\link{add.constraint}} +} + From noreply at r-forge.r-project.org Wed Sep 11 21:04:12 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 21:04:12 +0200 (CEST) Subject: [Returnanalytics-commits] r3060 - in pkg/Meucci: R data demo Message-ID: <20130911190412.ABB35185667@r-forge.r-project.org> Author: xavierv Date: 2013-09-11 21:04:12 +0200 (Wed, 11 Sep 2013) New Revision: 3060 Added: pkg/Meucci/data/butterfliesAnalytics.rda Modified: pkg/Meucci/R/data.R pkg/Meucci/demo/ButterflyTrading.R Log: - fixed data and its documentation for the butterfly trading example from the FFP paper Modified: pkg/Meucci/R/data.R =================================================================== --- pkg/Meucci/R/data.R 2013-09-11 17:27:19 UTC (rev 3059) +++ pkg/Meucci/R/data.R 2013-09-11 19:04:12 UTC (rev 3060) @@ -208,4 +208,24 @@ #' @author Xavier 
Valls\email{flamejat@@gmail.com} #' @references A. Meucci, Exercises in Advanced Risk and Portfolio Management. \url{http://symmys.com/node/170} #' @keywords data +NULL + +#' @title Panel X of joint returns realizations and vector p of respective probabilities +#' +#' @name returnsDistribution +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +#' October 2008, p 100-106. \url{http://symmys.com/node/158} +#' @keywords data +NULL + +#' @title Factor Distribution Butterflies +#' +#' @name FDButterflies +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +#' October 2008, p 100-106. \url{http://symmys.com/node/158} +#' @keywords data NULL \ No newline at end of file Added: pkg/Meucci/data/butterfliesAnalytics.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/butterfliesAnalytics.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/Meucci/demo/ButterflyTrading.R =================================================================== --- pkg/Meucci/demo/ButterflyTrading.R 2013-09-11 17:27:19 UTC (rev 3059) +++ pkg/Meucci/demo/ButterflyTrading.R 2013-09-11 19:04:12 UTC (rev 3060) @@ -1,308 +1,25 @@ -#' This script performs the butterfly-trading case study for the -#' Entropy-Pooling approach by Attilio Meucci, as it appears in -#' "A. Meucci - Fully Flexible Views: Theory and Practice - -#' The Risk Magazine, October 2008, p 100-106" -#' available at www.symmys.com > Research > Working Papers -#' Adapted from Code by A. 
Meucci, September 2008 -#' Last version available at www.symmys.com > Teaching > MATLAB +#' This script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci, +#' as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, +#' p 100-106 +#' +#' Most recent version of article and MATLAB code available at +#' http://www.symmys.com/node/158 +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "ButterflyTrading/S_MAIN.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} -PlotFrontier = function( e , s , w ) -{ - # subplot(2,1,1) - plot( s , e ) - # grid on - # set(gca,'xlim',[min(s) max(s)]) - # - # subplot(2,1,2) - - xx = nrow( w ) ; N = ncol( w ) - Data = apply( w , 1 , cumsum ) #TODO: Check. Take cumulative sum of *rows*. Try sapply? - - for ( n in 1:N ) - { - x = cbind( min(s) , s , max(s) ) - y = cbind( 0 , Data[ , N-n+1 ] , 0 ) - # hold on - #h = fill( x , y , cbind( .9 , .9 , .9) - mod( n , 3 ) %*% cbind( .2 , .2 , .2) ) - } - - #set(gca,'xlim',[min(s) max(s)],'ylim',[0 max(max(Data))]) - #xlabel('portfolio # (risk propensity)') - #ylabel('portfolio composition') -} -ViewCurveSlope = function( X , p ) -{ - # view 3 (expectations and binding constraints): slope of the yield curve will increase by 5 bp - - J = nrow( X ) ; K = ncol( X ) - - # constrain probabilities to sum to one... - Aeq = ones( 1 , J ) - beq = 1 - - # ...constrain the expectation... - V = X[ , 14 ] - X[ , 13 ] - v = .0005 - - Aeq = rbind( Aeq , t(V) ) - - beq = rbind( beq , v ) - - A = b = emptyMatrix - - # ...compute posterior probabilities - p_ = EntropyProg( p , A , b , Aeq ,beq )$p_ - return( p_ ) -} - -ViewRealizedVol = function( X , p ) -{ - # view 2 (relative inequality view on median): bullish on realized volatility of MSFT (i.e. 
absolute log-change in the underlying). - # This is the variable such that, if larger than a threshold, a long position in the butterfly turns into a profit (e.g. Rachev 2003) - # we issue a relative statement on the media comparing it with the third quintile implied by the reference market model - - library( matlab ) - J = nrow( X ) ; K = ncol( X ) - - # constrain probabilities to sum to one... - Aeq = ones( 1 , J ) - beq = 1 - - # ...constrain the median... - V = abs( X[ , 1 ] ) # absolute value of the log of changes in MSFT close prices (definition of realized volatility) - - V_Sort = sort( V , decreasing = FALSE ) # sorting of the abs value of log changes in prices from smallest to largest - I_Sort = order( V ) - - F = cumsum( p[ I_Sort ] ) # represents the cumulative sum of probabilities from ~0 to 1 - - I_Reference = max( matlab:::find( F <= 3/5 ) ) # finds the (max) index corresponding to element with value <= 3/5 along the empirical cumulative density function for the abs log-changes in price - V_Reference = V_Sort[ I_Reference ] # returns the corresponding abs log of change in price at the 3/5 of the cumulative density function - - I_Select = find( V <= V_Reference ) # finds all indices with value of abs log-change in price less than the reference value - - a = zeros( 1 , J ) - a[ I_Select ] = 1 # select those cases where the abs log-change in price is less than the 3/5 of the empirical cumulative density... - - A = a - b = .5 # ... and assign the probability of these cases occuring as 50%. This moves the media of the distribution - - # ...compute posterior probabilities - p_ = EntropyProg( p , A , b , Aeq , beq )$p_ - - return( p_ ) -} - -ViewImpliedVol = function( X , p ) -{ - # View 1 (inequality view): bearish on on 2m-6m implied volaility spread for Google - - J = nrow( X ) ; K = ncol( X ) - - # constrain probabilities to sum to one... - Aeq = ones( 1 , J ) - beq = 1 - - # ...constrain the expectation... 
- V = X[ , 12 ] - X[ , 11 ] # GOOG_vol_182 (6m implied vol) - GOOG_vol_91 (2m implied vol) - m = mean( V ) - s = std( V ) - - A = t( V ) - b = m - s - - # ...compute posterior probabilities - p_ = EntropyProg( p , A , b , Aeq , beq )$p_ - - return( p_ ) -} - -ComputeCVaR = function( Units , Scenarios , Conf ) -{ - PnL = Scenarios %*% Units - Sort_PnL = PnL[ order( PnL , decreasing = FALSE ) ] - - J = length( PnL ) - Cut = round( J %*% ( 1 - Conf ) , 0 ) - - CVaR = -mean( Sort_PnL[ 1:Cut ] ) - - return( CVaR ) -} - -LongShortMeanCVaRFrontier = function( PnL , Probs , Butterflies , Options ) -{ - library( matlab ) - library( quadprog ) - library( limSolve ) - - # setup constraints - J = nrow(PnL); N = ncol(PnL) - P_0s = matrix( , nrow = 1 , ncol = 0 ) - D_s = matrix( , nrow = 1 , ncol = 0 ) - emptyMatrix = matrix( nrow = 0 , ncol = 0 ) - - for ( n in 1:N ) - { - P_0s = cbind( P_0s , Butterflies[[n]]$P_0 ) # 1x9 matrix - D_s = cbind( D_s , Butterflies[[n]]$Delta ) # 1x9 matrix - } - - Constr = list() - Constr$Aeq = P_0s # linear coefficients in the constraints Aeq*X = beq (equality constraints) - Constr$beq = Options$Budget # the constant vector in the constraints Aeq*x = beq - - if ( Options$DeltaNeutral == TRUE ) - { - Constr$Aeq = rbind( Constr$Aeq , D_s ) # 2x9 matrix - Constr$beq = rbind( Constr$beq , 0 ) # 2x9 matrix - } - - Constr$Aleq = rbind( diag( as.vector( P_0s ) ) , -diag( as.vector( P_0s ) ) ) # linear coefficients in the constraints A*x <= b. an 18x9 matrix - Constr$bleq = rbind( Options$Limit * ones(N,1) , Options$Limit * ones(N,1) ) # constant vector in the constraints A*x <= b. 
an 18x1 matrix - - # determine expectation of minimum-variance portfolio - Exps = t(PnL) %*% Probs - Scnd_Mom = t(PnL) %*% (PnL * (Probs %*% ones(1,N) ) ) - Scnd_Mom = ( Scnd_Mom + t(Scnd_Mom) ) / 2 - Covs = Scnd_Mom - Exps %*% t(Exps) - - Amat = rbind( Constr$Aeq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints - bvec = rbind( Constr$beq , Constr$bleq ) # stack the equality constraints on top of the inequality constraints - - #if ( nrow(Covs) != length( zeros(N,1) ) ) stop("Dmat and dvec are incompatible!") - #if ( nrow(Covs) != nrow(Amat)) stop("Amat and dvec are incompatible!") - - MinSDev_Units = solve.QP( Dmat = Covs , dvec = -1 * zeros(N,1) , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( Constr$beq) ) # TODO: Check this - MinSDev_Exp = t( MinSDev_Units$solution ) %*% Exps - - # determine expectation of maximum-expectation portfolio - - MaxExp_Units = linp( E = Constr$Aeq , F = Constr$beq , G = -1*Constr$Aleq , H = -1*Constr$bleq , Cost = -Exps , ispos = FALSE )$X - - MaxExp_Exp = t( MaxExp_Units ) %*% Exps - - # slice efficient frontier in NumPortf equally thick horizontal sections - Grid = t( seq( from = Options$FrontierSpan[1] , to = Options$FrontierSpan[2] , length.out = Options$NumPortf ) ) - TargetExp = as.numeric( MinSDev_Exp ) + Grid * as.numeric( ( MaxExp_Exp - MinSDev_Exp ) ) - - # compute composition, expectation, s.dev. and CVaR of the efficient frontier - Composition = matrix( , ncol = N , nrow = 0 ) - Exp = matrix( , ncol = 1 , nrow = 0 ) - SDev = matrix( , ncol = 1 , nrow = 0 ) - CVaR = matrix( , ncol = 1 , nrow = 0 ) - - for (i in 1:Options$NumPortf ) - { - # determine least risky portfolio for given expectation - AEq = rbind( Constr$Aeq , t(Exps) ) # equality constraint: set expected return for each asset... 
- bEq = rbind( Constr$beq , TargetExp[i] ) - - Amat = rbind( AEq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints - bvec = rbind( bEq , Constr$bleq ) # ...and target portfolio return for i'th efficient portfolio - - # Why is FirstDegree "expected returns" set to 0? - # Becasuse we capture the equality view in the equality constraints matrix - # In other words, we have a constraint that the Expected Returns by Asset %*% Weights = Target Return - Units = solve.QP( Dmat = Covs , dvec = -1*zeros(N,1) , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( bEq ) ) - - # store results - Composition = rbind( Composition , t( Units$solution ) ) - - Exp = rbind( Exp , t( Units$solution ) %*% Exps ) - SDev = rbind( SDev , sqrt( t( Units$solution ) %*% Covs %*% Units$solution ) ) - CVaR = rbind( CVaR , ComputeCVaR( Units$solution , PnL , Options$Quant ) ) - } - - colnames( Composition ) = c( "MSFT_vol_30" , "MSFT_vol_91" , "MSFT_vol_182" , - "YHOO_vol_30" , "YHOO_vol_91" , "YHOO_vol_182" , - "GOOG_vol_30" , "GOOG_vol_91" , "GOOG_vol_182" ) - - return( list( Exp = Exp , SDev = SDev , CVaR = CVaR , Composition = Composition ) ) -} - - -MapVol = function( sig , y , K , T ) -{ - # in real life a and b below should be calibrated to security-specific time series - - a=-.00000000001 - b= .00000000001 - - s = sig + a/sqrt(T) * ( log(K) - log(y) ) + b/T*( log(K) - log(y) )^2 - - return( s ) -} - -HorizonPricing = function( Butterflies , X ) -{ - r = .04 # risk-free rate - tau = 1/252 # investment horizon - - # factors: 1. 'MSFT_close' 2. 'MSFT_vol_30' 3. 'MSFT_vol_91' 4. 'MSFT_vol_182' - # securities: 1. 'MSFT_vol_30' 2. 'MSFT_vol_91' 3. 'MSFT_vol_182' - - # create a new row called DlnY and Dsig - # create a new row called 'DlnY'. 
Assign the first row (vector) of X to this DlnY for the 1:3 securities - for ( s in 1:3 ) { Butterflies[[s]]$DlnY = X[ , 1 ] } - - # assign the 2nd row of X to a new element called Dsig - Butterflies[[1]]$Dsig=X[ , 2 ] - Butterflies[[2]]$Dsig=X[ , 3 ] - Butterflies[[3]]$Dsig=X[ , 4 ] - - # factors: 5. 'YHOO_close' 6. 'YHOO_vol_30' 7. 'YHOO_vol_91' 8. 'YHOO_vol_182' - # securities: 4. 'YHOO_vol_30' 5. 'YHOO_vol_91' 6. 'YHOO_vol_182' - for ( s in 4:6 ) { Butterflies[[s]]$DlnY=X[ , 5 ] } - - Butterflies[[4]]$Dsig=X[ , 6 ] - Butterflies[[5]]$Dsig=X[ , 7 ] - Butterflies[[6]]$Dsig=X[ , 8 ] - - # factors: # 9. 'GOOG_close' 10. 'GOOG_vol_30' 11. 'GOOG_vol_91' 12. 'GOOG_vol_182' - # securities: 7. 'GOOG_vol_30' 8. 'GOOG_vol_91' 9. 'GOOG_vol_182' - for ( s in 7:9 ) { Butterflies[[s]]$DlnY=X[ , 9 ] } - - Butterflies[[7]]$Dsig=X[ , 10 ] - Butterflies[[8]]$Dsig=X[ , 11 ] - Butterflies[[9]]$Dsig=X[ , 12 ] - - PnL = matrix( NA , nrow = nrow(X) ) - - for ( s in 1:length(Butterflies) ) - { - Y = Butterflies[[s]]$Y_0 * exp(Butterflies[[s]]$DlnY) - ATMsig = apply( cbind( Butterflies[[s]]$sig_0 + Butterflies[[s]]$Dsig , 10^-6 ) , 1 , max ) - t = Butterflies[[s]]$T - tau - K = Butterflies[[s]]$K - sig = MapVol(ATMsig , Y , K , t ) - - # library(RQuantLib) # this function can only operate on one option at a time, so we use fOptions - # C = EuropeanOption( type = "call" , underlying = Y , strike = K , dividendYield = 0 , riskFreeRate = r , maturity = t , volatility = sig )$value - # P = EuropeanOption( type = "put" , underlying = Y , strike = K , dividendYield = 0 , riskFreeRate = r , maturity = t , volatility = sig )$value - - # use fOptions to value options - library( fOptions ) - C = GBSOption( TypeFlag = "c" , S = Y , X = K , r = r , b = 0 , Time = t , sigma = sig ) - P = GBSOption( TypeFlag = "p" , S = Y , X = K , r = r , b = 0 , Time = t , sigma = sig ) - - Butterflies[[s]]$P_T = C at price + P at price - PnL = cbind( PnL , Butterflies[[s]]$P_T ) - } - PnL = PnL[ , -1 ] - - return( 
PnL ) -} - ################################################################### #' Load panel X of joint factors realizations and vector p of respective probabilities #' In real life, these are provided by the estimation process ################################################################### load("butterflyTradingX.rda") -library( R.matlab ) -library( matlab ) +#library( R.matlab ) +#library( matlab ) emptyMatrix = matrix( nrow = 0 , ncol = 0 ) @@ -390,45 +107,4 @@ p_3b = ViewCurveSlope( X , p ) p_4 = ViewCurveSlopeTest( X , p ) -ViewCurveSlopeTest = function( X , p ) - { - J = nrow( X ) ; K = ncol( X ) - - # constrain probabilities to sum to one... - Aeq = ones( 1 , J ) - beq = matrix( 1 , nrow = 1 , ncol = 1 ) - browser() - # ...constrain the expectation... - V = matrix( , nrow = nrow( X ) , ncol = 0 ) - # Add 3 equality views - V = cbind( V , X[ , 14 ] - X[ , 13 ] ) # View 1: spread on treasuries - V = cbind( V , X[ , 14 ] - X[ , 13 ] ) # View 2: identical view (spread on treasuries) - V = cbind( V , X[ , 6 ] - X[ , 5 ] ) # View 3: difference in YHOO Vol - v = matrix( c( .0005 , 0 ) , nrow = ncol( V ) , ncol = 1 ) - - Aeq = rbind( Aeq , t(V) ) - - beq = rbind( beq , v ) - - # add an inequality view - # ...constrain the median... 
- V = abs( X[ , 1 ] ) # absolute value of the log of changes in MSFT close prices (definition of realized volatility) - V_Sort = sort( V , decreasing = FALSE ) # sorting of the abs value of log changes in prices from smallest to largest - I_Sort = order( V ) - - F = cumsum( p[ I_Sort ] ) # represents the cumulative sum of probabilities from ~0 to 1 - - I_Reference = max( matlab:::find( F <= 3/5 ) ) # finds the (max) index corresponding to element with value <= 3/5 along the empirical cumulative density function for the abs log-changes in price - V_Reference = V_Sort[ I_Reference ] # returns the corresponding abs log of change in price at the 3/5 of the cumulative density function - I_Select = find( V <= V_Reference ) # finds all indices with value of abs log-change in price less than the reference value - a = zeros( 1 , J ) - a[ I_Select ] = 1 # select those cases where the abs log-change in price is less than the 3/5 of the empirical cumulative density... - - A = a - b = .5 # ... and assign the probability of these cases occuring as 50%. This moves the media of the distribution - - # ...compute posterior probabilities - p_ = EntropyProg( p , A , b , Aeq ,beq ) - return( p_ ) - } From noreply at r-forge.r-project.org Wed Sep 11 21:07:28 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 21:07:28 +0200 (CEST) Subject: [Returnanalytics-commits] r3061 - in pkg/Meucci: . 
data man Message-ID: <20130911190729.11ECE185D82@r-forge.r-project.org> Author: xavierv Date: 2013-09-11 21:07:28 +0200 (Wed, 11 Sep 2013) New Revision: 3061 Added: pkg/Meucci/data/factorDistributions.rda pkg/Meucci/man/FDButterflies.Rd pkg/Meucci/man/returnsDistribution.Rd Modified: pkg/Meucci/DESCRIPTION Log: - missing files from last commit Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2013-09-11 19:04:12 UTC (rev 3060) +++ pkg/Meucci/DESCRIPTION 2013-09-11 19:07:28 UTC (rev 3061) @@ -66,7 +66,6 @@ 'MeanDiversificationFrontier.R' 'MultivariateOUnCointegration.R' 'Prior2Posterior.R' - 'RankingInformation.R' 'RobustBayesianAllocation.R' 'LognormalMoments2Parameters.R' 'LognormalParameters2Statistics.R' @@ -105,3 +104,5 @@ 'Fit2Moms.R' 'LeastInfoKernel.R' 'data.R' + 'ButterflyTradingFunctions.R' + 'RankingInformationFunctions.R' Added: pkg/Meucci/data/factorDistributions.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/factorDistributions.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/man/FDButterflies.Rd =================================================================== --- pkg/Meucci/man/FDButterflies.Rd (rev 0) +++ pkg/Meucci/man/FDButterflies.Rd 2013-09-11 19:07:28 UTC (rev 3061) @@ -0,0 +1,18 @@ +\docType{data} +\name{FDButterflies} +\alias{FDButterflies} +\title{Factor Distribution Butterflies} +\description{ + Factor Distribution Butterflies +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, "Fully Flexible Views: Theory and Practice", + The Risk Magazine, October 2008, p 100-106. 
+ \url{http://symmys.com/node/158} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/returnsDistribution.Rd =================================================================== --- pkg/Meucci/man/returnsDistribution.Rd (rev 0) +++ pkg/Meucci/man/returnsDistribution.Rd 2013-09-11 19:07:28 UTC (rev 3061) @@ -0,0 +1,19 @@ +\docType{data} +\name{returnsDistribution} +\alias{returnsDistribution} +\title{Panel X of joint returns realizations and vector p of respective probabilities} +\description{ + Panel X of joint returns realizations and vector p of + respective probabilities +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, "Fully Flexible Views: Theory and Practice", + The Risk Magazine, October 2008, p 100-106. + \url{http://symmys.com/node/158} +} +\keyword{data} +\keyword{datasets} + From noreply at r-forge.r-project.org Wed Sep 11 21:30:24 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 21:30:24 +0200 (CEST) Subject: [Returnanalytics-commits] r3062 - in pkg/Meucci: R man Message-ID: <20130911193024.AE1FF180941@r-forge.r-project.org> Author: xavierv Date: 2013-09-11 21:30:24 +0200 (Wed, 11 Sep 2013) New Revision: 3062 Added: pkg/Meucci/man/butterfliesAnalytics.Rd Modified: pkg/Meucci/R/PlotCompositionEfficientFrontier.R pkg/Meucci/R/data.R pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd Log: - fixed r check errors Modified: pkg/Meucci/R/PlotCompositionEfficientFrontier.R =================================================================== --- pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2013-09-11 19:07:28 UTC (rev 3061) +++ pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2013-09-11 19:30:24 UTC (rev 3062) @@ -10,7 +10,7 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -PlotCompositionEfficientFrontier = function( Portfolios, s, e ) +PlotCompositionEfficientFrontier = function( Portfolios ) { dev.new(); Modified: pkg/Meucci/R/data.R 
=================================================================== --- pkg/Meucci/R/data.R 2013-09-11 19:07:28 UTC (rev 3061) +++ pkg/Meucci/R/data.R 2013-09-11 19:30:24 UTC (rev 3062) @@ -228,4 +228,14 @@ #' @references A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, #' October 2008, p 100-106. \url{http://symmys.com/node/158} #' @keywords data +NULL + +#' @title Butterflies Analytics +#' +#' @name butterfliesAnalytics +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +#' October 2008, p 100-106. \url{http://symmys.com/node/158} +#' @keywords data NULL \ No newline at end of file Modified: pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd =================================================================== --- pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd 2013-09-11 19:07:28 UTC (rev 3061) +++ pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd 2013-09-11 19:30:24 UTC (rev 3062) @@ -3,7 +3,7 @@ \title{Plot the efficient frontier, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005.} \usage{ - PlotCompositionEfficientFrontier(Portfolios, s, e) + PlotCompositionEfficientFrontier(Portfolios) } \arguments{ \item{Portfolios}{: [matrix] (M x N) M portfolios of size Added: pkg/Meucci/man/butterfliesAnalytics.Rd =================================================================== --- pkg/Meucci/man/butterfliesAnalytics.Rd (rev 0) +++ pkg/Meucci/man/butterfliesAnalytics.Rd 2013-09-11 19:30:24 UTC (rev 3062) @@ -0,0 +1,18 @@ +\docType{data} +\name{butterfliesAnalytics} +\alias{butterfliesAnalytics} +\title{Butterflies Analytics} +\description{ + Butterflies Analytics +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, "Fully Flexible Views: Theory and Practice", + The Risk Magazine, October 2008, p 100-106. 
+ \url{http://symmys.com/node/158} +} +\keyword{data} +\keyword{datasets} + From noreply at r-forge.r-project.org Wed Sep 11 21:36:06 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 21:36:06 +0200 (CEST) Subject: [Returnanalytics-commits] r3063 - in pkg/FactorAnalytics: R man Message-ID: <20130911193606.B7C06180941@r-forge.r-project.org> Author: chenyian Date: 2013-09-11 21:36:06 +0200 (Wed, 11 Sep 2013) New Revision: 3063 Modified: pkg/FactorAnalytics/R/factorModelCovariance.r pkg/FactorAnalytics/R/factorModelEsDecomposition.R pkg/FactorAnalytics/R/factorModelMonteCarlo.R pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r pkg/FactorAnalytics/R/factorModelSdDecomposition.R pkg/FactorAnalytics/man/factorModelCovariance.Rd pkg/FactorAnalytics/man/factorModelEsDecomposition.Rd pkg/FactorAnalytics/man/factorModelMonteCarlo.Rd pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd pkg/FactorAnalytics/man/factorModelSdDecomposition.Rd Log: modifying several Rd files to improve documentary. Modified: pkg/FactorAnalytics/R/factorModelCovariance.r =================================================================== --- pkg/FactorAnalytics/R/factorModelCovariance.r 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/R/factorModelCovariance.r 2013-09-11 19:36:06 UTC (rev 3063) @@ -1,10 +1,11 @@ #' Compute Factor Model Covariance Matrix. #' -#' Compute asset return covariance matrix from factor model parameters. +#' Compute asset return covariance matrix from factor model. 
#' -#' The return on asset \code{i} (\code{i = 1,...,N}) is assumed to follow the -#' factor model \cr \code{R(i,t) = alpha + t(beta)*F(t) + e(i,t), e(i,t) ~ iid -#' (0, sig(i)^2)} \cr where \code{beta} is a \code{K x 1} vector of factor +#' The return on asset \code{i} is assumed to follow the +#' factor model +#' \cr \code{R(i,t) = alpha + t(beta)*F(t) + e(i,t), e(i,t) ~ iid(0, sig(i)^2)} \cr +#' where \code{beta} is a \code{K x 1} vector of factor #' exposures. The return variance is then \cr \code{var(R(i,t) = #' t(beta)*var(F(t))*beta + sig(i)^2}, \cr and the \code{N x N} covariance #' matrix of the return vector \code{R} is \cr \code{var(R) = B*var(F(t))*t(B) Modified: pkg/FactorAnalytics/R/factorModelEsDecomposition.R =================================================================== --- pkg/FactorAnalytics/R/factorModelEsDecomposition.R 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/R/factorModelEsDecomposition.R 2013-09-11 19:36:06 UTC (rev 3063) @@ -1,4 +1,4 @@ -#' Compute Factor Model Factor ES Decomposition +#' Compute Factor Model ES Decomposition #' #' Compute the factor model factor expected shortfall (ES) decomposition for an #' asset based on Euler's theorem given historic or simulated data and factor @@ -30,11 +30,11 @@ #' \item{VaR} {Scalar, nonparametric VaR value for fund reported as a #' positive number.} #' \item{n.exceed} Scalar, number of observations beyond VaR. -#' \item{idx.exceed} \code{n.exceed x 1} vector giving index values of exceedences. +#' \item{idx.exceed} n.exceed x 1 vector giving index values of exceedences. #' \item{ES.fm} Scalar. nonparametric ES value for fund reported as a positive number. -#' \item{mES.fm} \code{(K+1) x 1} vector of factor marginal contributions to ES. -#' \item{cES.fm} \code{(K+1) x 1} vector of factor component contributions to ES. -#' \item{pcES.fm} \code{(K+1) x 1} vector of factor percentage component contributions to ES. 
+#' \item{mES.fm} (K+1) x 1 vector of factor marginal contributions to ES. +#' \item{cES.fm} (K+1) x 1 vector of factor component contributions to ES. +#' \item{pcES.fm} (K+1) x 1 vector of factor percentage component contributions to ES. #' } #' @author Eric Zviot and Yi-An Chen. #' @references 1. Hallerback (2003), "Decomposing Portfolio Value-at-Risk: A Modified: pkg/FactorAnalytics/R/factorModelMonteCarlo.R =================================================================== --- pkg/FactorAnalytics/R/factorModelMonteCarlo.R 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/R/factorModelMonteCarlo.R 2013-09-11 19:36:06 UTC (rev 3063) @@ -2,7 +2,7 @@ #' #' Simulate returns using factor model Monte Carlo method. Parametric method #' like normal distribution, Cornish-Fisher and skew-t distribution for -#' residuals can be selected. Resampling method like non-parametric bootstrap +#' residuals can be selected. Resampling method such as non-parametric bootstrap #' or stationary bootstrap can be selected. #' #' The factor model Monte Carlo method is described in Jiang (2009). @@ -37,11 +37,11 @@ #' residuals in output list object. #' @return A list with the following components: #' \itemize{ -#' \item returns \code{n.boot x n.funds} matrix of simulated fund +#' \item{returns} \code{n.boot x n.funds} matrix of simulated fund #' returns. -#' \item factors \code{n.boot x n.factors} matrix of resampled factor +#' \item{factors} \code{n.boot x n.factors} matrix of resampled factor #' returns. Returned only if \code{return.factors = TRUE}. -#' \item residuals \code{n.boot x n.funds} matrix of simulated fund +#' \item{residuals} \code{n.boot x n.funds} matrix of simulated fund #' residuals. Returned only if \code{return.residuals = TRUE}. #' } #' @author Eric Zivot and Yi-An Chen. 
Modified: pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r =================================================================== --- pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/R/factorModelPerformanceAttribution.r 2013-09-11 19:36:06 UTC (rev 3063) @@ -1,14 +1,13 @@ #' Compute performance attribution #' -#' Decompose total returns or active returns into returns attributed to factors -#' and specific returns. Class of FM.attribution is generated and generic -#' function \code{plot()} and \code{summary()},\code{print()} can be used. +#' Decompose total returns into returns attributed to factors and specific returns. +#' Class of FM.attribution is generated and generic function \code{plot()} and \code{summary()},\code{print()} can be applied. #' -#' total returns can be decomposed into returns attributed to factors and -#' specific returns. \eqn{R_t = \sum_j b_{j} * f_{jt} + -#' u_t},t=1..T,\eqn{b_{j}} is exposure to factor j and \eqn{f_{jt}} is factor -#' j. The returns attributed to factor j is \eqn{b_{j} * f_{jt}} and specific -#' returns is \eqn{u_t}. +#' Total returns can be decomposed into returns attributed to factors and +#' specific returns. \cr \eqn{R_t = \sum b_j * f_jt + u_t,t=1...T} \cr +#' \code{b_j} is exposure to factor j and \code{f_jt} is factor j. +#' The returns attributed to factor j is \code{b_j * f_jt} and specific +#' returns is \code{u_t}. #' #' @param fit Class of "TimeSeriesFactorModel", "FundamentalFactorModel" or #' "statFactorModel". Modified: pkg/FactorAnalytics/R/factorModelSdDecomposition.R =================================================================== --- pkg/FactorAnalytics/R/factorModelSdDecomposition.R 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/R/factorModelSdDecomposition.R 2013-09-11 19:36:06 UTC (rev 3063) @@ -1,6 +1,7 @@ -#' Compute factor model factor risk (sd) decomposition for individual fund. 
+#' Compute factor model standard deviation decomposition #' -#' Compute factor model factor risk (sd) decomposition for individual fund. +#' Compute the factor model factor standard deviation decomposition for an +#' asset based on Euler's theorem given factor model parameters. #' #' #' @param beta.vec k x 1 vector of factor betas with factor names in the Modified: pkg/FactorAnalytics/man/factorModelCovariance.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelCovariance.Rd 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/man/factorModelCovariance.Rd 2013-09-11 19:36:06 UTC (rev 3063) @@ -20,21 +20,20 @@ model parameters. } \description{ - Compute asset return covariance matrix from factor model - parameters. + Compute asset return covariance matrix from factor model. } \details{ - The return on asset \code{i} (\code{i = 1,...,N}) is - assumed to follow the factor model \cr \code{R(i,t) = - alpha + t(beta)*F(t) + e(i,t), e(i,t) ~ iid (0, - sig(i)^2)} \cr where \code{beta} is a \code{K x 1} vector - of factor exposures. The return variance is then \cr - \code{var(R(i,t) = t(beta)*var(F(t))*beta + sig(i)^2}, - \cr and the \code{N x N} covariance matrix of the return - vector \code{R} is \cr \code{var(R) = B*var(F(t))*t(B) + - D} \cr where B is the \code{N x K} matrix of asset betas - and \code{D} is a diagonal matrix with \code{sig(i)^2} - values along the diagonal. + The return on asset \code{i} is assumed to follow the + factor model \cr \code{R(i,t) = alpha + t(beta)*F(t) + + e(i,t), e(i,t) ~ iid(0, sig(i)^2)} \cr where \code{beta} + is a \code{K x 1} vector of factor exposures. 
The return + variance is then \cr \code{var(R(i,t) = + t(beta)*var(F(t))*beta + sig(i)^2}, \cr and the \code{N x + N} covariance matrix of the return vector \code{R} is \cr + \code{var(R) = B*var(F(t))*t(B) + D} \cr where B is the + \code{N x K} matrix of asset betas and \code{D} is a + diagonal matrix with \code{sig(i)^2} values along the + diagonal. } \examples{ \dontrun{ Modified: pkg/FactorAnalytics/man/factorModelEsDecomposition.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelEsDecomposition.Rd 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/man/factorModelEsDecomposition.Rd 2013-09-11 19:36:06 UTC (rev 3063) @@ -1,6 +1,6 @@ \name{factorModelEsDecomposition} \alias{factorModelEsDecomposition} -\title{Compute Factor Model Factor ES Decomposition} +\title{Compute Factor Model ES Decomposition} \usage{ factorModelEsDecomposition(Data, beta.vec, sig2.e, tail.prob = 0.05, @@ -33,14 +33,13 @@ \item{VaR} {Scalar, nonparametric VaR value for fund reported as a positive number.} \item{n.exceed} Scalar, number of observations beyond VaR. \item{idx.exceed} - \code{n.exceed x 1} vector giving index values of - exceedences. \item{ES.fm} Scalar. nonparametric ES value - for fund reported as a positive number. \item{mES.fm} - \code{(K+1) x 1} vector of factor marginal contributions - to ES. \item{cES.fm} \code{(K+1) x 1} vector of factor - component contributions to ES. \item{pcES.fm} \code{(K+1) - x 1} vector of factor percentage component contributions - to ES. } + n.exceed x 1 vector giving index values of exceedences. + \item{ES.fm} Scalar. nonparametric ES value for fund + reported as a positive number. \item{mES.fm} (K+1) x 1 + vector of factor marginal contributions to ES. + \item{cES.fm} (K+1) x 1 vector of factor component + contributions to ES. \item{pcES.fm} (K+1) x 1 vector of + factor percentage component contributions to ES. 
} } \description{ Compute the factor model factor expected shortfall (ES) Modified: pkg/FactorAnalytics/man/factorModelMonteCarlo.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelMonteCarlo.Rd 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/man/factorModelMonteCarlo.Rd 2013-09-11 19:36:06 UTC (rev 3063) @@ -56,11 +56,11 @@ return simulated residuals in output list object.} } \value{ - A list with the following components: \itemize{ \item - returns \code{n.boot x n.funds} matrix of simulated fund - returns. \item factors \code{n.boot x n.factors} matrix - of resampled factor returns. Returned only if - \code{return.factors = TRUE}. \item residuals + A list with the following components: \itemize{ + \item{returns} \code{n.boot x n.funds} matrix of + simulated fund returns. \item{factors} \code{n.boot x + n.factors} matrix of resampled factor returns. Returned + only if \code{return.factors = TRUE}. \item{residuals} \code{n.boot x n.funds} matrix of simulated fund residuals. Returned only if \code{return.residuals = TRUE}. } @@ -69,7 +69,7 @@ Simulate returns using factor model Monte Carlo method. Parametric method like normal distribution, Cornish-Fisher and skew-t distribution for residuals can - be selected. Resampling method like non-parametric + be selected. Resampling method such as non-parametric bootstrap or stationary bootstrap can be selected. } \details{ Modified: pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/man/factorModelPerformanceAttribution.Rd 2013-09-11 19:36:06 UTC (rev 3063) @@ -19,19 +19,18 @@ attributed returns for every portfolio. } } \description{ - Decompose total returns or active returns into returns - attributed to factors and specific returns. 
Class of - FM.attribution is generated and generic function - \code{plot()} and \code{summary()},\code{print()} can be - used. + Decompose total returns into returns attributed to + factors and specific returns. Class of FM.attribution is + generated and generic function \code{plot()} and + \code{summary()},\code{print()} can be applied. } \details{ - total returns can be decomposed into returns attributed - to factors and specific returns. \eqn{R_t = \sum_j b_{j} - * f_{jt} + u_t},t=1..T,\eqn{b_{j}} is exposure to factor - j and \eqn{f_{jt}} is factor j. The returns attributed to - factor j is \eqn{b_{j} * f_{jt}} and specific returns is - \eqn{u_t}. + Total returns can be decomposed into returns attributed + to factors and specific returns. \cr \eqn{R_t = \sum b_j + * f_jt + u_t,t=1...T} \cr \code{b_j} is exposure to + factor j and \code{f_jt} is factor j. The returns + attributed to factor j is \code{b_j * f_jt} and specific + returns is \code{u_t}. } \examples{ \dontrun{ Modified: pkg/FactorAnalytics/man/factorModelSdDecomposition.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelSdDecomposition.Rd 2013-09-11 19:30:24 UTC (rev 3062) +++ pkg/FactorAnalytics/man/factorModelSdDecomposition.Rd 2013-09-11 19:36:06 UTC (rev 3063) @@ -1,6 +1,6 @@ \name{factorModelSdDecomposition} \alias{factorModelSdDecomposition} -\title{Compute factor model factor risk (sd) decomposition for individual fund.} +\title{Compute factor model standard deviation decomposition} \usage{ factorModelSdDecomposition(beta.vec, factor.cov, sig2.e) } @@ -24,8 +24,9 @@ } } \description{ - Compute factor model factor risk (sd) decomposition for - individual fund. + Compute the factor model factor standard deviation + decomposition for an asset based on Euler's theorem given + factor model parameters. 
} \examples{ # load data from the database From noreply at r-forge.r-project.org Wed Sep 11 23:31:25 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 11 Sep 2013 23:31:25 +0200 (CEST) Subject: [Returnanalytics-commits] r3064 - in pkg/FactorAnalytics: R man Message-ID: <20130911213125.E6821185BB0@r-forge.r-project.org> Author: chenyian Date: 2013-09-11 23:31:25 +0200 (Wed, 11 Sep 2013) New Revision: 3064 Modified: pkg/FactorAnalytics/R/factorModelEsDecomposition.R pkg/FactorAnalytics/R/factorModelSdDecomposition.R pkg/FactorAnalytics/R/factorModelVaRDecomposition.R pkg/FactorAnalytics/R/fitFundamentalFactorModel.R pkg/FactorAnalytics/R/fitStatisticalFactorModel.R pkg/FactorAnalytics/R/fitTimeSeriesFactorModel.R pkg/FactorAnalytics/R/plot.FM.attribution.r pkg/FactorAnalytics/R/plot.FundamentalFactorModel.r pkg/FactorAnalytics/R/plot.StatFactorModel.r pkg/FactorAnalytics/R/plot.TimeSeriesFactorModel.r pkg/FactorAnalytics/man/factorModelEsDecomposition.Rd pkg/FactorAnalytics/man/factorModelSdDecomposition.Rd pkg/FactorAnalytics/man/factorModelVaRDecomposition.Rd pkg/FactorAnalytics/man/fitFundamentalFactorModel.Rd pkg/FactorAnalytics/man/fitStatisticalFactorModel.Rd pkg/FactorAnalytics/man/fitTimeseriesFactorModel.Rd pkg/FactorAnalytics/man/plot.FM.attribution.Rd pkg/FactorAnalytics/man/plot.FundamentalFactorModel.Rd pkg/FactorAnalytics/man/plot.StatFactorModel.Rd pkg/FactorAnalytics/man/plot.TimeSeriesFactorModel.Rd Log: Improving documentation of all plot and fit method. Modified: pkg/FactorAnalytics/R/factorModelEsDecomposition.R =================================================================== --- pkg/FactorAnalytics/R/factorModelEsDecomposition.R 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/factorModelEsDecomposition.R 2013-09-11 21:31:25 UTC (rev 3064) @@ -7,10 +7,9 @@ #' equal to its value-at-risk (VaR). VaR is compute as the sample quantile of #' the historic or simulated data. 
#' -#' The factor model has the form \cr \code{R(t) = t(beta)*F(t) + e(t) = -#' t(beta.star)*F.star(t)} \cr where \code{beta.star = t(beta, sig.e)} and -#' \code{F.star(t) = (t(F(t)), t(z(t)))} By Euler's theorem \cr \code{ES.fm = -#' sum(cES.fm) = sum(beta.star*mcES.fm)} \cr +#' The factor model has the form \cr \code{R(t) = beta'F(t) + e(t) = beta.star'F.star(t)}\cr +#' where beta.star = (beta, sig.e)' and F.star(t) = (F(t)', z(t))' By Euler's +#' theorem:\cr \code{ES.fm = sum(cES.fm) = sum(beta.star*mES.fm)} \cr #' #' @param Data \code{B x (k+2)} matrix of historic or simulated data. The first #' column contains the fund returns, the second through \code{k+1}st columns @@ -37,12 +36,14 @@ #' \item{pcES.fm} (K+1) x 1 vector of factor percentage component contributions to ES. #' } #' @author Eric Zviot and Yi-An Chen. -#' @references 1. Hallerback (2003), "Decomposing Portfolio Value-at-Risk: A -#' General Analysis", \emph{The Journal of Risk} 5/2. \cr 2. Yamai and Yoshiba -#' (2002). "Comparative Analyses of Expected Shortfall and Value-at-Risk: Their -#' Estimation Error, Decomposition, and Optimization", Bank of Japan. \cr 3. -#' Meucci (2007). "Risk Contributions from Generic User-Defined Factors," -#' \emph{Risk}. +#' @references \enumerate{ +#' \item Hallerback (2003), "Decomposing Portfolio Value-at-Risk: A +#' General Analysis", The Journal of Risk 5/2. +#' \item Yamai and Yoshiba (2002)."Comparative Analyses of Expected Shortfall and Value-at-Risk: Their +#' Estimation Error, Decomposition, and Optimization Bank of Japan. +#' \item Meucci (2007). "Risk Contributions from Generic User-Defined Factors," Risk. +#' \item Epperlein and Smillie (2006) "Cracking VAR with Kernels," Risk. 
+#' } #' @examples #' #' data(managers.df) Modified: pkg/FactorAnalytics/R/factorModelSdDecomposition.R =================================================================== --- pkg/FactorAnalytics/R/factorModelSdDecomposition.R 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/factorModelSdDecomposition.R 2013-09-11 21:31:25 UTC (rev 3064) @@ -3,7 +3,11 @@ #' Compute the factor model factor standard deviation decomposition for an #' asset based on Euler's theorem given factor model parameters. #' +#' The factor model has the form \cr \code{R(t) = beta'F(t) + e(t) = beta.star'F.star(t)}\cr +#' where beta.star = (beta, sig.e)' and F.star(t) = [F(t)', z(t)]'. By Euler's +#' theorem:\cr \code{Sd.fm = sum(cSd.fm) = sum(beta.star*mSd.fm)} \cr #' +#' #' @param beta.vec k x 1 vector of factor betas with factor names in the #' rownames. #' @param factor.cov k x k factor excess return covariance matrix. @@ -11,11 +15,19 @@ #' @return an S3 object containing #' \itemize{ #' \item{Sd.fm} Scalar, std dev based on factor model. -#' \item{mSd.fm} (K+1) x 1 vector of factor marginal contributions to risk sd. -#' \item{cSd.fm} (K+1) x 1 vector of factor component contributions to risk sd. -#' \item{pcSd.fm} (K+1) x 1 vector of factor percentage component contributions to risk sd. +#' \item{mSd.fm} (K+1) x 1 vector of factor marginal contributions to sd. +#' \item{cSd.fm} (K+1) x 1 vector of factor component contributions to sd. +#' \item{pcSd.fm} (K+1) x 1 vector of factor percentage component contributions to sd. #' } #' @author Eric Zivot and Yi-An Chen +#' @references +#' \enumerate{ +#' \item Hallerback (2003), "Decomposing Portfolio Value-at-Risk: A +#' General Analysis", The Journal of Risk 5/2. +#' \item Yamai and Yoshiba (2002)."Comparative Analyses of Expected Shortfall and Value-at-Risk: Their +#' Estimation Error, Decomposition, and Optimization Bank of Japan. +#' \item Meucci (2007). "Risk Contributions from Generic User-Defined Factors," Risk. 
+#' }#' #' @examples #' #' # load data from the database Modified: pkg/FactorAnalytics/R/factorModelVaRDecomposition.R =================================================================== --- pkg/FactorAnalytics/R/factorModelVaRDecomposition.R 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/factorModelVaRDecomposition.R 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,4 +1,4 @@ -#' Compute factor model factor VaR decomposition +#' Compute factor model VaR decomposition #' #' Compute factor model factor VaR decomposition based on Euler's theorem given #' historic or simulated data and factor model parameters. The partial @@ -7,9 +7,9 @@ #' VaR is compute either as the sample quantile or as an estimated quantile #' using the Cornish-Fisher expansion. #' -#' The factor model has the form R(t) = beta'F(t) + e(t) = beta.star'F.star(t) +#' The factor model has the form \cr \code{R(t) = beta'F(t) + e(t) = beta.star'F.star(t)}\cr #' where beta.star = (beta, sig.e)' and F.star(t) = (F(t)', z(t))' By Euler's -#' theorem VaR.fm = sum(cVaR.fm) = sum(beta.star*mVaR.fm) +#' theorem:\cr \code{VaR.fm = sum(cVaR.fm) = sum(beta.star*mVaR.fm)} \cr #' #' @param Data B x (k+2) matrix of bootstrap data. First column contains #' the fund returns, second through k+1 columns contain factor returns, (k+2)nd @@ -32,12 +32,14 @@ #' \item{pcVaR.fm} (K+1) x 1 vector of factor percentage contributions to VaR. #' } #' @author Eric Zivot and Yi-An Chen -#' @references 1. Hallerback (2003), "Decomposing Portfolio Value-at-Risk: A -#' General Analysis", The Journal of Risk 5/2. 2. Yamai and Yoshiba (2002). -#' "Comparative Analyses of Expected Shortfall and Value-at-Risk: Their -#' Estimation Error, Decomposition, and Optimization Bank of Japan. 3. Meucci -#' (2007). "Risk Contributions from Generic User-Defined Factors," Risk. 4. -#' Epperlein and Smillie (2006) "Cracking VAR with Kernels," Risk. 
+#' @references +#' \enumerate{ +#' \item Hallerback (2003), "Decomposing Portfolio Value-at-Risk: A +#' General Analysis", The Journal of Risk 5/2. +#' \item Yamai and Yoshiba (2002)."Comparative Analyses of Expected Shortfall and Value-at-Risk: Their +#' Estimation Error, Decomposition, and Optimization Bank of Japan. +#' \item Meucci (2007). "Risk Contributions from Generic User-Defined Factors," Risk. +#' } #' @examples #' #' data(managers.df) Modified: pkg/FactorAnalytics/R/fitFundamentalFactorModel.R =================================================================== --- pkg/FactorAnalytics/R/fitFundamentalFactorModel.R 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/fitFundamentalFactorModel.R 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,28 +1,25 @@ #' fit fundamental factor model by classic OLS or Robust regression technique #' -#' fit fundamental factor model or cross-sectional time series factor model by -#' classic OLS or Robust regression technique. Fundamental factor models use +#' fit fundamental factor model or cross-sectional factor model by +#' classic OLS or Robust regression. Fundamental factor models use #' observable asset specific characteristics (fundamentals) like industry #' classification, market capitalization, style classification (value, growth) -#' etc. to determine the common risk factors. The function creates the class +#' etc. to calculate the common risk factors. The function creates the class #' "FundamentalFactorModel". #' #' @details #' If style factor exposure is standardized to regression-weighted mean zero, this makes -#' style factors orthogonal to the Word factor (intercept term), which in turn facilitted +#' style factors orthogonal to the world factor (intercept term), which in turn facilitted #' interpretation of the style factor returns. See Menchero 2010. #' #' The original function was designed by Doug Martin and originally implemented -#' in S-PLUS by a number of UW Ph.D. 
students:Christopher Green, Eric Aldrich, -#' and Yindeng Jiang. Guy Yullen re-implemented the function in R and requires -#' the following additional R libraries: zoo time series library, robust -#' Insightful robust library ported to R and robustbase Basic robust statistics -#' package for R. Yi-An Chen from UW economics deparment re-organize the codes and finalize this -#' function. +#' in S-PLUS by a number of UW Ph.D. students: Christopher Green, Eric Aldrich, +#' and Yindeng Jiang. Guy Yullen re-implemented the function in R. Yi-An Chen from +#' University of Washington re-writes the codes and finalizes the function. #' #' #' @param data data.frame, data must have \emph{assetvar}, \emph{returnvar}, \emph{datevar} -#' , and exposure.names. Generally, data is panel data setup, so it needs firm variabales +#' , and exposure.names. Generally, data has to look like panel data. It needs firm variabales #' and time variables. Data has to be a balanced panel. #' @param exposure.names a character vector of exposure names for the factor model #' @param wls logical flag, TRUE for weighted least squares, FALSE for ordinary @@ -75,6 +72,7 @@ #' \itemize{ #' \item "The Characteristics of Factor Portfolios", Fall 2010, MENCHERO Jose, #' Journal of Performance Measurement. +#' \item Grinold,R and Kahn R, \emph{Active Portfolio Management}. #' } #' #' @export Modified: pkg/FactorAnalytics/R/fitStatisticalFactorModel.R =================================================================== --- pkg/FactorAnalytics/R/fitStatisticalFactorModel.R 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/fitStatisticalFactorModel.R 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,11 +1,12 @@ -#' Fit statistical factor model using principle components +#' Fit statistical factor model using principle components analysis #' #' Fit statistical factor model using principle components. This function is -#' mainly adapted from S+FinMetric function mfactor. 
+#' mainly adapted from S+FinMetric function \code{mfactor}. #' #' #' @param data a vector, matrix, data.frame, xts, timeSeries or zoo object with asset returns -#' and factors retunrs names +#' and factors retunrs names. If data does not have xts class, rownames must provide +#' xts compatible time index. #' @param k numbers of factors if it is scalar or method of choosing optimal #' number of factors. "bn" represents Bai and Ng (2002) method and "ck" #' represents Connor and korajczyk (1993) method. Default is k = 1. @@ -20,23 +21,24 @@ #' #' @return #' \itemize{ -#' \item{factors}{T x K the estimated factors.} -#' \item{loadings}{K x N the asset specific factor loadings beta_i. +#' \item{factors}{ T x K the estimated factors.} +#' \item{loadings}{ K x N the asset specific factor loadings beta_i. #' estimated from regress the asset returns on factors.} -#' \item{alpha}{1 x N the estimated intercepts alpha_i} -#' \item{ret.cov}{N x N asset returns sample variance covariance matrix.} -#' \item{r2}{regression r square value from regress the asset returns on +#' \item{alpha}{ 1 x N the estimated intercepts alpha_i} +#' \item{ret.cov}{ N x N asset returns sample variance covariance matrix.} +#' \item{r2}{ regression r square value from regress the asset returns on #' factors.} -#' \item{k}{the number of the facotrs.} -#' \item{eigen}{eigenvalues from the sample covariance matrix.} -#' \item{residuals}{T x N matrix of residuals from regression.} -#' \item{asset.ret}{asset returns} -#' \item{asset.fit}{List of regression lm class of individual returns on +#' \item{k}{ the number of the facotrs.} +#' \item{eigen}{ eigenvalues from the sample covariance matrix.} +#' \item{residuals}{ T x N matrix of residuals from regression.} +#' \item{asset.ret}{ asset returns} +#' \item{asset.fit}{ List of regression lm class of individual returns on #' factors.} -#' \item{resid.variance}{vector of residual variances.} -#' \item{mimic}{N x K matrix of factor mimicking portfolio 
returns.} +#' \item{resid.variance}{ vector of residual variances.} +#' \item{mimic}{ N x K matrix of factor mimicking portfolio returns.} #' } #' @author Eric Zivot and Yi-An Chen +#' @references Zivot and Wang, (2006) "Modeling Financial Time Series with S-PLUS, 2nd edition" #' @examples #' #' # load data for fitStatisticalFactorModel.r Modified: pkg/FactorAnalytics/R/fitTimeSeriesFactorModel.R =================================================================== --- pkg/FactorAnalytics/R/fitTimeSeriesFactorModel.R 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/fitTimeSeriesFactorModel.R 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,9 +1,11 @@ #' Fit time series factor model by time series regression techniques. #' -#' @description Fit time series factor model by time series regression techniques. It -#' creates the class of "TimeSeriesFactorModel". +#' @description Fit time series factor model by time series regression techniques for single +#' or multiple assets. Classic OLS, Robust regression can be chosen and several model selection methods +#' can be applied. Class "TimeSeriesFactorModel" will be created too. #' -#' @details add.up.market.returns adds a max(0,Rm-Rf) term in the regression as suggested by +#' @details +#' \code{add.up.market.returns} adds a max(0,Rm-Rf) term in the regression as suggested by #' Merton-Henriksson Model (1981) to measure market timing. The coefficient can be interpreted as #' number of free put options. #' Modified: pkg/FactorAnalytics/R/plot.FM.attribution.r =================================================================== --- pkg/FactorAnalytics/R/plot.FM.attribution.r 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/plot.FM.attribution.r 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,24 +1,26 @@ #' plot FM.attribution class #' #' Generic function of plot method for factorModelPerformanceAttribution. -#' Either plot all fit models or choose a single asset to plot. 
+#' Either plot all assets or choose a single asset to plot. #' #' #' @param fm.attr FM.attribution object created by -#' factorModelPerformanceAttribution. -#' @param which.plot integer indicating which plot to create: "none" will -#' create a menu to choose. Defualt is none. 1 = attributed cumulative returns, -#' 2 = attributed returns on date selected by user, 3 = time series of -#' attributed returns +#' \code{factorModelPerformanceAttribution}. +#' @param which.plot Integer indicates which plot to create: "none" will +#' create a menu to choose. Defualt is none.\cr +#' 1 = attributed cumulative returns,\cr +#' 2 = attributed returns on date selected by user,\cr +#' 3 = time series of attributed returns #' @param max.show Maximum assets to plot. Default is 6. -#' @param date date indicates for attributed returns, the date format should be +#' @param date Indicates for attributed returns, the date format should be #' xts compatible. #' @param plot.single Plot a single asset of lm class. Defualt is FALSE. #' @param fundName Name of the portfolio to be plotted. -#' @param which.plot.single integer indicating which plot to create: "none" -#' will create a menu to choose. Defualt is none. 1 = attributed cumulative -#' returns, 2 = attributed returns on date selected by user, 3 = time series of -#' attributed returns +#' @param which.plot.single Integer indicates which plot to create: "none" +#' will create a menu to choose. Defualt is none.\cr +#' 1 = attributed cumulative returns,\cr +#' 2 = attributed returns on date selected by user, \cr +#' 3 = time series of attributed returns #' @param ... more arguements for \code{chart.TimeSeries} used for plotting #' time series #' @author Yi-An Chen. 
Modified: pkg/FactorAnalytics/R/plot.FundamentalFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/plot.FundamentalFactorModel.r 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/plot.FundamentalFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,9 +1,3 @@ -# plot.FundamentalFactorModel.r -# Yi-An Chen -# 7/16/2012 - - - #' plot FundamentalFactorModel object. #' #' Generic function of plot method for fitFundamentalFactorModel. @@ -11,31 +5,31 @@ #' #' @param x fit object created by fitFundamentalFactorModel. #' @param which.plot integer indicating which plot to create: "none" will -#' create a menu to choose. Defualt is none. -#' 1 = "Factor returns", -#' 2 = "Residual plots", -#' 3 = "Variance of Residuals", -#' 4 = "Factor Model Correlation", -#' 5 = "Factor Contributions to SD", -#' 6 = "Factor Contributions to ES", -#' 7 = "Factor Contributions to VaR" +#' create a menu to choose. Defualt is none. \cr +#' 1 = "Factor returns",\cr +#' 2 = "Residual plots",\cr +#' 3 = "Variance of Residuals",\cr +#' 4 = "Factor Model Correlation",\cr +#' 5 = "Factor Contributions to SD",\cr +#' 6 = "Factor Contributions to ES",\cr +#' 7 = "Factor Contributions to VaR"\cr #' @param max.show Maximum assets to plot. Default is 4. -#' #' @param plot.single Plot a single asset of lm class. Defualt is FALSE. +#' @param plot.single Plot a single asset of lm class. Defualt is FALSE. #' @param asset.name Name of the asset to be plotted. #' @param which.plot.single integer indicating which plot to create: "none" -#' will create a menu to choose. Defualt is none. 
-#' 1 = time series plot of actual and fitted values, -#' 2 = time series plot of residuals with standard error bands, -#' 3 = time series plot of squared residuals, -#' 4 = time series plot of absolute residuals, -#' 5 = SACF and PACF of residuals, -#' 6 = SACF and PACF of squared residuals, -#' 7 = SACF and PACF of absolute residuals, -#' 8 = histogram of residuals with normal curve overlayed, -#' 9 = normal qq-plot of residuals. +#' will create a menu to choose. Defualt is none.\cr +#' 1 = time series plot of actual and fitted values,\cr +#' 2 = time series plot of residuals with standard error bands,\cr +#' 3 = time series plot of squared residuals,\cr +#' 4 = time series plot of absolute residuals,\cr +#' 5 = SACF and PACF of residuals,\cr +#' 6 = SACF and PACF of squared residuals,\cr +#' 7 = SACF and PACF of absolute residuals,\cr +#' 8 = histogram of residuals with normal curve overlayed,\cr +#' 9 = normal qq-plot of residuals.\cr #' @param legend.txt Logical. TRUE will plot legend on barplot. Defualt is \code{TRUE}. -#' @param VaR.method haracter, method for computing VaR. Valid choices are -#' one of "modified","gaussian","historical", "kernel". computation is done with the \code{VaR} +#' @param VaR.method character, method for computing VaR. Valid choices are +#' one of "modified","gaussian","historical", "kernel". Computation is done with the \code{VaR} #' in the PerformanceAnalytics package. Default is "historical". #' @param ... other variables for barplot method. #' @author Eric Zivot and Yi-An Chen. Modified: pkg/FactorAnalytics/R/plot.StatFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/plot.StatFactorModel.r 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/plot.StatFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,42 +1,51 @@ #' plot StatFactorModel object. #' #' Generic function of plot method for fitStatisticFactorModel. 
Either plot all -#' fit models or choose a single asset to plot. +#' assets or choose a single asset to plot. #' -#' PCA works well. APCA is underconstruction. -#' -#' @param x fit object created by fitStatisticalFactorModel. -#' @param variables Optional. an integer vector telling which variables are to +#' +#' @param x fit object created by \code{fitStatisticalFactorModel}. +#' @param variables Optional. An integer vector shows which variables are to #' be plotted. The default is to plot all the variables, or the number of -#' variables explaining 90 percent of the variance, whichever is bigger. -#' @param cumulative a logical flag: if TRUE, the cumulative fraction of the +#' variables explaining 90 percent of the variance, whatever is bigger. +#' @param cumulative Logical flag: if \code{TRUE}, the cumulative fraction of the #' variance is printed above each bar in the plot. #' @param style Charater. bar or lines can be chosen. #' @param which.plot integer indicating which plot to create: "none" will -#' create a menu to choose. Defualt is none. 1 = "Screeplot of Eigenvalues", 2 -#' = "Factor returns", 3 = "FM Correlation", 4 = "R square", 5 = "Variance of -#' Residuals", 6 = "Factor Contributions to SD", 7 = "Factor Contributions to -#' ES", 8 = "Factor Contributions to VaR" -#' @param hgrid Logic. Whether to plot horizontal grid or not. Defualt is -#' FALSE. -#' @param vgrid Logic. Whether to plot vertical grid or not. Defualt is FALSE. -#' @param plot.single Plot a single asset of lm class. Defualt is FALSE. +#' create a menu to choose. Defualt is none.\cr +#' 1 = "Screeplot of Eigenvalues", \cr +#' 2 = "Factor returns", \cr +#' 3 = "FM Correlation", \cr +#' 4 = "R square",\cr +#' 5 = "Variance of Residuals", \cr +#' 6 = "Factor Contributions to SD", \cr +#' 7 = "Factor Contributions to ES", \cr +#' 8 = "Factor Contributions to VaR" \cr +#' @param hgrid Logic flag. Whether to plot horizontal grid or not. Defualt is +#' \code{FALSE}. 
+#' @param vgrid Logic flag. Whether to plot vertical grid or not. Defualt is \code{FALSE}. +#' @param plot.single Plot a single asset of lm class. Defualt is \code{FALSE}. #' @param asset.name Name of the asset to be plotted. #' @param which.plot.single integer indicating which plot to create: "none" -#' will create a menu to choose. Defualt is none. 1 = time series plot of -#' actual and fitted values 2 = time series plot of residuals with standard -#' error bands 3 = time series plot of squared residuals 4 = time series plot -#' of absolute residuals 5 = SACF and PACF of residuals 6 = SACF and PACF of -#' squared residuals 7 = SACF and PACF of absolute residuals 8 = histogram of -#' residuals with normal curve overlayed 9 = normal qq-plot of residuals 10= -#' CUSUM plot of recursive residuals 11= CUSUM plot of OLS residuals 12= CUSUM -#' plot of recursive estimates relative to full sample estimates 13= rolling -#' estimates over 24 month window +#' will create a menu to choose. Defualt is none. \cr +#' 1 = time series plot of actual and fitted values, \cr +#' 2 = time series plot of residuals with standard error bands,\cr +#' 3 = time series plot of squared residuals, \cr +#' 4 = time series plot of absolute residuals, \cr +#' 5 = SACF and PACF of residuals, \cr +#' 6 = SACF and PACF of squared residuals, \cr +#' 7 = SACF and PACF of absolute residuals, \cr +#' 8 = histogram of residuals with normal curve overlayed, \cr +#' 9 = normal qq-plot of residuals,\cr +#' 10= CUSUM plot of recursive residuals,\cr +#' 11= CUSUM plot of OLS residuals,\cr +#' 12= CUSUM plot of recursive estimates relative to full sample estimates,\cr +#' 13= rolling estimates over 24 month window. #' @param max.show Maximum assets to plot. Default is 6. -#' @param VaR.method haracter, method for computing VaR. Valid choices are -#' one of "modified","gaussian","historical", "kernel". computation is done with the \code{VaR} +#' @param VaR.method Character, method for computing VaR. 
Valid choices are +#' either "modified","gaussian","historical", "kernel". computation is done with the \code{VaR} #' in the PerformanceAnalytics package. Default is "historical". -#' @param ... other variables for barplot method. +#' @param ... Other variables for barplot method. #' @author Eric Zivot and Yi-An Chen. #' @examples #' Modified: pkg/FactorAnalytics/R/plot.TimeSeriesFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/plot.TimeSeriesFactorModel.r 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/R/plot.TimeSeriesFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,32 +1,41 @@ -#' plot TimeSeriesfactorModel object. +#' plot TimeSeriesFactorModel object. #' #' Generic function of plot method for fitTimeSeriesFactorModel. Either plot -#' all fit models or choose a single asset to plot. +#' all assets or choose a single asset to plot. #' #' -#' @param x fit object created by fitTimeSeriesFactorModel. -#' @param colorset Defualt colorset is c(1:12). -#' @param legend.loc plot legend or not. Defualt is \code{NULL}. -#' @param which.plot integer indicating which plot to create: "none" will -#' create a menu to choose. Defualt is none. 1 = "Fitted factor returns", 2 = -#' "R square", 3 = "Variance of Residuals", 4 = "FM Correlation", 5 = "Factor -#' Contributions to SD", 6 = "Factor Contributions to ES", 7 = "Factor -#' Contributions to VaR" +#' @param x fit object created by \code{fitTimeSeriesFactorModel}. +#' @param colorset Defualt colorset the same as \code{barplot}. +#' @param legend.loc Plot legend or not. Defualt is \code{NULL}. +#' @param which.plot Integer indicates which plot to create: "none" will +#' create a menu to choose. 
Defualt is none.\cr +#' 1 = "Fitted factor returns", \cr +#' 2 = "R square", \cr +#' 3 = "Variance of Residuals",\cr +#' 4 = "FM Correlation",\cr +#' 5 = "Factor Contributions to SD",\cr +#' 6 = "Factor Contributions to ES",\cr +#' 7 = "Factor Contributions to VaR" #' @param max.show Maximum assets to plot. Default is 6. -#' @param plot.single Plot a single asset of lm class. Defualt is FALSE. +#' @param plot.single Plot a single asset of lm class. Defualt is \code{FALSE}. #' @param asset.name Name of the asset to be plotted. -#' @param which.plot.single integer indicating which plot to create: "none" -#' will create a menu to choose. Defualt is none. 1 = time series plot of -#' actual and fitted values 2 = time series plot of residuals with standard -#' error bands 3 = time series plot of squared residuals 4 = time series plot -#' of absolute residuals 5 = SACF and PACF of residuals 6 = SACF and PACF of -#' squared residuals 7 = SACF and PACF of absolute residuals 8 = histogram of -#' residuals with normal curve overlayed 9 = normal qq-plot of residuals 10= -#' CUSUM plot of recursive residuals 11= CUSUM plot of OLS residuals 12= CUSUM -#' plot of recursive estimates relative to full sample estimates 13= rolling -#' estimates over 24 month window -#' @param VaR.method haracter, method for computing VaR. Valid choices are -#' one of "modified","gaussian","historical", "kernel". computation is done with the \code{VaR} +#' @param which.plot.single Integer indicates which plot to create: "none" +#' will create a menu to choose. 
Defualt is none.\cr +#' 1 = time series plot of actual and fitted values,\cr +#' 2 = time series plot of residuals with standard error bands, \cr +#' 3 = time series plot of squared residuals, \cr +#' 4 = time series plot of absolute residuals,\cr +#' 5 = SACF and PACF of residuals,\cr +#' 6 = SACF and PACF of squared residuals,\cr +#' 7 = SACF and PACF of absolute residuals,\cr +#' 8 = histogram of residuals with normal curve overlayed,\cr +#' 9 = normal qq-plot of residuals,\cr +#' 10= CUSUM plot of recursive residuals,\cr +#' 11= CUSUM plot of OLS residuals,\cr +#' 12= CUSUM plot of recursive estimates relative to full sample estimates,\cr +#' 13= rolling estimates over 24 month window. +#' @param VaR.method Character, method for computing VaR. Valid choices are +#' either "modified","gaussian","historical", "kernel". computation is done with the \code{VaR} #' in the PerformanceAnalytics package. Default is "historical". #' @author Eric Zivot and Yi-An Chen. #' @examples Modified: pkg/FactorAnalytics/man/factorModelEsDecomposition.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelEsDecomposition.Rd 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/man/factorModelEsDecomposition.Rd 2013-09-11 21:31:25 UTC (rev 3064) @@ -52,11 +52,11 @@ simulated data. } \details{ - The factor model has the form \cr \code{R(t) = - t(beta)*F(t) + e(t) = t(beta.star)*F.star(t)} \cr where - \code{beta.star = t(beta, sig.e)} and \code{F.star(t) = - (t(F(t)), t(z(t)))} By Euler's theorem \cr \code{ES.fm = - sum(cES.fm) = sum(beta.star*mcES.fm)} \cr + The factor model has the form \cr \code{R(t) = beta'F(t) + + e(t) = beta.star'F.star(t)}\cr where beta.star = (beta, + sig.e)' and F.star(t) = (F(t)', z(t))' By Euler's + theorem:\cr \code{ES.fm = sum(cES.fm) = + sum(beta.star*mES.fm)} \cr } \examples{ data(managers.df) @@ -93,12 +93,13 @@ Eric Zviot and Yi-An Chen. } \references{ - 1. 
Hallerback (2003), "Decomposing Portfolio - Value-at-Risk: A General Analysis", \emph{The Journal of - Risk} 5/2. \cr 2. Yamai and Yoshiba (2002). "Comparative + \enumerate{ \item Hallerback (2003), "Decomposing + Portfolio Value-at-Risk: A General Analysis", The Journal + of Risk 5/2. \item Yamai and Yoshiba (2002)."Comparative Analyses of Expected Shortfall and Value-at-Risk: Their - Estimation Error, Decomposition, and Optimization", Bank - of Japan. \cr 3. Meucci (2007). "Risk Contributions from - Generic User-Defined Factors," \emph{Risk}. + Estimation Error, Decomposition, and Optimization Bank of + Japan. \item Meucci (2007). "Risk Contributions from + Generic User-Defined Factors," Risk. \item Epperlein and + Smillie (2006) "Cracking VAR with Kernels," Risk. } } Modified: pkg/FactorAnalytics/man/factorModelSdDecomposition.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelSdDecomposition.Rd 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/man/factorModelSdDecomposition.Rd 2013-09-11 21:31:25 UTC (rev 3064) @@ -17,17 +17,23 @@ \value{ an S3 object containing \itemize{ \item{Sd.fm} Scalar, std dev based on factor model. \item{mSd.fm} (K+1) x 1 - vector of factor marginal contributions to risk sd. + vector of factor marginal contributions to sd. \item{cSd.fm} (K+1) x 1 vector of factor component - contributions to risk sd. \item{pcSd.fm} (K+1) x 1 vector - of factor percentage component contributions to risk sd. - } + contributions to sd. \item{pcSd.fm} (K+1) x 1 vector of + factor percentage component contributions to sd. } } \description{ Compute the factor model factor standard deviation decomposition for an asset based on Euler's theorem given factor model parameters. } +\details{ + The factor model has the form \cr \code{R(t) = beta'F(t) + + e(t) = beta.star'F.star(t)}\cr where beta.star = (beta, + sig.e)' and F.star(t) = [F(t)', z(t)]'. 
By Euler's + theorem:\cr \code{Sd.fm = sum(cSd.fm) = + sum(beta.star*mSd.fm)} \cr +} \examples{ # load data from the database data("stat.fm.data") @@ -44,4 +50,13 @@ \author{ Eric Zivot and Yi-An Chen } +\references{ + \enumerate{ \item Hallerback (2003), "Decomposing + Portfolio Value-at-Risk: A General Analysis", The Journal + of Risk 5/2. \item Yamai and Yoshiba (2002)."Comparative + Analyses of Expected Shortfall and Value-at-Risk: Their + Estimation Error, Decomposition, and Optimization Bank of + Japan. \item Meucci (2007). "Risk Contributions from + Generic User-Defined Factors," Risk. }#' +} Modified: pkg/FactorAnalytics/man/factorModelVaRDecomposition.Rd =================================================================== --- pkg/FactorAnalytics/man/factorModelVaRDecomposition.Rd 2013-09-11 19:36:06 UTC (rev 3063) +++ pkg/FactorAnalytics/man/factorModelVaRDecomposition.Rd 2013-09-11 21:31:25 UTC (rev 3064) @@ -1,6 +1,6 @@ \name{factorModelVaRDecomposition} \alias{factorModelVaRDecomposition} -\title{Compute factor model factor VaR decomposition} +\title{Compute factor model VaR decomposition} \usage{ factorModelVaRDecomposition(Data, beta.vec, sig2.e, tail.prob = 0.01, @@ -47,10 +47,11 @@ Cornish-Fisher expansion. } \details{ - The factor model has the form R(t) = beta'F(t) + e(t) = - beta.star'F.star(t) where beta.star = (beta, sig.e)' and - F.star(t) = (F(t)', z(t))' By Euler's theorem VaR.fm = - sum(cVaR.fm) = sum(beta.star*mVaR.fm) + The factor model has the form \cr \code{R(t) = beta'F(t) + + e(t) = beta.star'F.star(t)}\cr where beta.star = (beta, + sig.e)' and F.star(t) = (F(t)', z(t))' By Euler's + theorem:\cr \code{VaR.fm = sum(cVaR.fm) = + sum(beta.star*mVaR.fm)} \cr } \examples{ data(managers.df) @@ -70,13 +71,12 @@ Eric Zivot and Yi-An Chen } \references{ - 1. Hallerback (2003), "Decomposing Portfolio - Value-at-Risk: A General Analysis", The Journal of Risk - 5/2. 2. Yamai and Yoshiba (2002). 
"Comparative Analyses - of Expected Shortfall and Value-at-Risk: Their Estimation - Error, Decomposition, and Optimization Bank of Japan. 3. - Meucci (2007). "Risk Contributions from Generic - User-Defined Factors," Risk. 4. Epperlein and Smillie - (2006) "Cracking VAR with Kernels," Risk. + \enumerate{ \item Hallerback (2003), "Decomposing + Portfolio Value-at-Risk: A General Analysis", The Journal + of Risk 5/2. \item Yamai and Yoshiba (2002)."Comparative + Analyses of Expected Shortfall and Value-at-Risk: Their + Estimation Error, Decomposition, and Optimization Bank of + Japan. \item Meucci (2007). "Risk Contributions from + Generic User-Defined Factors," Risk. } } [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3064 From noreply at r-forge.r-project.org Thu Sep 12 00:05:46 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Sep 2013 00:05:46 +0200 (CEST) Subject: [Returnanalytics-commits] r3065 - in pkg/FactorAnalytics: R man Message-ID: <20130911220546.83A2A185DCF@r-forge.r-project.org> Author: chenyian Date: 2013-09-12 00:05:46 +0200 (Thu, 12 Sep 2013) New Revision: 3065 Modified: pkg/FactorAnalytics/R/predict.FundamentalFactorModel.r pkg/FactorAnalytics/R/predict.StatFactorModel.r pkg/FactorAnalytics/R/predict.TimeSeriesFactorModel.r pkg/FactorAnalytics/R/print.FM.attribution.r pkg/FactorAnalytics/R/print.FundamentalFactorModel.r pkg/FactorAnalytics/R/print.StatFactorModel.r pkg/FactorAnalytics/R/print.TimeSeriesFactorModel.r pkg/FactorAnalytics/R/rCornishFisher.R pkg/FactorAnalytics/R/summary.FM.attribution.r pkg/FactorAnalytics/R/summary.FundamentalFactorModel.r pkg/FactorAnalytics/R/summary.StatFactorModel.r pkg/FactorAnalytics/R/summary.TimeSeriesFactorModel.r pkg/FactorAnalytics/man/CornishFisher.Rd pkg/FactorAnalytics/man/predict.FundamentalFactorModel.Rd pkg/FactorAnalytics/man/predict.StatFactorModel.Rd pkg/FactorAnalytics/man/predict.TimeSeriesFactorModel.Rd 
pkg/FactorAnalytics/man/print.FM.attribution.Rd pkg/FactorAnalytics/man/print.FundamentalFactorModel.Rd pkg/FactorAnalytics/man/print.StatFactorModel.Rd pkg/FactorAnalytics/man/print.TimeSeriesFactorModel.Rd pkg/FactorAnalytics/man/summary.FM.attribution.Rd pkg/FactorAnalytics/man/summary.FundamentalFactorModel.Rd pkg/FactorAnalytics/man/summary.StatFactorModel.Rd pkg/FactorAnalytics/man/summary.TimeSeriesFactorModel.Rd Log: improving documentation for predit, print, summary methods Modified: pkg/FactorAnalytics/R/predict.FundamentalFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/predict.FundamentalFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/predict.FundamentalFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -2,14 +2,14 @@ #' #' Generic function of predict method for fitFundamentalFactorModel. #' -#' newdata must be data.frame and contians date variable, asset variable and exact +#' \code{newdata} must be data.frame and contain date variable, asset variable and exact #' exposures names that are used in fit object by \code{fitFundamentalFactorModel} #' #' @param object fit "FundamentalFactorModel" object #' @param newdata An optional data frame in which to look for variables with which to predict. #' If omitted, the fitted values are used. -#' @param new.assetvar specify new asset variable in newdata if newdata is provided. -#' @param new.datevar speficy new date variable in newdata if newdata is provided. +#' @param new.assetvar Specify new asset variable in newdata if newdata is provided. +#' @param new.datevar Speficy new date variable in newdata if newdata is provided. 
#' @method predict FundamentalFactorModel #' @export #' @author Yi-An Chen Modified: pkg/FactorAnalytics/R/predict.StatFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/predict.StatFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/predict.StatFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -4,8 +4,9 @@ #' function \code{predict.lm}. #' #' @param object A fit object created by fitStatisticalFactorModel. -#' @param newdata a vector, matrix, data.frame, xts, timeSeries or zoo object to be coerced. -#' @param ... Any other arguments used in \code{predict.lm}. For example like newdata and fit.se. +#' @param newdata A vector, matrix, data.frame, xts, timeSeries or zoo object to be coerced. +#' @param ... Any other arguments used in \code{predict.lm}, such as \code{newdata} and +#' \code{fit.se}. #' @author Yi-An Chen. #' @method predict StatFactorModel #' @export Modified: pkg/FactorAnalytics/R/predict.TimeSeriesFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/predict.TimeSeriesFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/predict.TimeSeriesFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -4,8 +4,9 @@ #' function \code{predict.lm}. #' #' @param object A fit object created by fitTimeSeiresFactorModel. -#' @param newdata a vector, matrix, data.frame, xts, timeSeries or zoo object to be coerced. -#' @param ... Any other arguments used in \code{predict.lm}. for example newdata and se.fit. +#' @param newdata A vector, matrix, data.frame, xts, timeSeries or zoo object to be coerced. +#' @param ... Any other arguments used in \code{predict.lm}, such as \code{newdata} and +#' \code{fit.se}. #' @author Yi-An Chen. 
#' #' @examples Modified: pkg/FactorAnalytics/R/print.FM.attribution.r =================================================================== --- pkg/FactorAnalytics/R/print.FM.attribution.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/print.FM.attribution.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -1,10 +1,10 @@ #' Print FM.attribution object. #' -#' Generic function of print method for factorModelPerformanceAttribution. +#' Generic function of print method for \code{factorModelPerformanceAttribution}. #' #' #' @param fm.attr FM.attribution object created by -#' factorModelPerformanceAttribution. +#' \code{factorModelPerformanceAttribution}. #' @author Yi-An Chen. #' @examples #' \dontrun{ Modified: pkg/FactorAnalytics/R/print.FundamentalFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/print.FundamentalFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/print.FundamentalFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -1,11 +1,11 @@ #' print FundamentalFactorModel object #' -#' Generic function of print method for fitFundamentalFactorModel. +#' Generic function of print method for \code{fitFundamentalFactorModel}. #' #' -#' @param x fit object created by fitFundamentalFactorModel. -#' @param digits integer indicating the number of decimal places. Default is 3. -#' @param ... Other arguments for print methods. +#' @param x Fit object created by fitFundamentalFactorModel. +#' @param digits Integer indicating the number of decimal places. Default is 3. +#' @param ... Other arguments for \code{print} methods. #' @author Yi-An Chen. 
#' @method print FundamentalFactorModel #' @export Modified: pkg/FactorAnalytics/R/print.StatFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/print.StatFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/print.StatFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -1,11 +1,11 @@ #' print StatFactorModel object #' -#' Generic function of print method for fitStatFactorModel. +#' Generic function of print method for \code{fitStatFactorModel}. #' #' -#' @param x fit object created by fitStatisticalFactorModel. -#' @param digits integer indicating the number of decimal places. Default is 3. -#' @param ... Other arguments for print methods. +#' @param x Fit object created by \code{fitStatisticalFactorModel}. +#' @param digits Integer indicating the number of decimal places. Default is 3. +#' @param ... Other arguments for \code{print} methods. #' @author Eric Zivot and Yi-An Chen. #' @examples #' Modified: pkg/FactorAnalytics/R/print.TimeSeriesFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/print.TimeSeriesFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/print.TimeSeriesFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -1,11 +1,11 @@ #' print TimeSeriesfactorModel object #' -#' Generic function of print method for fitTimeSeriesFactorModel. +#' Generic function of print method for \code{fitTimeSeriesFactorModel}. #' #' -#' @param x fit object created by fitTimeSeriesFactorModel. -#' @param digits integer indicating the number of decimal places. Default is 3. -#' @param ... arguments to be passed to print method. +#' @param x Fit object created by \code{fitTimeSeriesFactorModel}. +#' @param digits Integer indicating the number of decimal places. Default is 3. +#' @param ... Other arguments for \code{print} methods. #' @author Yi-An Chen. 
#' @method print TimeSeriesFactorModel #' @export Modified: pkg/FactorAnalytics/R/rCornishFisher.R =================================================================== --- pkg/FactorAnalytics/R/rCornishFisher.R 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/rCornishFisher.R 2013-09-11 22:05:46 UTC (rev 3065) @@ -25,19 +25,19 @@ #' deviation, skewness and excess kurtosis. #'} #' -#'@param n scalar, number of simulated values in rCornishFisher. Sample length in +#'@param n Scalar, number of simulated values in rCornishFisher. Sample length in #' density,distribution,quantile function. -#' @param sigma scalar, standard deviation. -#' @param skew scalar, skewness. -#' @param ekurt scalar, excess kurtosis. -#' @param seed set seed here. Default is \code{NULL}. -#' @param x,q vector of standardized quantiles. See detail. -#' @param p vector of probabilities. +#' @param sigma Scalar, standard deviation. +#' @param skew Scalar, skewness. +#' @param ekurt Scalar, excess kurtosis. +#' @param seed Set seed here. Default is \code{NULL}. +#' @param x,q Vector of standardized quantiles. See detail. +#' @param p Vector of probabilities. #' -#' @return n simulated values from Cornish-Fisher distribution. +#' @return n Simulated values from Cornish-Fisher distribution. #' @author Eric Zivot and Yi-An Chen. #' @references -#' \itemize{ +#' \enumerate{ #' \item A.DasGupta, "Asymptotic Theory of Statistics and #' Probability", Springer Science+Business Media,LLC 2008 #' \item Thomas A.Severini, "Likelihood Methods in Statistics", Modified: pkg/FactorAnalytics/R/summary.FM.attribution.r =================================================================== --- pkg/FactorAnalytics/R/summary.FM.attribution.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/summary.FM.attribution.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -1,10 +1,12 @@ #' summary FM.attribution object. #' -#' Generic function of summary method for factorModelPerformanceAttribution. 
+#' Generic function of summary method for \code{factorModelPerformanceAttribution}. #' #' #' @param fm.attr FM.attribution object created by -#' factorModelPerformanceAttribution. +#' \code{factorModelPerformanceAttribution}. +#' @param digits integer indicating the number of decimal places. Default is 3. +#' @param ... Other arguments for \code{print} methods. #' @author Yi-An Chen. #' @examples #' # load data from the database @@ -19,14 +21,14 @@ #' @method summary FM.attribution #' @export #' -summary.FM.attribution <- function(fm.attr) { +summary.FM.attribution <- function(fm.attr,digits = max(3, .Options$digits - 3),...) { # n <- dim(fm.attr[[1]])[1] # k <- dim(fm.attr[[1]])[2]+1 # table.mat <- matrix(rep(NA,n*k*2),ncol=n) cat("\nMean of returns attributed to factors \n") - print(sapply(fm.attr[[3]],function(x) apply(x,2,mean))) + print(sapply(fm.attr[[3]],function(x) apply(x,2,mean)),digits = digits,...) cat("\nStandard Deviation of returns attributed to factors \n") - print(sapply(fm.attr[[3]],function(x) apply(x,2,sd))) + print(sapply(fm.attr[[3]],function(x) apply(x,2,sd)),digits = digits,...) } Modified: pkg/FactorAnalytics/R/summary.FundamentalFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/summary.FundamentalFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/summary.FundamentalFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -1,11 +1,11 @@ #' summary FundamentalFactorModel object #' -#' Generic function of summary method for fitFundamentalFactorModel. +#' Generic function of summary method for \code{fitFundamentalFactorModel}. #' #' -#' @param object An object created by fitFundamentalFactorModel. +#' @param object An object created by \code{fitFundamentalFactorModel}. #' @param digits integer indicating the number of decimal places. Default is 3. -#' @param ... Other arguments for print methods. +#' @param ... Other arguments for \code{print} methods. 
#' @author Yi-An Chen. #' @method summary FundamentalFactorModel #' @export Modified: pkg/FactorAnalytics/R/summary.StatFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/summary.StatFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/summary.StatFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -1,11 +1,11 @@ #' summary method for StatFactorModel object. #' -#' Generic function of summary method for fitStatisticalFactorModel. +#' Generic function of summary method for \code{fitStatisticalFactorModel}. #' #' -#' @param object An Object created by fitStatisticalFactorModel. -#' @param digits Integer indicating the number of decimal places. Default is 3. -#' @param ... other option used in \code{summary.lm} +#' @param object An Object created by \code{fitStatisticalFactorModel}. +#' @param digits Integer indicates the number of decimal places. Default is 3. +#' @param ... other option used in \code{print} method. #' @author Yi-An Chen. #' @method summary StatFactorModel #' @export @@ -18,7 +18,7 @@ #' summary(fit) #' #' -summary.StatFactorModel <- function(object,digits=3){ +summary.StatFactorModel <- function(object,digits=3,...){ if(!is.null(cl <- object$call)) { cat("\nCall:\n") dput(cl) @@ -30,7 +30,7 @@ cat("\n", object$assets.names[i], "\n") table.macro <- t(summary(object$asset.fit[[i]])$coefficients) colnames(table.macro)[1] <- "alpha" - print(table.macro,digits = digits) + print(table.macro,digits = digits,...) cat("\nR-square =", object$r2[i] ,",residual variance =" , object$resid.variance[i],"\n") } Modified: pkg/FactorAnalytics/R/summary.TimeSeriesFactorModel.r =================================================================== --- pkg/FactorAnalytics/R/summary.TimeSeriesFactorModel.r 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/R/summary.TimeSeriesFactorModel.r 2013-09-11 22:05:46 UTC (rev 3065) @@ -1,11 +1,11 @@ #' summary method for TimeSeriesModel object. 
#' -#' Generic function of summary method for fitTimeSeriesFactorModel. +#' Generic function of summary method for \code{fitTimeSeriesFactorModel}. #' #' -#' @param object An object created by fitTimeSeiresFactorModel. -#' @param digits Integer indicating the number of decimal places. Default is 3. -#' @param ... other option used in \code{summary.lm} +#' @param object An object created by \code{fitTimeSeiresFactorModel}. +#' @param digits Integer indicates the number of decimal places. Default is 3. +#' @param ... Other option used in \code{print} method. #' @author Yi-An Chen. #' @examples #' @@ -19,7 +19,7 @@ #' @method summary TimeSeriesFactorModel #' @export #' -summary.TimeSeriesFactorModel <- function(object,digits=3){ +summary.TimeSeriesFactorModel <- function(object,digits=3,...){ if(!is.null(cl <- object$call)) { cat("\nCall:\n") dput(cl) @@ -31,7 +31,7 @@ cat("\n", object$assets.names[i], "\n") table.macro <- t(summary(object$asset.fit[[i]])$coefficients) colnames(table.macro)[1] <- "alpha" - print(table.macro,digits = digits) + print(table.macro,digits = digits,...) cat("\nR-square =", object$r2[i] ,",residual variance =" , object$resid.variance[i],"\n") } Modified: pkg/FactorAnalytics/man/CornishFisher.Rd =================================================================== --- pkg/FactorAnalytics/man/CornishFisher.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/CornishFisher.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -16,24 +16,24 @@ rCornishFisher(n, sigma, skew, ekurt, seed = NULL) } \arguments{ - \item{n}{scalar, number of simulated values in + \item{n}{Scalar, number of simulated values in rCornishFisher. Sample length in density,distribution,quantile function.} - \item{sigma}{scalar, standard deviation.} + \item{sigma}{Scalar, standard deviation.} - \item{skew}{scalar, skewness.} + \item{skew}{Scalar, skewness.} - \item{ekurt}{scalar, excess kurtosis.} + \item{ekurt}{Scalar, excess kurtosis.} - \item{seed}{set seed here. 
Default is \code{NULL}.} + \item{seed}{Set seed here. Default is \code{NULL}.} - \item{x,q}{vector of standardized quantiles. See detail.} + \item{x,q}{Vector of standardized quantiles. See detail.} - \item{p}{vector of probabilities.} + \item{p}{Vector of probabilities.} } \value{ - n simulated values from Cornish-Fisher distribution. + n Simulated values from Cornish-Fisher distribution. } \description{ \itemize{ \item \code{rCornishFisher} simulate @@ -78,7 +78,7 @@ Eric Zivot and Yi-An Chen. } \references{ - \itemize{ \item A.DasGupta, "Asymptotic Theory of + \enumerate{ \item A.DasGupta, "Asymptotic Theory of Statistics and Probability", Springer Science+Business Media,LLC 2008 \item Thomas A.Severini, "Likelihood Methods in Statistics", Oxford University Press, 2000 } Modified: pkg/FactorAnalytics/man/predict.FundamentalFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/predict.FundamentalFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/predict.FundamentalFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -12,10 +12,10 @@ for variables with which to predict. If omitted, the fitted values are used.} - \item{new.assetvar}{specify new asset variable in newdata + \item{new.assetvar}{Specify new asset variable in newdata if newdata is provided.} - \item{new.datevar}{speficy new date variable in newdata + \item{new.datevar}{Speficy new date variable in newdata if newdata is provided.} } \description{ @@ -23,9 +23,10 @@ fitFundamentalFactorModel. 
} \details{ - newdata must be data.frame and contians date variable, - asset variable and exact exposures names that are used in - fit object by \code{fitFundamentalFactorModel} + \code{newdata} must be data.frame and contain date + variable, asset variable and exact exposures names that + are used in fit object by + \code{fitFundamentalFactorModel} } \examples{ data(Stock.df) Modified: pkg/FactorAnalytics/man/predict.StatFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/predict.StatFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/predict.StatFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -9,11 +9,11 @@ \item{object}{A fit object created by fitStatisticalFactorModel.} - \item{newdata}{a vector, matrix, data.frame, xts, + \item{newdata}{A vector, matrix, data.frame, xts, timeSeries or zoo object to be coerced.} - \item{...}{Any other arguments used in \code{predict.lm}. - For example like newdata and fit.se.} + \item{...}{Any other arguments used in \code{predict.lm}, + such as \code{newdata} and \code{fit.se}.} } \description{ Generic function of predict method for Modified: pkg/FactorAnalytics/man/predict.TimeSeriesFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/predict.TimeSeriesFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/predict.TimeSeriesFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -9,11 +9,11 @@ \item{object}{A fit object created by fitTimeSeiresFactorModel.} - \item{newdata}{a vector, matrix, data.frame, xts, + \item{newdata}{A vector, matrix, data.frame, xts, timeSeries or zoo object to be coerced.} - \item{...}{Any other arguments used in \code{predict.lm}. 
- for example newdata and se.fit.} + \item{...}{Any other arguments used in \code{predict.lm}, + such as \code{newdata} and \code{fit.se}.} } \description{ Generic function of predict method for Modified: pkg/FactorAnalytics/man/print.FM.attribution.Rd =================================================================== --- pkg/FactorAnalytics/man/print.FM.attribution.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/print.FM.attribution.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -6,11 +6,11 @@ } \arguments{ \item{fm.attr}{FM.attribution object created by - factorModelPerformanceAttribution.} + \code{factorModelPerformanceAttribution}.} } \description{ Generic function of print method for - factorModelPerformanceAttribution. + \code{factorModelPerformanceAttribution}. } \examples{ \dontrun{ Modified: pkg/FactorAnalytics/man/print.FundamentalFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/print.FundamentalFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/print.FundamentalFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -6,17 +6,17 @@ digits = max(3, .Options$digits - 3), ...) } \arguments{ - \item{x}{fit object created by + \item{x}{Fit object created by fitFundamentalFactorModel.} - \item{digits}{integer indicating the number of decimal + \item{digits}{Integer indicating the number of decimal places. Default is 3.} - \item{...}{Other arguments for print methods.} + \item{...}{Other arguments for \code{print} methods.} } \description{ Generic function of print method for - fitFundamentalFactorModel. + \code{fitFundamentalFactorModel}. 
} \examples{ data(Stock.df) Modified: pkg/FactorAnalytics/man/print.StatFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/print.StatFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/print.StatFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -6,16 +6,17 @@ digits = max(3, .Options$digits - 3), ...) } \arguments{ - \item{x}{fit object created by - fitStatisticalFactorModel.} + \item{x}{Fit object created by + \code{fitStatisticalFactorModel}.} - \item{digits}{integer indicating the number of decimal + \item{digits}{Integer indicating the number of decimal places. Default is 3.} - \item{...}{Other arguments for print methods.} + \item{...}{Other arguments for \code{print} methods.} } \description{ - Generic function of print method for fitStatFactorModel. + Generic function of print method for + \code{fitStatFactorModel}. } \examples{ # load data for fitStatisticalFactorModel.r Modified: pkg/FactorAnalytics/man/print.TimeSeriesFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/print.TimeSeriesFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/print.TimeSeriesFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -6,16 +6,17 @@ digits = max(3, .Options$digits - 3), ...) } \arguments{ - \item{x}{fit object created by fitTimeSeriesFactorModel.} + \item{x}{Fit object created by + \code{fitTimeSeriesFactorModel}.} - \item{digits}{integer indicating the number of decimal + \item{digits}{Integer indicating the number of decimal places. Default is 3.} - \item{...}{arguments to be passed to print method.} + \item{...}{Other arguments for \code{print} methods.} } \description{ Generic function of print method for - fitTimeSeriesFactorModel. + \code{fitTimeSeriesFactorModel}. 
} \examples{ # load data from the database Modified: pkg/FactorAnalytics/man/summary.FM.attribution.Rd =================================================================== --- pkg/FactorAnalytics/man/summary.FM.attribution.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/summary.FM.attribution.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -2,15 +2,21 @@ \alias{summary.FM.attribution} \title{summary FM.attribution object.} \usage{ - \method{summary}{FM.attribution} (fm.attr) + \method{summary}{FM.attribution} (fm.attr, + digits = max(3, .Options$digits - 3), ...) } \arguments{ \item{fm.attr}{FM.attribution object created by - factorModelPerformanceAttribution.} + \code{factorModelPerformanceAttribution}.} + + \item{digits}{integer indicating the number of decimal + places. Default is 3.} + + \item{...}{Other arguments for \code{print} methods.} } \description{ Generic function of summary method for - factorModelPerformanceAttribution. + \code{factorModelPerformanceAttribution}. } \examples{ # load data from the database Modified: pkg/FactorAnalytics/man/summary.FundamentalFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/summary.FundamentalFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/summary.FundamentalFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -7,16 +7,16 @@ } \arguments{ \item{object}{An object created by - fitFundamentalFactorModel.} + \code{fitFundamentalFactorModel}.} \item{digits}{integer indicating the number of decimal places. Default is 3.} - \item{...}{Other arguments for print methods.} + \item{...}{Other arguments for \code{print} methods.} } \description{ Generic function of summary method for - fitFundamentalFactorModel. + \code{fitFundamentalFactorModel}. 
} \examples{ data(Stock.df) Modified: pkg/FactorAnalytics/man/summary.StatFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/summary.StatFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/summary.StatFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -2,20 +2,21 @@ \alias{summary.StatFactorModel} \title{summary method for StatFactorModel object.} \usage{ - \method{summary}{StatFactorModel} (object, digits = 3) + \method{summary}{StatFactorModel} (object, digits = 3, + ...) } \arguments{ \item{object}{An Object created by - fitStatisticalFactorModel.} + \code{fitStatisticalFactorModel}.} - \item{digits}{Integer indicating the number of decimal + \item{digits}{Integer indicates the number of decimal places. Default is 3.} - \item{...}{other option used in \code{summary.lm}} + \item{...}{other option used in \code{print} method.} } \description{ Generic function of summary method for - fitStatisticalFactorModel. + \code{fitStatisticalFactorModel}. } \examples{ # load data from the database Modified: pkg/FactorAnalytics/man/summary.TimeSeriesFactorModel.Rd =================================================================== --- pkg/FactorAnalytics/man/summary.TimeSeriesFactorModel.Rd 2013-09-11 21:31:25 UTC (rev 3064) +++ pkg/FactorAnalytics/man/summary.TimeSeriesFactorModel.Rd 2013-09-11 22:05:46 UTC (rev 3065) @@ -3,20 +3,20 @@ \title{summary method for TimeSeriesModel object.} \usage{ \method{summary}{TimeSeriesFactorModel} (object, - digits = 3) + digits = 3, ...) } \arguments{ \item{object}{An object created by - fitTimeSeiresFactorModel.} + \code{fitTimeSeiresFactorModel}.} - \item{digits}{Integer indicating the number of decimal + \item{digits}{Integer indicates the number of decimal places. 
Default is 3.} - \item{...}{other option used in \code{summary.lm}} + \item{...}{Other option used in \code{print} method.} } \description{ Generic function of summary method for - fitTimeSeriesFactorModel. + \code{fitTimeSeriesFactorModel}. } \examples{ # load data from the database From noreply at r-forge.r-project.org Thu Sep 12 03:05:37 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Sep 2013 03:05:37 +0200 (CEST) Subject: [Returnanalytics-commits] r3066 - in pkg/PerformanceAnalytics/sandbox/pulkit: . R man src Message-ID: <20130912010537.4B431185A6F@r-forge.r-project.org> Author: pulkit Date: 2013-09-12 03:05:36 +0200 (Thu, 12 Sep 2013) New Revision: 3066 Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/ExtremeDrawdown.R pkg/PerformanceAnalytics/sandbox/pulkit/R/gpdmle.R pkg/PerformanceAnalytics/sandbox/pulkit/man/DrawdownGPD.Rd pkg/PerformanceAnalytics/sandbox/pulkit/src/gpd.c Modified: pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION pkg/PerformanceAnalytics/sandbox/pulkit/NAMESPACE pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.REDD.R pkg/PerformanceAnalytics/sandbox/pulkit/R/redd.R pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd pkg/PerformanceAnalytics/sandbox/pulkit/man/rollDrawdown.Rd pkg/PerformanceAnalytics/sandbox/pulkit/src/moment.c Log: GPD files added Modified: pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION 2013-09-11 22:05:46 UTC (rev 3065) +++ pkg/PerformanceAnalytics/sandbox/pulkit/DESCRIPTION 2013-09-12 01:05:36 UTC (rev 3066) @@ -49,3 +49,5 @@ 'psr_python.R' 'ret.R' 'Penance.R' + 'ExtremeDrawdown.R' + 'gpdmle.R' Modified: pkg/PerformanceAnalytics/sandbox/pulkit/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/NAMESPACE 
2013-09-11 22:05:46 UTC (rev 3065) +++ pkg/PerformanceAnalytics/sandbox/pulkit/NAMESPACE 2013-09-12 01:05:36 UTC (rev 3066) @@ -7,6 +7,7 @@ export(chart.Penance) export(chart.REDD) export(chart.SRIndifference) +export(DrawdownGPD) export(EconomicDrawdown) export(EDDCOPS) export(golden_section) Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/ExtremeDrawdown.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/ExtremeDrawdown.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/ExtremeDrawdown.R 2013-09-12 01:05:36 UTC (rev 3066) @@ -0,0 +1,80 @@ +#'@title +#'Modelling Drawdown using Extreme Value Theory +#' +#"@description +#'It has been shown empirically that Drawdowns can be modelled using Modified Generalized Pareto +#'distribution(MGPD), Generalized Pareto Distribution(GPD) and other particular cases of MGPD such +#'as weibull distribution \eqn{MGPD(\gamma,0,\psi)} and unit exponential distribution\eqn{MGPD(1,0,\psi)} +#' +#' Modified Generalized Pareto Distribution is given by the following formula +#' +#' \deqn{ +#' G_{\eta}(m) = \begin{array}{l} 1-(1+\eta\frac{m^\gamma}{\psi})^(-1/\eta), if \eta \neq 0 \\ 1- e^{-frac{m^\gamma}{\psi}}, if \eta = 0,\end{array}} +#' +#' Here \eqn{\gamma{\epsilon}R} is the modifying parameter. When \eqn{\gamma<1} the corresponding densities are +#' strictly decreasing with heavier tail; the GDP is recovered by setting \eqn{\gamma = 1} .\eqn{\gamma \textgreater 1} +#' +#' The GDP is given by the following equation. \eqn{MGPD(1,\eta,\psi)} +#' +#'\deqn{G_{\eta}(m) = \begin{array}{l} 1-(1+\eta\frac{m}{\psi})^(-1/\eta), if \eta \neq 0 \\ 1- e^{-frac{m}{\psi}}, if \eta = 0,\end{array}} +#' +#' The weibull distribution is given by the following equation \eqn{MGPD(\gamma,0,\psi)} +#' +#'\deqn{G(m) = 1- e^{-frac{m^\gamma}{\psi}}} +#' +#'In this function generalized Pareto distribution has been covered. 
This function can be +#'expanded in the future to include more Extreme Value distributions as the literature on such distribution +#'matures in the future. +#' +#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of asset return +#' @param threshold The threshold beyond which the drawdowns have to be modelled +#' +#' +#'@references +#'Mendes, Beatriz V.M. and Leal, Ricardo P.C., Maximum Drawdown: Models and Applications (November 2003). +#'Coppead Working Paper Series No. 359.Available at SSRN: http://ssrn.com/abstract=477322 or http://dx.doi.org/10.2139/ssrn.477322. +#' +#'@examples +#'data(edhec) +#'DrawdownGPD(edhec) +#'data(managers) +#'DrawdownGPD(managers[,1:9],0.95) +#' +#'@export +DrawdownGPD<-function(R,threshold=0.90){ + x = checkData(R) + columns = ncol(R) + columnnames = colnames(R) + gpdfit<-function(data,threshold){ + gpd_fit = gpd(as.vector(data),as.vector(threshold)) + result = list(shape = gpd_fit$param[2],scale = gpd_fit$param[1]) + return(result) + } + for(column in 1:columns){ + dr = -Drawdowns(R[,column]) + thresh = quantile(na.omit(dr),threshold) + column.parameters = gpdfit(dr,thresh) + if(column == 1){ + shape = column.parameters$shape + scale = column.parameters$scale + } + else { + scale = c(scale, column.parameters$scale) + shape = c(shape, column.parameters$shape) + } + } + parameters = rbind(scale,shape) + colnames(parameters) = columnnames + parameters = reclass(parameters, x) + rownames(parameters)=c("scale","shape") + return(parameters) +} + + + + + + + + + Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R 2013-09-11 22:05:46 UTC (rev 3065) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.Penance.R 2013-09-12 01:05:36 UTC (rev 3066) @@ -42,7 +42,7 @@ #' #'@export -chart.Penance<-function(R,confidence,type=c("ar","normal"),reference.grid = TRUE,main=NULL,ylab = 
NULL,xlab = NULL,element.color="darkgrey",lwd = 2,pch = 1,cex = 1,cex.axis=0.8,cex.lab = 1,cex.main = 1,xlim = NULL,ylim = NULL,...){ +chart.Penance<-function(R,confidence=0.95,type=c("ar","normal"),reference.grid = TRUE,main=NULL,ylab = NULL,xlab = NULL,element.color="darkgrey",lwd = 2,pch = 1,cex = 1,cex.axis=0.8,cex.lab = 1,cex.main = 1,xlim = NULL,ylim = NULL,...){ # DESCRIPTION: # Draws the scatter plot of Phi vs Penance. Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.REDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.REDD.R 2013-09-11 22:05:46 UTC (rev 3065) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/chart.REDD.R 2013-09-12 01:05:36 UTC (rev 3066) @@ -9,7 +9,9 @@ #'@param rf risk free rate can be vector such as government security rate of return #'@param h lookback period #'@param geometric utilize geometric chaining (TRUE) or simple/arithmetic chaining(FALSE) to aggregate returns, default is TRUE. -#'@param legend.loc set the legend.loc, as in \code{\link{plot}} +#' @param legend.loc places a legend into one of nine locations on the chart: +#' bottomright, bottom, bottomleft, left, topleft, top, topright, right, or +#' center. #'@param colorset set the colorset label, as in \code{\link{plot}} #'@param \dots any other variable #'@author Pulkit Mehrotra @@ -19,8 +21,9 @@ #'Control Maximum Drawdown - The Case of Risk Based Dynamic Asset Allocation (February 25, 2012) #'@examples #'data(edhec) -#'chart.REDD(edhec,0.08,20) -#' +#'chart.REDD(edhec,0.08,20,legend.loc = "topleft") +#'data(managers) +#'chart.REDD(managers,0.08,20,legend.loc = "topleft") #'@export chart.REDD<-function(R,rf,h, geometric = TRUE,legend.loc = NULL, colorset = (1:12),...) @@ -34,7 +37,7 @@ # free return(rf) and the lookback period(h) is taken as the input. 
- rolldrawdown = rollDrawdown(R,geometric = TRUE,weights = NULL,rf,h) + rolldrawdown = rollDrawdown(R,geometric = TRUE,rf,h) chart.TimeSeries(rolldrawdown, colorset = colorset, legend.loc = legend.loc, ...) } Added: pkg/PerformanceAnalytics/sandbox/pulkit/R/gpdmle.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/gpdmle.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/gpdmle.R 2013-09-12 01:05:36 UTC (rev 3066) @@ -0,0 +1,172 @@ +## This function comes from the package "POT" . The gpd function +## corresponds to the gpdmle function. So, I'm very gratefull to Mathieu Ribatet. +#'@useDynLib noniid.pm +gpd <- function(x, threshold, start, ..., + std.err.type = "observed", corr = FALSE, + method = "BFGS", warn.inf = TRUE){ + + if (all(c("observed", "expected", "none") != std.err.type)) + stop("``std.err.type'' must be one of 'observed', 'expected' or 'none'") + + nlpot <- function(scale, shape) { + -.C("gpdlik", exceed, nat, threshold, scale, + shape, dns = double(1))$dns + } + + nn <- length(x) + + threshold <- rep(threshold, length.out = nn) + + high <- (x > threshold) & !is.na(x) + threshold <- as.double(threshold[high]) + exceed <- as.double(x[high]) + nat <- length(exceed) + + if(!nat) stop("no data above threshold") + + pat <- nat/nn + param <- c("scale", "shape") + + if(missing(start)) { + + start <- list(scale = 0, shape = 0) + start$scale <- mean(exceed) - min(threshold) + + start <- start[!(param %in% names(list(...)))] + + } + + if(!is.list(start)) + stop("`start' must be a named list") + + if(!length(start)) + stop("there are no parameters left to maximize over") + + nm <- names(start) + l <- length(nm) + f <- formals(nlpot) + names(f) <- param + m <- match(nm, param) + + if(any(is.na(m))) + stop("`start' specifies unknown arguments") + + formals(nlpot) <- c(f[m], f[-m]) + nllh <- function(p, ...) nlpot(p, ...) 
+ + if(l > 1) + body(nllh) <- parse(text = paste("nlpot(", paste("p[",1:l, + "]", collapse = ", "), ", ...)")) + + fixed.param <- list(...)[names(list(...)) %in% param] + + if(any(!(param %in% c(nm,names(fixed.param))))) + stop("unspecified parameters") + + start.arg <- c(list(p = unlist(start)), fixed.param) + if( warn.inf && do.call("nllh", start.arg) == 1e6 ) + warning("negative log-likelihood is infinite at starting values") + + opt <- optim(start, nllh, hessian = TRUE, ..., method = method) + + if ((opt$convergence != 0) || (opt$value == 1e6)) { + warning("optimization may not have succeeded") + if(opt$convergence == 1) opt$convergence <- "iteration limit reached" + } + + else opt$convergence <- "successful" + + if (std.err.type != "none"){ + + tol <- .Machine$double.eps^0.5 + + if(std.err.type == "observed") { + + var.cov <- qr(opt$hessian, tol = tol) + if(var.cov$rank != ncol(var.cov$qr)){ + warning("observed information matrix is singular; passing std.err.type to ``expected''") + obs.fish <- FALSE + return + } + + if (std.err.type == "observed"){ + var.cov <- try(solve(var.cov, tol = tol), silent = TRUE) + + if(!is.matrix(var.cov)){ + warning("observed information matrix is singular; passing std.err.type to ''none''") + std.err.type <- "expected" + return + } + + else{ + std.err <- diag(var.cov) + if(any(std.err <= 0)){ + warning("observed information matrix is singular; passing std.err.type to ``expected''") + std.err.type <- "expected" + return + } + + std.err <- sqrt(std.err) + + if(corr) { + .mat <- diag(1/std.err, nrow = length(std.err)) + corr.mat <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm)) + diag(corr.mat) <- rep(1, length(std.err)) + } + else { + corr.mat <- NULL + } + } + } + } + + if (std.err.type == "expected"){ + + shape <- opt$par[2] + scale <- opt$par[1] + a22 <- 2/((1+shape)*(1+2*shape)) + a12 <- 1/(scale*(1+shape)*(1+2*shape)) + a11 <- 1/((scale^2)*(1+2*shape)) + ##Expected Matix of Information of Fisher + expFisher <- 
nat * matrix(c(a11,a12,a12,a22),nrow=2) + + expFisher <- qr(expFisher, tol = tol) + var.cov <- solve(expFisher, tol = tol) + std.err <- sqrt(diag(var.cov)) + + if(corr) { + .mat <- diag(1/std.err, nrow = length(std.err)) + corr.mat <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm)) + diag(corr.mat) <- rep(1, length(std.err)) + } + else + corr.mat <- NULL + } + + colnames(var.cov) <- nm + rownames(var.cov) <- nm + names(std.err) <- nm + } + + else{ + std.err <- std.err.type <- corr.mat <- NULL + var.cov <- NULL + } + + + param <- c(opt$par, unlist(fixed.param)) + scale <- param["scale"] + + var.thresh <- !all(threshold == threshold[1]) + + if (!var.thresh) + threshold <- threshold[1] + + list(fitted.values = opt$par, std.err = std.err, std.err.type = std.err.type, + var.cov = var.cov, fixed = unlist(fixed.param), param = param, + deviance = 2*opt$value, corr = corr.mat, convergence = opt$convergence, + counts = opt$counts, message = opt$message, threshold = threshold, + nat = nat, pat = pat, data = x, exceed = exceed, scale = scale, + var.thresh = var.thresh, est = "MLE", logLik = -opt$value, + opt.value = opt$value, hessian = opt$hessian) +} Modified: pkg/PerformanceAnalytics/sandbox/pulkit/R/redd.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/R/redd.R 2013-09-11 22:05:46 UTC (rev 3065) +++ pkg/PerformanceAnalytics/sandbox/pulkit/R/redd.R 2013-09-12 01:05:36 UTC (rev 3066) @@ -27,7 +27,8 @@ #'@examples #'data(edhec) #'rollDrawdown(edhec,0.08,100) -#' +#'data(managers) +#'rollDrawdown(managers[,1:9],managers[,10],10) #' @export rollDrawdown<-function(R,Rf,h, geometric = TRUE,...) 
{ Added: pkg/PerformanceAnalytics/sandbox/pulkit/man/DrawdownGPD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/DrawdownGPD.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/DrawdownGPD.Rd 2013-09-12 01:05:36 UTC (rev 3066) @@ -0,0 +1,91 @@ +\name{DrawdownGPD} +\alias{DrawdownGPD} +\title{Modelling Drawdown using Extreme Value Theory + +It has been shown empirically that Drawdowns can be modelled using Modified Generalized Pareto +distribution(MGPD), Generalized Pareto Distribution(GPD) and other particular cases of MGPD such +as weibull distribution \eqn{MGPD(\gamma,0,\psi)} and unit exponential distribution\eqn{MGPD(1,0,\psi)} + +Modified Generalized Pareto Distribution is given by the following formula + +\deqn{ +G_{\eta}(m) = \begin{array}{l} 1-(1+\eta\frac{m^\gamma}{\psi})^(-1/\eta), if \eta \neq 0 \\ 1- e^{-frac{m^\gamma}{\psi}}, if \eta = 0,\end{array}} + +Here \eqn{\gamma{\epsilon}R} is the modifying parameter. When \eqn{\gamma<1} the corresponding densities are +strictly decreasing with heavier tail; the GDP is recovered by setting \eqn{\gamma = 1} .\eqn{\gamma \textgreater 1} + +The GDP is given by the following equation. \eqn{MGPD(1,\eta,\psi)} + +\deqn{G_{\eta}(m) = \begin{array}{l} 1-(1+\eta\frac{m}{\psi})^(-1/\eta), if \eta \neq 0 \\ 1- e^{-frac{m}{\psi}}, if \eta = 0,\end{array}} + +The weibull distribution is given by the following equation \eqn{MGPD(\gamma,0,\psi)} + +\deqn{G(m) = 1- e^{-frac{m^\gamma}{\psi}}} + +In this function generalized Pareto distribution has been covered. 
This function can be +expanded in the future to include more Extreme Value distributions as the literature on such distribution +matures in the future.} +\usage{ + DrawdownGPD(R, threshold = 0.9) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset return} + + \item{threshold}{The threshold beyond which the drawdowns + have to be modelled} +} +\description{ + Modelling Drawdown using Extreme Value Theory + + It has been shown empirically that Drawdowns can be + modelled using Modified Generalized Pareto + distribution(MGPD), Generalized Pareto Distribution(GPD) + and other particular cases of MGPD such as weibull + distribution \eqn{MGPD(\gamma,0,\psi)} and unit + exponential distribution\eqn{MGPD(1,0,\psi)} + + Modified Generalized Pareto Distribution is given by the + following formula + + \deqn{ G_{\eta}(m) = \begin{array}{l} + 1-(1+\eta\frac{m^\gamma}{\psi})^(-1/\eta), if \eta \neq 0 + \\ 1- e^{-frac{m^\gamma}{\psi}}, if \eta = 0,\end{array}} + + Here \eqn{\gamma{\epsilon}R} is the modifying parameter. + When \eqn{\gamma<1} the corresponding densities are + strictly decreasing with heavier tail; the GDP is + recovered by setting \eqn{\gamma = 1} .\eqn{\gamma + \textgreater 1} + + The GDP is given by the following equation. + \eqn{MGPD(1,\eta,\psi)} + + \deqn{G_{\eta}(m) = \begin{array}{l} + 1-(1+\eta\frac{m}{\psi})^(-1/\eta), if \eta \neq 0 \\ 1- + e^{-frac{m}{\psi}}, if \eta = 0,\end{array}} + + The weibull distribution is given by the following + equation \eqn{MGPD(\gamma,0,\psi)} + + \deqn{G(m) = 1- e^{-frac{m^\gamma}{\psi}}} + + In this function generalized Pareto distribution has been + covered. This function can be expanded in the future to + include more Extreme Value distributions as the + literature on such distribution matures in the future. +} +\examples{ +data(edhec) +DrawdownGPD(edhec) +data(managers) +DrawdownGPD(managers[,1:9],0.95) +} +\references{ + Mendes, Beatriz V.M. 
and Leal, Ricardo P.C., Maximum + Drawdown: Models and Applications (November 2003). + Coppead Working Paper Series No. 359.Available at SSRN: + http://ssrn.com/abstract=477322 or + http://dx.doi.org/10.2139/ssrn.477322. +} + Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd 2013-09-11 22:05:46 UTC (rev 3065) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/chart.Penance.Rd 2013-09-12 01:05:36 UTC (rev 3066) @@ -2,11 +2,12 @@ \alias{chart.Penance} \title{Penance vs phi plot} \usage{ - chart.Penance(R, confidence, type = c("ar", "normal"), - reference.grid = TRUE, main = NULL, ylab = NULL, - xlab = NULL, element.color = "darkgrey", lwd = 2, - pch = 1, cex = 1, cex.axis = 0.8, cex.lab = 1, - cex.main = 1, xlim = NULL, ylim = NULL, ...) + chart.Penance(R, confidence = 0.95, + type = c("ar", "normal"), reference.grid = TRUE, + main = NULL, ylab = NULL, xlab = NULL, + element.color = "darkgrey", lwd = 2, pch = 1, cex = 1, + cex.axis = 0.8, cex.lab = 1, cex.main = 1, xlim = NULL, + ylim = NULL, ...) 
} \arguments{ \item{R}{an xts, vector, matrix, data frame, timeSeries Modified: pkg/PerformanceAnalytics/sandbox/pulkit/man/rollDrawdown.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/man/rollDrawdown.Rd 2013-09-11 22:05:46 UTC (rev 3065) +++ pkg/PerformanceAnalytics/sandbox/pulkit/man/rollDrawdown.Rd 2013-09-12 01:05:36 UTC (rev 3066) @@ -37,6 +37,8 @@ \examples{ data(edhec) rollDrawdown(edhec,0.08,100) +data(managers) +rollDrawdown(managers[,1:9],managers[,10],10) } \author{ Pulkit Mehrotra Added: pkg/PerformanceAnalytics/sandbox/pulkit/src/gpd.c =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/src/gpd.c (rev 0) +++ pkg/PerformanceAnalytics/sandbox/pulkit/src/gpd.c 2013-09-12 01:05:36 UTC (rev 3066) @@ -0,0 +1,41 @@ +#include +#include +#include + + +void gpdlik(double *data, int *n, double *loc, double *scale, + double *shape, double *dns) +{ + int i; + double *dvec; + + dvec = (double *)R_alloc(*n, sizeof(double)); + + if(*scale <= 0) { + *dns = -1e6; + return; + } + + for(i=0;i<*n;i++) { + data[i] = (data[i] - loc[i]) / *scale; + if (data[i] <= 0) { + *dns = -1e6; + return; + } + if(fabs(*shape) <= 1e-6){ + *shape = 0; + dvec[i] = -log(*scale) - data[i]; + } + else { + data[i] = 1 + *shape * data[i]; + if(data[i] <= 0) { + *dns = -1e6; + return; + } + dvec[i] = -log(*scale) - (1 / *shape + 1) * log(data[i]); + } + } + + for(i=0;i<*n;i++) + *dns = *dns + dvec[i]; +} Modified: pkg/PerformanceAnalytics/sandbox/pulkit/src/moment.c =================================================================== --- pkg/PerformanceAnalytics/sandbox/pulkit/src/moment.c 2013-09-11 22:05:46 UTC (rev 3065) +++ pkg/PerformanceAnalytics/sandbox/pulkit/src/moment.c 2013-09-12 01:05:36 UTC (rev 3066) @@ -56,5 +56,4 @@ return Rsum; } - From noreply at r-forge.r-project.org Thu Sep 12 05:58:14 2013 From: noreply at r-forge.r-project.org (noreply at 
r-forge.r-project.org) Date: Thu, 12 Sep 2013 05:58:14 +0200 (CEST) Subject: [Returnanalytics-commits] r3067 - in pkg/PortfolioAnalytics: . R man Message-ID: <20130912035814.5346C1852AD@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-12 05:58:13 +0200 (Thu, 12 Sep 2013) New Revision: 3067 Added: pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/charts.risk.R pkg/PortfolioAnalytics/man/chart.RiskBudget.Rd pkg/PortfolioAnalytics/man/chart.RiskReward.Rd pkg/PortfolioAnalytics/man/chart.Weights.Rd Log: Making chart.RiskBudget a generic function. Adding chart.RiskBudget.opt.list function to chart risk contribution for opt.list objects. Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-12 01:05:36 UTC (rev 3066) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-12 03:58:13 UTC (rev 3067) @@ -82,6 +82,8 @@ S3method(chart.EfficientFrontier,efficient.frontier) S3method(chart.EfficientFrontier,optimize.portfolio.ROI) S3method(chart.EfficientFrontier,optimize.portfolio) +S3method(chart.RiskBudget,opt.list) +S3method(chart.RiskBudget,optimize.portfolio) S3method(chart.RiskReward,opt.list) S3method(chart.RiskReward,optimize.portfolio.DEoptim) S3method(chart.RiskReward,optimize.portfolio.GenSA) Modified: pkg/PortfolioAnalytics/R/charts.risk.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-12 01:05:36 UTC (rev 3066) +++ pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-12 03:58:13 UTC (rev 3067) @@ -1,6 +1,18 @@ -#' Chart risk contribution or percent contribution +#' Generic method to chart risk contribution #' +#' This function is the generic method to chart risk budget objectives for +#' \code{optimize.portfolio} and \code{opt.list} objects. 
+#' +#' @param object optimal portfolio object created by \code{\link{optimize.portfolio}} +#' @param ... passthrough parameters to \code{\link{plot}} +#' @export +chart.RiskBudget <- function(object, ...){ + UseMethod("chart.RiskBudget") +} + +#' Chart risk contribution of an \code{optimize.portfolio} object +#' #' This function charts the contribution or percent contribution of the resulting #' objective measures in \code{risk_budget_objectives}. #' @@ -32,8 +44,9 @@ #' } #' @param ylim set the y-axis limit, same as in \code{\link{plot}} #' @author Ross Bennett -#' @export -chart.RiskBudget <- function(object, neighbors=NULL, ..., risk.type="absolute", main="Risk Contribution", ylab="", xlab=NULL, cex.axis=0.8, cex.lab=0.8, element.color="darkgray", las=3, ylim=NULL){ +#' @method chart.RiskBudget optimize.portfolio +#' @S3method chart.RiskBudget optimize.portfolio +chart.RiskBudget.optimize.portfolio <- function(object, ..., neighbors=NULL, risk.type="absolute", main="Risk Contribution", ylab="", xlab=NULL, cex.axis=0.8, cex.lab=0.8, element.color="darkgray", las=3, ylim=NULL){ if(!inherits(object, "optimize.portfolio")) stop("object must be of class optimize.portfolio") portfolio <- object$portfolio # class of each objective @@ -186,6 +199,136 @@ box(col = element.color) } # end for loop of risk_budget_objective } # end plot for pct_contrib risk.type +} + +#' Chart risk contribution of an \code{opt.list} object +#' +#' This function charts the absolute contribution or percent contribution of +#' the resulting objective measures in the \code{opt.list} object. +#' +#' @param object list of optimal portfolio objects created by \code{\link{optimizations.combine}} +#' @param \dots any other passthru parameter +#' @param match.col string of risk column to match. The \code{opt.list} object +#' may contain risk budgets for ES or StdDev and this will match the proper +#' column names (e.g. ES.contribution). 
+#' @param risk.type "absolute" or "percentage" plot risk contribution in absolute terms or percentage contribution +#' @param main main title for the chart +#' @param plot.type "line" or "barplot" +#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} +#' @param cex.lab The magnification to be used for axis annotation relative to the current setting of \code{cex} +#' @param element.color color for the default plot lines +#' @param las numeric in \{0,1,2,3\}; the style of axis labels +#' \describe{ +#' \item{0:}{always parallel to the axis [\emph{default}],} +#' \item{1:}{always horizontal,} +#' \item{2:}{always perpendicular to the axis,} +#' \item{3:}{always vertical.} +#' } +#' @param ylim set the y-axis limit, same as in \code{\link{plot}} +#' @param colorset color palette or vector of colors to use +#' @param legend.loc legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted +#' @param cex.legend The magnification to be used for the legend relative to the current setting of \code{cex} +#' @author Ross Bennett +#' @method chart.RiskBudget opt.list +#' @S3method chart.RiskBudget opt.list +chart.RiskBudget.opt.list <- function(object, ..., match.col="ES", risk.type="absolute", main="Risk Budget", plot.type="line", cex.axis=0.8, cex.lab=0.8, element.color="darkgray", las=3, ylim=NULL, colorset=NULL, legend.loc=NULL, cex.legend=0.8){ + if(!inherits(object, "opt.list")) stop("object must be of class 'opt.list'") + xtract <- extractObjectiveMeasures(object) + if(risk.type == "absolute"){ + # get the index of columns with risk budget + rbcols <- grep(paste(match.col, "contribution", sep="."), colnames(xtract)) + dat <- na.omit(xtract[, rbcols]) + opt_names <- rownames(dat) + # remove everything up to the last dot (.) 
to extract the names + colnames(dat) <- gsub("(.*)\\.", "", colnames(dat)) + + # set the colors + if(is.null(colorset)) colorset <- 1:nrow(dat) + columnnames <- colnames(dat) + numassets <- length(columnnames) + + xlab <- NULL + if(is.null(xlab)) + minmargin <- 3 + else + minmargin <- 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin <- 10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + if(is.null(ylim)) ylim <- range(dat) + + plot(dat[1,], type="n", ylim=ylim, xlab='', ylab=paste(match.col, "Contribution", sep=" "), main=main, cex.lab=cex.lab, axes=FALSE) + for(i in 1:nrow(dat)){ + points(dat[i, ], type="b", col=colorset[i], ...) # add dots here + } + + # set the axis + axis(2, cex.axis=cex.axis, col=element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) + box(col=element.color) + + # Add a legend + if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, col=colorset, lty=1, bty="n", cex=cex.legend) + } + + if(risk.type %in% c("percent", "percentage", "pct_contrib")){ + # get the index of columns with risk budget + rbcols <- grep(paste(match.col, "pct_contrib", sep="."), colnames(xtract)) + dat <- na.omit(xtract[, rbcols]) + opt_names <- rownames(dat) + # remove everything up to the last dot (.) 
to extract the names + colnames(dat) <- gsub("(.*)\\.", "", colnames(dat)) + + # set the colors + if(is.null(colorset)) colorset <- 1:nrow(dat) + + columnnames <- colnames(dat) + numassets <- length(columnnames) + + xlab <- NULL + if(is.null(xlab)) + minmargin <- 3 + else + minmargin <- 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin <- 10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + if(is.null(ylim)) ylim <- range(dat) + + plot(dat[1,], type="n", ylim=ylim, xlab='', ylab=paste(match.col, "% Contribution", sep=" "), main=main, cex.lab=cex.lab, axes=FALSE) + for(i in 1:nrow(dat)){ + points(dat[i, ], type="b", col=colorset[i], ...) 
# add dots here + } + + axis(2, cex.axis=cex.axis, col=element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) + box(col=element.color) + + # Add a legend + if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, col=colorset, lty=1, bty="n", cex=cex.legend) + } } Modified: pkg/PortfolioAnalytics/man/chart.RiskBudget.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskBudget.Rd 2013-09-12 01:05:36 UTC (rev 3066) +++ pkg/PortfolioAnalytics/man/chart.RiskBudget.Rd 2013-09-12 03:58:13 UTC (rev 3067) @@ -1,67 +1,18 @@ \name{chart.RiskBudget} \alias{chart.RiskBudget} -\title{Chart risk contribution or percent contribution} +\title{Generic method to chart risk contribution} \usage{ - chart.RiskBudget(object, neighbors = NULL, ..., - risk.type = "absolute", main = "Risk Contribution", - ylab = "", xlab = NULL, cex.axis = 0.8, cex.lab = 0.8, - element.color = "darkgray", las = 3, ylim = NULL) + chart.RiskBudget(object, ...) 
} \arguments{ \item{object}{optimal portfolio object created by \code{\link{optimize.portfolio}}} - \item{neighbors}{risk contribution or pct_contrib of - neighbor portfolios to be plotted} - \item{...}{passthrough parameters to \code{\link{plot}}} - - \item{risk.type}{plot risk contribution in absolute terms - or percentage contribution} - - \item{main}{main title for the chart} - - \item{ylab}{label for the y-axis} - - \item{xlab}{a title for the x axis: see - \code{\link{title}}} - - \item{cex.lab}{The magnification to be used for x and y - labels relative to the current setting of \code{cex}} - - \item{cex.axis}{The magnification to be used for axis - annotation relative to the current setting of \code{cex}} - - \item{element.color}{color for the default plot lines} - - \item{las}{numeric in \{0,1,2,3\}; the style of axis - labels \describe{ \item{0:}{always parallel to the axis - [\emph{default}],} \item{1:}{always horizontal,} - \item{2:}{always perpendicular to the axis,} - \item{3:}{always vertical.} }} - - \item{ylim}{set the y-axis limit, same as in - \code{\link{plot}}} } \description{ - This function charts the contribution or percent - contribution of the resulting objective measures in - \code{risk_budget_objectives}. + This function is the generic method to chart risk budget + objectives for \code{optimize.portfolio} and + \code{opt.list} objects. } -\details{ - \code{neighbors} may be specified in three ways. The - first is as a single number of neighbors. This will - extract the \code{neighbors} closest portfolios in terms - of the \code{out} numerical statistic. The second method - consists of a numeric vector for \code{neighbors}. This - will extract the \code{neighbors} with portfolio index - numbers that correspond to the vector contents. The third - method for specifying \code{neighbors} is to pass in a - matrix. 
This matrix should look like the output of - \code{\link{extractStats}}, and should contain properly - named contribution and pct_contrib columns. -} -\author{ - Ross Bennett -} Added: pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd 2013-09-12 03:58:13 UTC (rev 3067) @@ -0,0 +1,65 @@ +\name{chart.RiskBudget.opt.list} +\alias{chart.RiskBudget.opt.list} +\title{Chart risk contribution of an \code{opt.list} object} +\usage{ + \method{chart.RiskBudget}{opt.list} (object, ..., + match.col = "ES", risk.type = "absolute", + main = "Risk Budget", plot.type = "line", + cex.axis = 0.8, cex.lab = 0.8, + element.color = "darkgray", las = 3, ylim = NULL, + colorset = NULL, legend.loc = NULL, cex.legend = 0.8) +} +\arguments{ + \item{object}{list of optimal portfolio objects created + by \code{\link{optimizations.combine}}} + + \item{\dots}{any other passthru parameter} + + \item{match.col}{string of risk column to match. The + \code{opt.list} object may contain risk budgets for ES or + StdDev and this will match the proper column names (e.g. 
+ ES.contribution).} + + \item{risk.type}{"absolute" or "percentage" plot risk + contribution in absolute terms or percentage + contribution} + + \item{main}{main title for the chart} + + \item{plot.type}{"line" or "barplot"} + + \item{cex.axis}{The magnification to be used for axis + annotation relative to the current setting of \code{cex}} + + \item{cex.lab}{The magnification to be used for axis + annotation relative to the current setting of \code{cex}} + + \item{element.color}{color for the default plot lines} + + \item{las}{numeric in \{0,1,2,3\}; the style of axis + labels \describe{ \item{0:}{always parallel to the axis + [\emph{default}],} \item{1:}{always horizontal,} + \item{2:}{always perpendicular to the axis,} + \item{3:}{always vertical.} }} + + \item{ylim}{set the y-axis limit, same as in + \code{\link{plot}}} + + \item{colorset}{color palette or vector of colors to use} + + \item{legend.loc}{legend.loc NULL, "topright", "right", + or "bottomright". If legend.loc is NULL, the legend will + not be plotted} + + \item{cex.legend}{The magnification to be used for the + legend relative to the current setting of \code{cex}} +} +\description{ + This function charts the absolute contribution or percent + contribution of the resulting objective measures in the + \code{opt.list} object. 
+} +\author{ + Ross Bennett +} + Added: pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd 2013-09-12 03:58:13 UTC (rev 3067) @@ -0,0 +1,68 @@ +\name{chart.RiskBudget.optimize.portfolio} +\alias{chart.RiskBudget.optimize.portfolio} +\title{Chart risk contribution of an \code{optimize.portfolio} object} +\usage{ + \method{chart.RiskBudget}{optimize.portfolio} (object, + ..., neighbors = NULL, risk.type = "absolute", + main = "Risk Contribution", ylab = "", xlab = NULL, + cex.axis = 0.8, cex.lab = 0.8, + element.color = "darkgray", las = 3, ylim = NULL) +} +\arguments{ + \item{object}{optimal portfolio object created by + \code{\link{optimize.portfolio}}} + + \item{neighbors}{risk contribution or pct_contrib of + neighbor portfolios to be plotted} + + \item{...}{passthrough parameters to \code{\link{plot}}} + + \item{risk.type}{plot risk contribution in absolute terms + or percentage contribution} + + \item{main}{main title for the chart} + + \item{ylab}{label for the y-axis} + + \item{xlab}{a title for the x axis: see + \code{\link{title}}} + + \item{cex.lab}{The magnification to be used for x and y + labels relative to the current setting of \code{cex}} + + \item{cex.axis}{The magnification to be used for axis + annotation relative to the current setting of \code{cex}} + + \item{element.color}{color for the default plot lines} + + \item{las}{numeric in \{0,1,2,3\}; the style of axis + labels \describe{ \item{0:}{always parallel to the axis + [\emph{default}],} \item{1:}{always horizontal,} + \item{2:}{always perpendicular to the axis,} + \item{3:}{always vertical.} }} + + \item{ylim}{set the y-axis limit, same as in + \code{\link{plot}}} +} +\description{ + This function charts the contribution or percent + contribution of the resulting 
objective measures in + \code{risk_budget_objectives}. +} +\details{ + \code{neighbors} may be specified in three ways. The + first is as a single number of neighbors. This will + extract the \code{neighbors} closest portfolios in terms + of the \code{out} numerical statistic. The second method + consists of a numeric vector for \code{neighbors}. This + will extract the \code{neighbors} with portfolio index + numbers that correspond to the vector contents. The third + method for specifying \code{neighbors} is to pass in a + matrix. This matrix should look like the output of + \code{\link{extractStats}}, and should contain properly + named contribution and pct_contrib columns. +} +\author{ + Ross Bennett +} + Modified: pkg/PortfolioAnalytics/man/chart.RiskReward.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-12 01:05:36 UTC (rev 3066) +++ pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-12 03:58:13 UTC (rev 3067) @@ -8,6 +8,8 @@ \alias{chart.RiskReward.optimize.portfolio.ROI} \title{classic risk reward scatter} \usage{ + chart.RiskReward(object, ...) + \method{chart.RiskReward}{optimize.portfolio.DEoptim} (object, ..., neighbors = NULL, return.col = "mean", risk.col = "ES", chart.assets = FALSE, element.color = "darkgray", cex.axis = 0.8, @@ -33,8 +35,6 @@ element.color = "darkgray", cex.axis = 0.8, ylim = NULL, xlim = NULL, rp = FALSE) - chart.RiskReward(object, ...) 
- \method{chart.RiskReward}{opt.list} (object, ..., risk.col = "ES", return.col = "mean", main = "", ylim = NULL, xlim = NULL, labels.assets = TRUE, Modified: pkg/PortfolioAnalytics/man/chart.Weights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-12 01:05:36 UTC (rev 3066) +++ pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-12 03:58:13 UTC (rev 3067) @@ -9,6 +9,10 @@ \alias{chart.Weights.optimize.portfolio.RP} \title{boxplot of the weights of the optimal portfolios} \usage{ + chart.Weights(object, neighbors = NULL, ..., + main = "Weights", las = 3, xlab = NULL, cex.lab = 1, + element.color = "darkgray", cex.axis = 0.8) + \method{chart.Weights}{optimize.portfolio.DEoptim} (object, neighbors = NULL, ..., main = "Weights", las = 3, xlab = NULL, cex.lab = 1, element.color = "darkgray", cex.axis = 0.8, @@ -42,10 +46,6 @@ legend.loc = "topright", cex.legend = 0.8, plot.type = "line") - chart.Weights(object, neighbors = NULL, ..., - main = "Weights", las = 3, xlab = NULL, cex.lab = 1, - element.color = "darkgray", cex.axis = 0.8) - \method{chart.Weights}{opt.list} (object, neighbors = NULL, ..., main = "Weights", las = 3, xlab = NULL, cex.lab = 1, element.color = "darkgray", From noreply at r-forge.r-project.org Thu Sep 12 19:43:38 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Sep 2013 19:43:38 +0200 (CEST) Subject: [Returnanalytics-commits] r3068 - pkg/PortfolioAttribution Message-ID: <20130912174338.CCA14184D4D@r-forge.r-project.org> Author: peter_carl Date: 2013-09-12 19:43:38 +0200 (Thu, 12 Sep 2013) New Revision: 3068 Modified: pkg/PortfolioAttribution/DESCRIPTION Log: - improved description - added FinancialInstrument under suggests (for buildHeirarchy) - added mentors as Contributors Modified: pkg/PortfolioAttribution/DESCRIPTION =================================================================== --- pkg/PortfolioAttribution/DESCRIPTION 
2013-09-12 03:58:13 UTC (rev 3067) +++ pkg/PortfolioAttribution/DESCRIPTION 2013-09-12 17:43:38 UTC (rev 3068) @@ -1,20 +1,20 @@ Package: PortfolioAttribution Type: Package -Title: Econometric tools for performance and risk analysis. +Title: Performance attribution tools used for identifying sources of portfolio return and risk. Version: 0.3 Date: $Date$ Author: Andrii Babii Maintainer: Andrii Babii -Description: This package provides functions for the ex-post Portfolio Attribution - methods from Bacon (2004), Carino (2009), etc. - The package was initially created as a part of the Google Summer of Code (GSoC) 2012 project. +Description: This package provides functions for the ex-post portfolio attribution methods described in Christopherson, Carino and Ferson (2009), Bacon (2008), and several other sources. The package was initially created as a part of the Google Summer of Code (GSoC) 2012 project. Depends: R (>= 2.15.0), zoo, xts (>= 0.8), PerformanceAnalytics(>= 1.0.4.3) Suggests: - plyr + plyr, + FinancialInstrument License: GPL URL: http://r-forge.r-project.org/projects/returnanalytics/ +Contributors: David Carino, Doug Martin, Brian Peterson, Peter Carl Copyright: (c) 2004-2013 From noreply at r-forge.r-project.org Thu Sep 12 19:46:42 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Sep 2013 19:46:42 +0200 (CEST) Subject: [Returnanalytics-commits] r3069 - in pkg: PerformanceAnalytics/R PerformanceAnalytics/man PortfolioAttribution PortfolioAttribution/R PortfolioAttribution/man Message-ID: <20130912174642.19557184602@r-forge.r-project.org> Author: braverock Date: 2013-09-12 19:46:41 +0200 (Thu, 12 Sep 2013) New Revision: 3069 Added: pkg/PerformanceAnalytics/R/CAPM.dynamic.R pkg/PerformanceAnalytics/R/MarketTiming.R pkg/PerformanceAnalytics/R/Modigliani.R pkg/PerformanceAnalytics/man/CAPM.dynamic.Rd pkg/PerformanceAnalytics/man/MarketTiming.Rd pkg/PerformanceAnalytics/man/Modigliani.Rd Removed: 
pkg/PortfolioAttribution/R/CAPM.dynamic.R pkg/PortfolioAttribution/R/MarketTiming.R pkg/PortfolioAttribution/R/Modigliani.R pkg/PortfolioAttribution/man/CAPM.dynamic.Rd pkg/PortfolioAttribution/man/MarketTiming.Rd pkg/PortfolioAttribution/man/Modigliani.Rd Modified: pkg/PortfolioAttribution/NAMESPACE Log: - move CAPM.dynamic, MarketTiming, and Modigliani fns from PortfolioAttribution to PerformanceAnalytics Copied: pkg/PerformanceAnalytics/R/CAPM.dynamic.R (from rev 3067, pkg/PortfolioAttribution/R/CAPM.dynamic.R) =================================================================== --- pkg/PerformanceAnalytics/R/CAPM.dynamic.R (rev 0) +++ pkg/PerformanceAnalytics/R/CAPM.dynamic.R 2013-09-12 17:46:41 UTC (rev 3069) @@ -0,0 +1,101 @@ +#' Time-varying conditional beta +#' +#' CAPM is estimated assuming that betas and alphas change over time. It is +#' assumed that the market prices of securities fully reflect readily available +#' and public information. A matrix of market information variables, \eqn{Z} +#' measures this information. Possible variables in \eqn{Z} could be the +#' divident yield, Tresaury yield, etc. The betas of stocks and managed +#' portfolios are allowed to change with market conditions: +#' \deqn{\beta_{p}(z_{t})=b_{0p}+B_{p}'z_{t}}{beta(zt) = b0 + Bp'zt} +#' where \eqn{z_{t}=Z_{t}-E[Z]}{zt = Zt - E[Z]} - a normalized vector of the +#' deviations of \eqn{Z_{t}}{Zt}, \eqn{B_{p}}{Bp} - a vector with the same +#' dimension as \eqn{Z_{t}}{Zt}. The coefficient \eqn{b_{0p}}{b0} can be +#' interpreted as the "average beta" or the beta when all infromation variables +#' are at their means. The elements of \eqn{B_{p}}{Bp} measure the sensitivity +#' of the conditional beta to the deviations of the \eqn{Z_{t}}{Zt} from their +#' means. 
+#' In the similar way the time-varying conditional alpha is modeled: +#' \deqn{\alpha_{pt}=\alpha_{p}(z_{t})=\alpha_{0p}+A_{p}'z_{t}}{alpha(zt) = +#' a0 + Ap'zt} +#' The modified regression is therefore: +#' \deqn{r_{pt+1}=\alpha_{0p}+A_{p}'z_{t}+b_{0p}r_{bt+1}+B_{p}'[z_{t}r_{bt+1}]+ +#' \mu_{pt+1}} +#' +#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of +#' the asset returns +#' @param Rb an xts, vector, matrix, data frame, timeSeries or zoo object of +#' the benchmark asset return +#' @param Rf risk free rate, in same period as your returns +#' @param Z an xts, vector, matrix, data frame, timeSeries or zoo object of +#' k variables that reflect public information +#' @param lags number of lags before the current period on which the alpha and +#' beta are conditioned +#' @param \dots any other passthrough parameters +#' @author Andrii Babii +#' @seealso \code{\link{CAPM.beta}} +#' @references J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio +#' Performance Measurement and Benchmarking}. 2009. McGraw-Hill. Chapter 12. +#' \cr Wayne E. Ferson and Rudi Schadt, "Measuring Fund Strategy and +#' Performance in Changing Economic Conditions," \emph{Journal of Finance}, +#' vol. 51, 1996, pp.425-462 \cr +#' @examples +#' +#' data(managers) +#' CAPM.dynamic(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12, Z=managers[, 9:10]) +#' CAPM.dynamic(managers[80:120,1:6], managers[80:120,7,drop=FALSE], Rf=managers[80:120,10,drop=FALSE], Z=managers[80:120, 9:10]) +#' CAPM.dynamic(managers[80:120,1:6], managers[80:120,8:7], managers[80:120,10,drop=FALSE], Z=managers[80:120, 9:10]) +#' +#' @export +CAPM.dynamic <- function (Ra, Rb, Rf = 0, Z, lags = 1, ...) 
+{ # @author Andrii Babii + + # FUNCTION + + Ra = checkData(Ra) + Rb = checkData(Rb) + Z = checkData(Z) + Z = na.omit(Z) + if (!is.null(dim(Rf))) + Rf = checkData(Rf) + Ra.ncols = NCOL(Ra) + Rb.ncols = NCOL(Rb) + pairs = expand.grid(1:Ra.ncols) + + xRa = Return.excess(Ra, Rf)[1:(nrow(Ra) - 1)] + xRb = Return.excess(Rb, Rf)[1:(nrow(Rb) - 1)] + z = Z - matrix(rep(mean(Z), nrow(Z)), nrow(Z), ncol(Z), byrow = TRUE) + # Construct the matrix with information regressors (lagged values) + inform = lag(z) + if (lags > 1){ + for (i in 2:lags) { + inform = cbind(inform, lag(z, i)) + } + } + z = inform[(lags + 1):nrow(z), ] + + dynamic <- function (xRa, xRb, z){ + y = xRa[1:nrow(z)] + X = cbind(z, coredata(xRb[1:nrow(z)]), z * matrix(rep(xRb[1:nrow(z)], ncol(z)), nrow(z), ncol(z))) + X.df = as.data.frame(X) + model = lm(xRa[1:nrow(z)] ~ 1 + ., data = X.df) + return(coef(model)) + } + result = apply(pairs, 1, FUN = function(n, xRa, xRb, z) + dynamic(xRa[, n[1]], xRb[, 1], z), xRa = xRa, xRb = xRb, z = z) + result = t(result) + + if (ncol(Rb) > 1){ + for (i in 2:ncol(xRb)){ + res = apply(pairs, 1, FUN = function(n, xRa, xRb, z) + dynamic(xRa[, n[1]], xRb[, i], z), xRa = xRa, xRb = xRb, z = z) + res = t(res) + result = rbind(result, res) + } + } + + a = paste(rep(colnames(Z), lags), "alpha at t -", expand.grid(1:ncol(Z), 1:lags)[, 2]) + b = paste(rep(colnames(Z), lags), "beta at t -", expand.grid(1:ncol(Z), 1:lags)[, 2]) + colnames(result) = c("Average alpha", a, "Average beta", b) + rownames(result) = paste(rep(colnames(Ra), ncol(Rb)), "to", rep(colnames(Rb), each = ncol(Ra))) + return(result) +} \ No newline at end of file Copied: pkg/PerformanceAnalytics/R/MarketTiming.R (from rev 3067, pkg/PortfolioAttribution/R/MarketTiming.R) =================================================================== --- pkg/PerformanceAnalytics/R/MarketTiming.R (rev 0) +++ pkg/PerformanceAnalytics/R/MarketTiming.R 2013-09-12 17:46:41 UTC (rev 3069) @@ -0,0 +1,96 @@ +#' Market timing models +#' +#' 
Allows to estimate Treynor-Mazuy or Merton-Henriksson market timing model. +#' The Treynor-Mazuy model is essentially a quadratic extension of the basic +#' CAPM. It is estimated using a multiple regression. The second term in the +#' regression is the value of excess return squared. If the gamma coefficient +#' in the regression is positive, then the estimated equation describes a +#' convex upward-sloping regression "line". The quadratic regression is: +#' \deqn{R_{p}-R_{f}=\alpha+\beta (R_{b} - R_{f})+\gamma (R_{b}-R_{f})^2+ +#' \varepsilon_{p}}{Rp - Rf = alpha + beta(Rb -Rf) + gamma(Rb - Rf)^2 + +#' epsilonp} +#' \eqn{\gamma}{gamma} is a measure of the curvature of the regression line. +#' If \eqn{\gamma}{gamma} is positive, this would indicate that the manager's +#' investment strategy demonstrates market timing ability. +#' +#' The basic idea of the Merton-Henriksson test is to perform a multiple +#' regression in which the dependent variable (portfolio excess return and a +#' second variable that mimics the payoff to an option). This second variable +#' is zero when the market excess return is at or below zero and is 1 when it +#' is above zero: +#' \deqn{R_{p}-R_{f}=\alpha+\beta (R_{b}-R_{f})+\gamma D+\varepsilon_{p}}{Rp - +#' Rf = alpha + beta * (Rb - Rf) + gamma * D + epsilonp} +#' where all variables are familiar from the CAPM model, except for the +#' up-market return \eqn{D=max(0,R_{b}-R_{f})}{D = max(0, Rb - Rf)} and market +#' timing abilities \eqn{\gamma}{gamma} +#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of +#' the asset returns +#' @param Rb an xts, vector, matrix, data frame, timeSeries or zoo object of +#' the benchmark asset return +#' @param Rf risk free rate, in same period as your returns +#' @param method used to select between Treynor-Mazuy and Henriksson-Merton +#' models. 
May be any of: \itemize{ \item TM - Treynor-Mazuy model, +#' \item HM - Henriksson-Merton model} By default Treynor-Mazuy is selected +#' @param \dots any other passthrough parameters +#' @author Andrii Babii, Peter Carl +#' @seealso \code{\link{CAPM.beta}} +#' @references J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio +#' Performance Measurement and Benchmarking}. 2009. McGraw-Hill, p. 127-133. +#' \cr J. L. Treynor and K. Mazuy, "Can Mutual Funds Outguess the Market?" +#' \emph{Harvard Business Review}, vol44, 1966, pp. 131-136 +#' \cr Roy D. Henriksson and Robert C. Merton, "On Market Timing and Investment +#' Performance. II. Statistical Procedures for Evaluating Forecast Skills," +#' \emph{Journal of Business}, vol.54, October 1981, pp.513-533 \cr +#' @examples +#' +#' data(managers) +#' MarketTiming(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12, method = "HM") +#' MarketTiming(managers[80:120,1:6], managers[80:120,7,drop=FALSE], managers[80:120,10,drop=FALSE]) +#' MarketTiming(managers[80:120,1:6], managers[80:120,8:7], managers[80:120,10,drop=FALSE], method = "TM") +#' +#' @export +MarketTiming <- function (Ra, Rb, Rf = 0, method = c("TM", "HM")) +{ # @author Andrii Babii, Peter Carl + + # FUNCTION + + Ra = checkData(Ra) + Rb = checkData(Rb) + if (!is.null(dim(Rf))) + Rf = checkData(Rf) + Ra.ncols = NCOL(Ra) + Rb.ncols = NCOL(Rb) + pairs = expand.grid(1:Ra.ncols, 1) + method = method[1] + xRa = Return.excess(Ra, Rf) + xRb = Return.excess(Rb, Rf) + + mt <- function (xRa, xRb) + { + switch(method, + "HM" = { S = xRb > 0 }, + "TM" = { S = xRb } + ) + R = merge(xRa, xRb, xRb*S) + R.df = as.data.frame(R) + model = lm(R.df[, 1] ~ 1 + ., data = R.df[, -1]) + return(coef(model)) + } + + result = apply(pairs, 1, FUN = function(n, xRa, xRb) + mt(xRa[, n[1]], xRb[, 1]), xRa = xRa, xRb = xRb) + result = t(result) + + if (ncol(Rb) > 1){ + for (i in 2:ncol(xRb)){ + res = apply(pairs, 1, FUN = function(n, xRa, xRb) + mt(xRa[, n[1]], xRb[, i]), xRa 
= xRa, xRb = xRb) + res = t(res) + result = rbind(result, res) + } + } + + rownames(result) = paste(rep(colnames(Ra), ncol(Rb)), "to", rep(colnames(Rb), each = ncol(Ra))) + colnames(result) = c("Alpha", "Beta", "Gamma") + return(result) +} \ No newline at end of file Copied: pkg/PerformanceAnalytics/R/Modigliani.R (from rev 3067, pkg/PortfolioAttribution/R/Modigliani.R) =================================================================== --- pkg/PerformanceAnalytics/R/Modigliani.R (rev 0) +++ pkg/PerformanceAnalytics/R/Modigliani.R 2013-09-12 17:46:41 UTC (rev 3069) @@ -0,0 +1,60 @@ +#' Modigliani-Modigliani measure +#' +#' The Modigliani-Modigliani measure is the portfolio return adjusted upward +#' or downward to match the benchmark's standard deviation. This puts the +#' portfolio return and the benchmark return on 'equal footing' from a standard +#' deviation perspective. +#' \deqn{MM_{p}=\frac{E[R_{p} - R_{f}]}{\sigma_{p}}=SR_{p} * \sigma_{b} + +#' E[R_{f}]}{MMp = SRp * sigmab + E[Rf]} +#' where \eqn{SR_{p}}{SRp} - Sharpe ratio, \eqn{\sigma_{b}}{sigmab} - benchmark +#' standard deviation +#' +#' This is also analogous to some approaches to 'risk parity' portfolios, which +#' use (presumably costless) leverage to increase the portfolio standard +#' deviation to some target. +#' +#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of +#' asset returns +#' @param Rb return vector of the benchmark asset +#' @param Rf risk free rate, in same period as your returns +#' @param \dots any other passthrough parameters +#' @author Andrii Babii, Brian G. Peterson +#' @references J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio +#' Performance Measurement and Benchmarking}. 2009. McGraw-Hill, p. 97-99. 
\cr +#' Franco Modigliani and Leah Modigliani, "Risk-Adjusted Performance: How to +#' Measure It and Why," \emph{Journal of Portfolio Management}, vol.23, no., +#' Winter 1997, pp.45-54 \cr +#' @seealso \code{\link{SharpeRatio}}, \code{\link{TreynorRatio}} +#' @examples +#' +#' data(managers) +#' Modigliani(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12) +#' Modigliani(managers[,1:6], managers[,8,drop=FALSE], managers[,8,drop=FALSE]) +#' Modigliani(managers[,1:6], managers[,8:7], managers[,8,drop=FALSE]) +#' +#' @export +Modigliani <- function (Ra, Rb, Rf=0, ...) +{ # @author Andrii Babii, Brian G. Peterson + Ra = checkData(Ra) + Rb = checkData(Rb) + if (!is.null(dim(Rf))) + Rf = checkData(Rf) + Ra.ncols = NCOL(Ra) + Rb.ncols = NCOL(Rb) + pairs = expand.grid(1:Ra.ncols, 1:Rb.ncols) + mm <- function(Ra, Rb, Rf){ + shr = SharpeRatio(Ra, Rf, FUN = "StdDev") + MM = shr * StdDev(Rb) + mean(Rf) + return(MM) + } + result = apply(pairs, 1, FUN = function(n, Ra, Rb, Rf) mm(Ra[, + n[1]], Rb[, n[2]], Rf), Ra = Ra, Rb = Rb, Rf = Rf) + if (length(result) == 1) + return(result) + else { + dim(result) = c(Ra.ncols, Rb.ncols) + colnames(result) = paste("Modigliani-Modigliani measure:", colnames(Rb)) + rownames(result) = colnames(Ra) + return(t(result)) + } +} Copied: pkg/PerformanceAnalytics/man/CAPM.dynamic.Rd (from rev 3067, pkg/PortfolioAttribution/man/CAPM.dynamic.Rd) =================================================================== --- pkg/PerformanceAnalytics/man/CAPM.dynamic.Rd (rev 0) +++ pkg/PerformanceAnalytics/man/CAPM.dynamic.Rd 2013-09-12 17:46:41 UTC (rev 3069) @@ -0,0 +1,70 @@ +\name{CAPM.dynamic} +\alias{CAPM.dynamic} +\title{Time-varying conditional beta} +\usage{ + CAPM.dynamic(Ra, Rb, Rf = 0, Z, lags = 1, ...) 
+} +\arguments{ + \item{Ra}{an xts, vector, matrix, data frame, timeSeries + or zoo object of the asset returns} + + \item{Rb}{an xts, vector, matrix, data frame, timeSeries + or zoo object of the benchmark asset return} + + \item{Rf}{risk free rate, in same period as your returns} + + \item{Z}{an xts, vector, matrix, data frame, timeSeries + or zoo object of k variables that reflect public + information} + + \item{lags}{number of lags before the current period on + which the alpha and beta are conditioned} + + \item{\dots}{any other passthrough parameters} +} +\description{ + CAPM is estimated assuming that betas and alphas change + over time. It is assumed that the market prices of + securities fully reflect readily available and public + information. A matrix of market information variables, + \eqn{Z} measures this information. Possible variables in + \eqn{Z} could be the dividend yield, Treasury yield, etc. + The betas of stocks and managed portfolios are allowed to + change with market conditions: + \deqn{\beta_{p}(z_{t})=b_{0p}+B_{p}'z_{t}}{beta(zt) = b0 + + Bp'zt} where \eqn{z_{t}=Z_{t}-E[Z]}{zt = Zt - E[Z]} - a + normalized vector of the deviations of \eqn{Z_{t}}{Zt}, + \eqn{B_{p}}{Bp} - a vector with the same dimension as + \eqn{Z_{t}}{Zt}. The coefficient \eqn{b_{0p}}{b0} can be + interpreted as the "average beta" or the beta when all + information variables are at their means. The elements of + \eqn{B_{p}}{Bp} measure the sensitivity of the + conditional beta to the deviations of the \eqn{Z_{t}}{Zt} + from their means. 
In the similar way the time-varying + conditional alpha is modeled: + \deqn{\alpha_{pt}=\alpha_{p}(z_{t})=\alpha_{0p}+A_{p}'z_{t}}{alpha(zt) + = a0 + Ap'zt} The modified regression is therefore: + \deqn{r_{pt+1}=\alpha_{0p}+A_{p}'z_{t}+b_{0p}r_{bt+1}+B_{p}'[z_{t}r_{bt+1}]+ + \mu_{pt+1}} +} +\examples{ +data(managers) +CAPM.dynamic(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12, Z=managers[, 9:10]) +CAPM.dynamic(managers[80:120,1:6], managers[80:120,7,drop=FALSE], Rf=managers[80:120,10,drop=FALSE], Z=managers[80:120, 9:10]) +CAPM.dynamic(managers[80:120,1:6], managers[80:120,8:7], managers[80:120,10,drop=FALSE], Z=managers[80:120, 9:10]) +} +\author{ + Andrii Babii +} +\references{ + J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio + Performance Measurement and Benchmarking}. 2009. + McGraw-Hill. Chapter 12. \cr Wayne E. Ferson and Rudi + Schadt, "Measuring Fund Strategy and Performance in + Changing Economic Conditions," \emph{Journal of Finance}, + vol. 51, 1996, pp.425-462 \cr +} +\seealso{ + \code{\link{CAPM.beta}} +} + Copied: pkg/PerformanceAnalytics/man/MarketTiming.Rd (from rev 3067, pkg/PortfolioAttribution/man/MarketTiming.Rd) =================================================================== --- pkg/PerformanceAnalytics/man/MarketTiming.Rd (rev 0) +++ pkg/PerformanceAnalytics/man/MarketTiming.Rd 2013-09-12 17:46:41 UTC (rev 3069) @@ -0,0 +1,77 @@ +\name{MarketTiming} +\alias{MarketTiming} +\title{Market timing models} +\usage{ + MarketTiming(Ra, Rb, Rf = 0, method = c("TM", "HM")) +} +\arguments{ + \item{Ra}{an xts, vector, matrix, data frame, timeSeries + or zoo object of the asset returns} + + \item{Rb}{an xts, vector, matrix, data frame, timeSeries + or zoo object of the benchmark asset return} + + \item{Rf}{risk free rate, in same period as your returns} + + \item{method}{used to select between Treynor-Mazuy and + Henriksson-Merton models. 
May be any of: \itemize{ \item + TM - Treynor-Mazuy model, \item HM - Henriksson-Merton + model} By default Treynor-Mazuy is selected} + + \item{\dots}{any other passthrough parameters} +} +\description{ + Allows to estimate Treynor-Mazuy or Merton-Henriksson + market timing model. The Treynor-Mazuy model is + essentially a quadratic extension of the basic CAPM. It + is estimated using a multiple regression. The second term + in the regression is the value of excess return squared. + If the gamma coefficient in the regression is positive, + then the estimated equation describes a convex + upward-sloping regression "line". The quadratic + regression is: \deqn{R_{p}-R_{f}=\alpha+\beta (R_{b} - + R_{f})+\gamma (R_{b}-R_{f})^2+ \varepsilon_{p}}{Rp - Rf = + alpha + beta(Rb -Rf) + gamma(Rb - Rf)^2 + epsilonp} + \eqn{\gamma}{gamma} is a measure of the curvature of the + regression line. If \eqn{\gamma}{gamma} is positive, this + would indicate that the manager's investment strategy + demonstrates market timing ability. +} +\details{ + The basic idea of the Merton-Henriksson test is to + perform a multiple regression in which the dependent + variable (portfolio excess return and a second variable + that mimics the payoff to an option). 
This second + variable is zero when the market excess return is at or + below zero and is 1 when it is above zero: + \deqn{R_{p}-R_{f}=\alpha+\beta (R_{b}-R_{f})+\gamma + D+\varepsilon_{p}}{Rp - Rf = alpha + beta * (Rb - Rf) + + gamma * D + epsilonp} where all variables are familiar + from the CAPM model, except for the up-market return + \eqn{D=max(0,R_{b}-R_{f})}{D = max(0, Rb - Rf)} and + market timing abilities \eqn{\gamma}{gamma} +} +\examples{ +data(managers) +MarketTiming(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12, method = "HM") +MarketTiming(managers[80:120,1:6], managers[80:120,7,drop=FALSE], managers[80:120,10,drop=FALSE]) +MarketTiming(managers[80:120,1:6], managers[80:120,8:7], managers[80:120,10,drop=FALSE], method = "TM") +} +\author{ + Andrii Babii, Peter Carl +} +\references{ + J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio + Performance Measurement and Benchmarking}. 2009. + McGraw-Hill, p. 127-133. \cr J. L. Treynor and K. Mazuy, + "Can Mutual Funds Outguess the Market?" \emph{Harvard + Business Review}, vol44, 1966, pp. 131-136 \cr Roy D. + Henriksson and Robert C. Merton, "On Market Timing and + Investment Performance. II. Statistical Procedures for + Evaluating Forecast Skills," \emph{Journal of Business}, + vol.54, October 1981, pp.513-533 \cr +} +\seealso{ + \code{\link{CAPM.beta}} +} + Copied: pkg/PerformanceAnalytics/man/Modigliani.Rd (from rev 3067, pkg/PortfolioAttribution/man/Modigliani.Rd) =================================================================== --- pkg/PerformanceAnalytics/man/Modigliani.Rd (rev 0) +++ pkg/PerformanceAnalytics/man/Modigliani.Rd 2013-09-12 17:46:41 UTC (rev 3069) @@ -0,0 +1,54 @@ +\name{Modigliani} +\alias{Modigliani} +\title{Modigliani-Modigliani measure} +\usage{ + Modigliani(Ra, Rb, Rf = 0, ...) 
+} +\arguments{ + \item{Ra}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{Rb}{return vector of the benchmark asset} + + \item{Rf}{risk free rate, in same period as your returns} + + \item{\dots}{any other passthrough parameters} +} +\description{ + The Modigliani-Modigliani measure is the portfolio return + adjusted upward or downward to match the benchmark's + standard deviation. This puts the portfolio return and + the benchmark return on 'equal footing' from a standard + deviation perspective. \deqn{MM_{p}=\frac{E[R_{p} - + R_{f}]}{\sigma_{p}}=SR_{p} * \sigma_{b} + E[R_{f}]}{MMp = + SRp * sigmab + E[Rf]} where \eqn{SR_{p}}{SRp} - Sharpe + ratio, \eqn{\sigma_{b}}{sigmab} - benchmark standard + deviation +} +\details{ + This is also analogous to some approaches to 'risk + parity' portfolios, which use (presumably costless) + leverage to increase the portfolio standard deviation to + some target. +} +\examples{ +data(managers) +Modigliani(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12) +Modigliani(managers[,1:6], managers[,8,drop=FALSE], managers[,8,drop=FALSE]) +Modigliani(managers[,1:6], managers[,8:7], managers[,8,drop=FALSE]) +} +\author{ + Andrii Babii, Brian G. Peterson +} +\references{ + J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio + Performance Measurement and Benchmarking}. 2009. + McGraw-Hill, p. 97-99. 
\cr Franco Modigliani and Leah + Modigliani, "Risk-Adjusted Performance: How to Measure It + and Why," \emph{Journal of Portfolio Management}, vol.23, + no., Winter 1997, pp.45-54 \cr +} +\seealso{ + \code{\link{SharpeRatio}}, \code{\link{TreynorRatio}} +} + Modified: pkg/PortfolioAttribution/NAMESPACE =================================================================== --- pkg/PortfolioAttribution/NAMESPACE 2013-09-12 17:43:38 UTC (rev 3068) +++ pkg/PortfolioAttribution/NAMESPACE 2013-09-12 17:46:41 UTC (rev 3069) @@ -2,16 +2,13 @@ export(AttributionFixedIncome) export(Attribution.geometric) export(Attribution.levels) -export(CAPM.dynamic) export(Carino) export(Conv.option) export(DaviesLaker) export(Frongello) export(Grap) export(HierarchyQuintiles) -export(MarketTiming) export(Menchero) -export(Modigliani) export(Return.annualized.excess) export(Return.level) export(Weight.level) Deleted: pkg/PortfolioAttribution/R/CAPM.dynamic.R =================================================================== --- pkg/PortfolioAttribution/R/CAPM.dynamic.R 2013-09-12 17:43:38 UTC (rev 3068) +++ pkg/PortfolioAttribution/R/CAPM.dynamic.R 2013-09-12 17:46:41 UTC (rev 3069) @@ -1,101 +0,0 @@ -#' Time-varying conditional beta -#' -#' CAPM is estimated assuming that betas and alphas change over time. It is -#' assumed that the market prices of securities fully reflect readily available -#' and public information. A matrix of market information variables, \eqn{Z} -#' measures this information. Possible variables in \eqn{Z} could be the -#' divident yield, Tresaury yield, etc. The betas of stocks and managed -#' portfolios are allowed to change with market conditions: -#' \deqn{\beta_{p}(z_{t})=b_{0p}+B_{p}'z_{t}}{beta(zt) = b0 + Bp'zt} -#' where \eqn{z_{t}=Z_{t}-E[Z]}{zt = Zt - E[Z]} - a normalized vector of the -#' deviations of \eqn{Z_{t}}{Zt}, \eqn{B_{p}}{Bp} - a vector with the same -#' dimension as \eqn{Z_{t}}{Zt}. 
The coefficient \eqn{b_{0p}}{b0} can be -#' interpreted as the "average beta" or the beta when all infromation variables -#' are at their means. The elements of \eqn{B_{p}}{Bp} measure the sensitivity -#' of the conditional beta to the deviations of the \eqn{Z_{t}}{Zt} from their -#' means. -#' In the similar way the time-varying conditional alpha is modeled: -#' \deqn{\alpha_{pt}=\alpha_{p}(z_{t})=\alpha_{0p}+A_{p}'z_{t}}{alpha(zt) = -#' a0 + Ap'zt} -#' The modified regression is therefore: -#' \deqn{r_{pt+1}=\alpha_{0p}+A_{p}'z_{t}+b_{0p}r_{bt+1}+B_{p}'[z_{t}r_{bt+1}]+ -#' \mu_{pt+1}} -#' -#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of -#' the asset returns -#' @param Rb an xts, vector, matrix, data frame, timeSeries or zoo object of -#' the benchmark asset return -#' @param Rf risk free rate, in same period as your returns -#' @param Z an xts, vector, matrix, data frame, timeSeries or zoo object of -#' k variables that reflect public information -#' @param lags number of lags before the current period on which the alpha and -#' beta are conditioned -#' @param \dots any other passthrough parameters -#' @author Andrii Babii -#' @seealso \code{\link{CAPM.beta}} -#' @references J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio -#' Performance Measurement and Benchmarking}. 2009. McGraw-Hill. Chapter 12. -#' \cr Wayne E. Ferson and Rudi Schadt, "Measuring Fund Strategy and -#' Performance in Changing Economic Conditions," \emph{Journal of Finance}, -#' vol. 
51, 1996, pp.425-462 \cr -#' @examples -#' -#' data(managers) -#' CAPM.dynamic(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12, Z=managers[, 9:10]) -#' CAPM.dynamic(managers[80:120,1:6], managers[80:120,7,drop=FALSE], Rf=managers[80:120,10,drop=FALSE], Z=managers[80:120, 9:10]) -#' CAPM.dynamic(managers[80:120,1:6], managers[80:120,8:7], managers[80:120,10,drop=FALSE], Z=managers[80:120, 9:10]) -#' -#' @export -CAPM.dynamic <- function (Ra, Rb, Rf = 0, Z, lags = 1, ...) -{ # @author Andrii Babii - - # FUNCTION - - Ra = checkData(Ra) - Rb = checkData(Rb) - Z = checkData(Z) - Z = na.omit(Z) - if (!is.null(dim(Rf))) - Rf = checkData(Rf) - Ra.ncols = NCOL(Ra) - Rb.ncols = NCOL(Rb) - pairs = expand.grid(1:Ra.ncols) - - xRa = Return.excess(Ra, Rf)[1:(nrow(Ra) - 1)] - xRb = Return.excess(Rb, Rf)[1:(nrow(Rb) - 1)] - z = Z - matrix(rep(mean(Z), nrow(Z)), nrow(Z), ncol(Z), byrow = TRUE) - # Construct the matrix with information regressors (lagged values) - inform = lag(z) - if (lags > 1){ - for (i in 2:lags) { - inform = cbind(inform, lag(z, i)) - } - } - z = inform[(lags + 1):nrow(z), ] - - dynamic <- function (xRa, xRb, z){ - y = xRa[1:nrow(z)] - X = cbind(z, coredata(xRb[1:nrow(z)]), z * matrix(rep(xRb[1:nrow(z)], ncol(z)), nrow(z), ncol(z))) - X.df = as.data.frame(X) - model = lm(xRa[1:nrow(z)] ~ 1 + ., data = X.df) - return(coef(model)) - } - result = apply(pairs, 1, FUN = function(n, xRa, xRb, z) - dynamic(xRa[, n[1]], xRb[, 1], z), xRa = xRa, xRb = xRb, z = z) - result = t(result) - - if (ncol(Rb) > 1){ - for (i in 2:ncol(xRb)){ - res = apply(pairs, 1, FUN = function(n, xRa, xRb, z) - dynamic(xRa[, n[1]], xRb[, i], z), xRa = xRa, xRb = xRb, z = z) - res = t(res) - result = rbind(result, res) - } - } - - a = paste(rep(colnames(Z), lags), "alpha at t -", expand.grid(1:ncol(Z), 1:lags)[, 2]) - b = paste(rep(colnames(Z), lags), "beta at t -", expand.grid(1:ncol(Z), 1:lags)[, 2]) - colnames(result) = c("Average alpha", a, "Average beta", b) - rownames(result) 
= paste(rep(colnames(Ra), ncol(Rb)), "to", rep(colnames(Rb), each = ncol(Ra))) - return(result) -} \ No newline at end of file Deleted: pkg/PortfolioAttribution/R/MarketTiming.R =================================================================== --- pkg/PortfolioAttribution/R/MarketTiming.R 2013-09-12 17:43:38 UTC (rev 3068) +++ pkg/PortfolioAttribution/R/MarketTiming.R 2013-09-12 17:46:41 UTC (rev 3069) @@ -1,96 +0,0 @@ -#' Market timing models -#' -#' Allows to estimate Treynor-Mazuy or Merton-Henriksson market timing model. -#' The Treynor-Mazuy model is essentially a quadratic extension of the basic -#' CAPM. It is estimated using a multiple regression. The second term in the -#' regression is the value of excess return squared. If the gamma coefficient -#' in the regression is positive, then the estimated equation describes a -#' convex upward-sloping regression "line". The quadratic regression is: -#' \deqn{R_{p}-R_{f}=\alpha+\beta (R_{b} - R_{f})+\gamma (R_{b}-R_{f})^2+ -#' \varepsilon_{p}}{Rp - Rf = alpha + beta(Rb -Rf) + gamma(Rb - Rf)^2 + -#' epsilonp} -#' \eqn{\gamma}{gamma} is a measure of the curvature of the regression line. -#' If \eqn{\gamma}{gamma} is positive, this would indicate that the manager's -#' investment strategy demonstrates market timing ability. -#' -#' The basic idea of the Merton-Henriksson test is to perform a multiple -#' regression in which the dependent variable (portfolio excess return and a -#' second variable that mimics the payoff to an option). 
This second variable -#' is zero when the market excess return is at or below zero and is 1 when it -#' is above zero: -#' \deqn{R_{p}-R_{f}=\alpha+\beta (R_{b}-R_{f})+\gamma D+\varepsilon_{p}}{Rp - -#' Rf = alpha + beta * (Rb - Rf) + gamma * D + epsilonp} -#' where all variables are familiar from the CAPM model, except for the -#' up-market return \eqn{D=max(0,R_{b}-R_{f})}{D = max(0, Rb - Rf)} and market -#' timing abilities \eqn{\gamma}{gamma} -#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of -#' the asset returns -#' @param Rb an xts, vector, matrix, data frame, timeSeries or zoo object of -#' the benchmark asset return -#' @param Rf risk free rate, in same period as your returns -#' @param method used to select between Treynor-Mazuy and Henriksson-Merton -#' models. May be any of: \itemize{ \item TM - Treynor-Mazuy model, -#' \item HM - Henriksson-Merton model} By default Treynor-Mazuy is selected -#' @param \dots any other passthrough parameters -#' @author Andrii Babii, Peter Carl -#' @seealso \code{\link{CAPM.beta}} -#' @references J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio -#' Performance Measurement and Benchmarking}. 2009. McGraw-Hill, p. 127-133. -#' \cr J. L. Treynor and K. Mazuy, "Can Mutual Funds Outguess the Market?" -#' \emph{Harvard Business Review}, vol44, 1966, pp. 131-136 -#' \cr Roy D. Henriksson and Robert C. Merton, "On Market Timing and Investment -#' Performance. II. 
Statistical Procedures for Evaluating Forecast Skills," -#' \emph{Journal of Business}, vol.54, October 1981, pp.513-533 \cr -#' @examples -#' -#' data(managers) -#' MarketTiming(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12, method = "HM") -#' MarketTiming(managers[80:120,1:6], managers[80:120,7,drop=FALSE], managers[80:120,10,drop=FALSE]) -#' MarketTiming(managers[80:120,1:6], managers[80:120,8:7], managers[80:120,10,drop=FALSE], method = "TM") -#' -#' @export -MarketTiming <- function (Ra, Rb, Rf = 0, method = c("TM", "HM")) -{ # @author Andrii Babii, Peter Carl - - # FUNCTION - - Ra = checkData(Ra) - Rb = checkData(Rb) - if (!is.null(dim(Rf))) - Rf = checkData(Rf) - Ra.ncols = NCOL(Ra) - Rb.ncols = NCOL(Rb) - pairs = expand.grid(1:Ra.ncols, 1) - method = method[1] - xRa = Return.excess(Ra, Rf) - xRb = Return.excess(Rb, Rf) - - mt <- function (xRa, xRb) - { - switch(method, - "HM" = { S = xRb > 0 }, - "TM" = { S = xRb } - ) - R = merge(xRa, xRb, xRb*S) - R.df = as.data.frame(R) - model = lm(R.df[, 1] ~ 1 + ., data = R.df[, -1]) - return(coef(model)) - } - - result = apply(pairs, 1, FUN = function(n, xRa, xRb) - mt(xRa[, n[1]], xRb[, 1]), xRa = xRa, xRb = xRb) - result = t(result) - - if (ncol(Rb) > 1){ - for (i in 2:ncol(xRb)){ - res = apply(pairs, 1, FUN = function(n, xRa, xRb) - mt(xRa[, n[1]], xRb[, i]), xRa = xRa, xRb = xRb) - res = t(res) - result = rbind(result, res) - } - } - - rownames(result) = paste(rep(colnames(Ra), ncol(Rb)), "to", rep(colnames(Rb), each = ncol(Ra))) - colnames(result) = c("Alpha", "Beta", "Gamma") - return(result) -} \ No newline at end of file Deleted: pkg/PortfolioAttribution/R/Modigliani.R =================================================================== --- pkg/PortfolioAttribution/R/Modigliani.R 2013-09-12 17:43:38 UTC (rev 3068) +++ pkg/PortfolioAttribution/R/Modigliani.R 2013-09-12 17:46:41 UTC (rev 3069) @@ -1,60 +0,0 @@ -#' Modigliani-Modigliani measure -#' -#' The Modigliani-Modigliani measure is the 
portfolio return adjusted upward -#' or downward to match the benchmark's standard deviation. This puts the -#' portfolio return and the benchmark return on 'equal footing' from a standard -#' deviation perspective. -#' \deqn{MM_{p}=\frac{E[R_{p} - R_{f}]}{\sigma_{p}}=SR_{p} * \sigma_{b} + -#' E[R_{f}]}{MMp = SRp * sigmab + E[Rf]} -#' where \eqn{SR_{p}}{SRp} - Sharpe ratio, \eqn{\sigma_{b}}{sigmab} - benchmark -#' standard deviation -#' -#' This is also analogous to some approaches to 'risk parity' portfolios, which -#' use (presumably costless) leverage to increase the portfolio standard -#' deviation to some target. -#' -#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of -#' asset returns -#' @param Rb return vector of the benchmark asset -#' @param Rf risk free rate, in same period as your returns -#' @param \dots any other passthrough parameters -#' @author Andrii Babii, Brian G. Peterson -#' @references J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio -#' Performance Measurement and Benchmarking}. 2009. McGraw-Hill, p. 97-99. 
\cr -#' Franco Modigliani and Leah Modigliani, "Risk-Adjusted Performance: How to -#' Measure It and Why," \emph{Journal of Portfolio Management}, vol.23, no., -#' Winter 1997, pp.45-54 \cr -#' @seealso \code{\link{SharpeRatio}}, \code{\link{TreynorRatio}} -#' @examples -#' -#' data(managers) -#' Modigliani(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12) [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3069 From noreply at r-forge.r-project.org Thu Sep 12 20:07:30 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Sep 2013 20:07:30 +0200 (CEST) Subject: [Returnanalytics-commits] r3070 - in pkg: PerformanceAnalytics/R PerformanceAnalytics/man PortfolioAttribution PortfolioAttribution/R PortfolioAttribution/man Message-ID: <20130912180730.A38EA184602@r-forge.r-project.org> Author: braverock Date: 2013-09-12 20:07:30 +0200 (Thu, 12 Sep 2013) New Revision: 3070 Added: pkg/PerformanceAnalytics/R/Return.annualized.excess.R pkg/PerformanceAnalytics/man/Return.annualized.excess.Rd Removed: pkg/PortfolioAttribution/R/Return.annualized.excess.R pkg/PortfolioAttribution/man/Return.annualized.excess.Rd Modified: pkg/PortfolioAttribution/NAMESPACE Log: - move Return.annualized.excess to PerformanceAnalytics from PortfolioAttribution Copied: pkg/PerformanceAnalytics/R/Return.annualized.excess.R (from rev 3067, pkg/PortfolioAttribution/R/Return.annualized.excess.R) =================================================================== --- pkg/PerformanceAnalytics/R/Return.annualized.excess.R (rev 0) +++ pkg/PerformanceAnalytics/R/Return.annualized.excess.R 2013-09-12 18:07:30 UTC (rev 3070) @@ -0,0 +1,78 @@ +#' calculates an annualized excess return for comparing instruments with different +#' length history +#' +#' An average annualized excess return is convenient for comparing excess +#' returns. +#' +#' Annualized returns are useful for comparing two assets. 
To do so, you must +#' scale your observations to an annual scale by raising the compound return to +#' the number of periods in a year, and taking the root to the number of total +#' observations: +#' \deqn{prod(1+R_{a})^{\frac{scale}{n}}-1=\sqrt[n]{prod(1+R_{a})^{scale}}- +#' 1}{prod(1 + Ra)^(scale/n) - 1} +#' +#' where scale is the number of periods in a year, and n is the total number of +#' periods for which you have observations. +#' +#' Finally having annualized returns for portfolio and benchmark we can compute +#' annualized excess return as difference in the annualized portfolio and +#' benchmark returns in the arithmetic case: +#' \deqn{er = R_{pa} - R_{ba}}{er = Rpa - Rba} +#' +#' and as a geometric difference in the geometric case: +#' \deqn{er = \frac{(1 + R_{pa})}{(1 + R_{ba})} - 1}{er = (1 + Rpa) / (1 + Rba) - 1} +#' +#' @param Rp an xts, vector, matrix, data frame, timeSeries or zoo object of +#' portfolio returns +#' @param Rb an xts, vector, matrix, data frame, timeSeries or zoo object of +#' benchmark returns +#' @param scale number of periods in a year (daily scale = 252, monthly scale = +#' 12, quarterly scale = 4) +#' @param geometric generate geometric (TRUE) or simple (FALSE) excess returns, +#' default TRUE +#' @author Andrii Babii +#' @seealso \code{\link{Return.annualized}}, +#' @references Bacon, Carl. \emph{Practical Portfolio Performance Measurement +#' and Attribution}. Wiley. 2004. p. 
206-207 +#' @keywords ts multivariate distribution models +#' @examples +#' +#' data(attrib) +#' Return.annualized.excess(Rp = attrib.returns[, 21], Rb = attrib.returns[, 22]) +#' +#' @export +Return.annualized.excess <- +function (Rp, Rb, scale = NA, geometric = TRUE ) +{ # @author Andrii Babii + Rp = checkData(Rp) + Rb = checkData(Rb) + + Rp = na.omit(Rp) + Rb = na.omit(Rb) + n = nrow(Rp) + if(is.na(scale)) { + freq = periodicity(Rp) + switch(freq$scale, + minute = {stop("Data periodicity too high")}, + hourly = {stop("Data periodicity too high")}, + daily = {scale = 252}, + eekly = {scale = 52}, + monthly = {scale = 12}, + quarterly = {scale = 4}, + yearly = {scale = 1} + ) + } + Rpa = apply(1 + Rp, 2, prod)^(scale/n) - 1 + Rba = apply(1 + Rb, 2, prod)^(scale/n) - 1 + if (geometric) { + # geometric excess returns + result = (1 + Rpa) / (1 + Rba) - 1 + } else { + # arithmetic excess returns + result = Rpa - Rba + } + dim(result) = c(1,NCOL(Rp)) + colnames(result) = colnames(Rp) + rownames(result) = "Annualized Return" + return(result) +} \ No newline at end of file Copied: pkg/PerformanceAnalytics/man/Return.annualized.excess.Rd (from rev 3067, pkg/PortfolioAttribution/man/Return.annualized.excess.Rd) =================================================================== --- pkg/PerformanceAnalytics/man/Return.annualized.excess.Rd (rev 0) +++ pkg/PerformanceAnalytics/man/Return.annualized.excess.Rd 2013-09-12 18:07:30 UTC (rev 3070) @@ -0,0 +1,67 @@ +\name{Return.annualized.excess} +\alias{Return.annualized.excess} +\title{calculates an annualized excess return for comparing instruments with different +length history} +\usage{ + Return.annualized.excess(Rp, Rb, scale = NA, + geometric = TRUE) +} +\arguments{ + \item{Rp}{an xts, vector, matrix, data frame, timeSeries + or zoo object of portfolio returns} + + \item{Rb}{an xts, vector, matrix, data frame, timeSeries + or zoo object of benchmark returns} + + \item{scale}{number of periods in a year (daily scale = + 252, 
monthly scale = 12, quarterly scale = 4)} + + \item{geometric}{generate geometric (TRUE) or simple + (FALSE) excess returns, default TRUE} +} +\description{ + An average annualized excess return is convenient for + comparing excess returns. +} +\details{ + Annualized returns are useful for comparing two assets. + To do so, you must scale your observations to an annual + scale by raising the compound return to the number of + periods in a year, and taking the root to the number of + total observations: + \deqn{prod(1+R_{a})^{\frac{scale}{n}}-1=\sqrt[n]{prod(1+R_{a})^{scale}}- + 1}{prod(1 + Ra)^(scale/n) - 1} + + where scale is the number of periods in a year, and n is + the total number of periods for which you have + observations. + + Finally having annualized returns for portfolio and + benchmark we can compute annualized excess return as + difference in the annualized portfolio and benchmark + returns in the arithmetic case: \deqn{er = R_{pa} - + R_{ba}}{er = Rpa - Rba} + + and as a geometric difference in the geometric case: + \deqn{er = \frac{(1 + R_{pa})}{(1 + R_{ba})} - 1}{er = (1 + + Rpa) / (1 + Rba) - 1} +} +\examples{ +data(attrib) +Return.annualized.excess(Rp = attrib.returns[, 21], Rb = attrib.returns[, 22]) +} +\author{ + Andrii Babii +} +\references{ + Bacon, Carl. \emph{Practical Portfolio Performance + Measurement and Attribution}. Wiley. 2004. p. 
206-207 +} +\seealso{ + \code{\link{Return.annualized}}, +} +\keyword{distribution} +\keyword{models} +\keyword{multivariate} +\keyword{ts} + Modified: pkg/PortfolioAttribution/NAMESPACE =================================================================== --- pkg/PortfolioAttribution/NAMESPACE 2013-09-12 17:46:41 UTC (rev 3069) +++ pkg/PortfolioAttribution/NAMESPACE 2013-09-12 18:07:30 UTC (rev 3070) @@ -9,7 +9,6 @@ export(Grap) export(HierarchyQuintiles) export(Menchero) -export(Return.annualized.excess) export(Return.level) export(Weight.level) export(Weight.transform) Deleted: pkg/PortfolioAttribution/R/Return.annualized.excess.R =================================================================== --- pkg/PortfolioAttribution/R/Return.annualized.excess.R 2013-09-12 17:46:41 UTC (rev 3069) +++ pkg/PortfolioAttribution/R/Return.annualized.excess.R 2013-09-12 18:07:30 UTC (rev 3070) @@ -1,78 +0,0 @@ -#' calculates an annualized excess return for comparing instruments with different -#' length history -#' -#' An average annualized excess return is convenient for comparing excess -#' returns. -#' -#' Annualized returns are useful for comparing two assets. To do so, you must -#' scale your observations to an annual scale by raising the compound return to -#' the number of periods in a year, and taking the root to the number of total -#' observations: -#' \deqn{prod(1+R_{a})^{\frac{scale}{n}}-1=\sqrt[n]{prod(1+R_{a})^{scale}}- -#' 1}{prod(1 + Ra)^(scale/n) - 1} -#' -#' where scale is the number of periods in a year, and n is the total number of -#' periods for which you have observations. 
-#' -#' Finally having annualized returns for portfolio and benchmark we can compute -#' annualized excess return as difference in the annualized portfolio and -#' benchmark returns in the arithmetic case: -#' \deqn{er = R_{pa} - R_{ba}}{er = Rpa - Rba} -#' -#' and as a geometric difference in the geometric case: -#' \deqn{er = \frac{(1 + R_{pa})}{(1 + R_{ba})} - 1}{er = (1 + Rpa) / (1 + Rba) - 1} -#' -#' @param Rp an xts, vector, matrix, data frame, timeSeries or zoo object of -#' portfolio returns -#' @param Rb an xts, vector, matrix, data frame, timeSeries or zoo object of -#' benchmark returns -#' @param scale number of periods in a year (daily scale = 252, monthly scale = -#' 12, quarterly scale = 4) -#' @param geometric generate geometric (TRUE) or simple (FALSE) excess returns, -#' default TRUE -#' @author Andrii Babii -#' @seealso \code{\link{Return.annualized}}, -#' @references Bacon, Carl. \emph{Practical Portfolio Performance Measurement -#' and Attribution}. Wiley. 2004. p. 206-207 -#' @keywords ts multivariate distribution models -#' @examples -#' -#' data(attrib) -#' Return.annualized.excess(Rp = attrib.returns[, 21], Rb = attrib.returns[, 22]) -#' -#' @export -Return.annualized.excess <- -function (Rp, Rb, scale = NA, geometric = TRUE ) -{ # @author Andrii Babii - Rp = checkData(Rp) - Rb = checkData(Rb) - - Rp = na.omit(Rp) - Rb = na.omit(Rb) - n = nrow(Rp) - if(is.na(scale)) { - freq = periodicity(Rp) - switch(freq$scale, - minute = {stop("Data periodicity too high")}, - hourly = {stop("Data periodicity too high")}, - daily = {scale = 252}, - eekly = {scale = 52}, - monthly = {scale = 12}, - quarterly = {scale = 4}, - yearly = {scale = 1} - ) - } - Rpa = apply(1 + Rp, 2, prod)^(scale/n) - 1 - Rba = apply(1 + Rb, 2, prod)^(scale/n) - 1 - if (geometric) { - # geometric excess returns - result = (1 + Rpa) / (1 + Rba) - 1 - } else { - # arithmetic excess returns - result = Rpa - Rba - } - dim(result) = c(1,NCOL(Rp)) - colnames(result) = colnames(Rp) - 
rownames(result) = "Annualized Return" - return(result) -} \ No newline at end of file Deleted: pkg/PortfolioAttribution/man/Return.annualized.excess.Rd =================================================================== --- pkg/PortfolioAttribution/man/Return.annualized.excess.Rd 2013-09-12 17:46:41 UTC (rev 3069) +++ pkg/PortfolioAttribution/man/Return.annualized.excess.Rd 2013-09-12 18:07:30 UTC (rev 3070) @@ -1,67 +0,0 @@ -\name{Return.annualized.excess} -\alias{Return.annualized.excess} -\title{calculates an annualized excess return for comparing instruments with different -length history} -\usage{ - Return.annualized.excess(Rp, Rb, scale = NA, - geometric = TRUE) -} -\arguments{ - \item{Rp}{an xts, vector, matrix, data frame, timeSeries - or zoo object of portfolio returns} - - \item{Rb}{an xts, vector, matrix, data frame, timeSeries - or zoo object of benchmark returns} - - \item{scale}{number of periods in a year (daily scale = - 252, monthly scale = 12, quarterly scale = 4)} - - \item{geometric}{generate geometric (TRUE) or simple - (FALSE) excess returns, default TRUE} -} -\description{ - An average annualized excess return is convenient for - comparing excess returns. -} -\details{ - Annualized returns are useful for comparing two assets. - To do so, you must scale your observations to an annual - scale by raising the compound return to the number of - periods in a year, and taking the root to the number of - total observations: - \deqn{prod(1+R_{a})^{\frac{scale}{n}}-1=\sqrt[n]{prod(1+R_{a})^{scale}}- - 1}{prod(1 + Ra)^(scale/n) - 1} - - where scale is the number of periods in a year, and n is - the total number of periods for which you have - observations. 
- - Finally having annualized returns for portfolio and - benchmark we can compute annualized excess return as - difference in the annualized portfolio and benchmark - returns in the arithmetic case: \deqn{er = R_{pa} - - R_{ba}}{er = Rpa - Rba} - - and as a geometric difference in the geometric case: - \deqn{er = \frac{(1 + R_{pa})}{(1 + R_{ba})} - 1}{er = (1 - + Rpa) / (1 + Rba) - 1} -} -\examples{ -data(attrib) -Return.annualized.excess(Rp = attrib.returns[, 21], Rb = attrib.returns[, 22]) -} -\author{ - Andrii Babii -} -\references{ - Bacon, Carl. \emph{Practical Portfolio Performance - Measurement and Attribution}. Wiley. 2004. p. 206-207 -} -\seealso{ - \code{\link{Return.annualized}}, -} -\keyword{distribution} -\keyword{models} -\keyword{multivariate} -\keyword{ts} - From noreply at r-forge.r-project.org Thu Sep 12 20:10:48 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Sep 2013 20:10:48 +0200 (CEST) Subject: [Returnanalytics-commits] r3071 - in pkg/PortfolioAttribution: . inst inst/doc Message-ID: <20130912181048.8C5C61850D1@r-forge.r-project.org> Author: ababii Date: 2013-09-12 20:10:48 +0200 (Thu, 12 Sep 2013) New Revision: 3071 Added: pkg/PortfolioAttribution/inst/ pkg/PortfolioAttribution/inst/doc/ pkg/PortfolioAttribution/inst/doc/PortfolioAttribution.pdf pkg/PortfolioAttribution/inst/doc/PortfolioAttribution.tex pkg/PortfolioAttribution/inst/doc/References.bib Log: Added: pkg/PortfolioAttribution/inst/doc/PortfolioAttribution.pdf =================================================================== --- pkg/PortfolioAttribution/inst/doc/PortfolioAttribution.pdf (rev 0) +++ pkg/PortfolioAttribution/inst/doc/PortfolioAttribution.pdf 2013-09-12 18:10:48 UTC (rev 3071) @@ -0,0 +1,3222 @@ +%PDF-1.5 +%???? 
+4 0 obj +<< /S /GoTo /D (section.1) >> +endobj +7 0 obj +(Introduction) +endobj +8 0 obj +<< /S /GoTo /D (section.2) >> +endobj +11 0 obj +(Implementation of the performance attribution) +endobj +12 0 obj +<< /S /GoTo /D (subsection.2.1) >> +endobj +15 0 obj +(Arithmetic attribution) +endobj +16 0 obj +<< /S /GoTo /D (subsubsection.2.1.1) >> +endobj +19 0 obj +(brinson1985measuring vs brinson1986determinants) +endobj +20 0 obj +<< /S /GoTo /D (subsubsection.2.1.2) >> +endobj +23 0 obj +(Top-down vs Bottom-up) +endobj +24 0 obj +<< /S /GoTo /D (subsubsection.2.1.3) >> +endobj +27 0 obj +(Multi-period linking) +endobj +28 0 obj +<< /S /GoTo /D (subsubsection.2.1.4) >> +endobj +31 0 obj +(Examples) +endobj +32 0 obj +<< /S /GoTo /D (subsection.2.2) >> +endobj +35 0 obj +(Geometric attribution) +endobj +36 0 obj +<< /S /GoTo /D (subsubsection.2.2.1) >> +endobj +39 0 obj +(Examples) +endobj +40 0 obj +<< /S /GoTo /D (subsection.2.3) >> +endobj +43 0 obj +(Multi-level attribution) +endobj +44 0 obj +<< /S /GoTo /D (subsection.2.4) >> +endobj +47 0 obj +(Fixed income attribution) +endobj +48 0 obj +<< /S /GoTo /D (subsubsection.2.4.1) >> +endobj +51 0 obj +(Examples) +endobj +52 0 obj +<< /S /GoTo /D (subsection.2.5) >> +endobj +55 0 obj +(Multi-currency attribution) +endobj +56 0 obj +<< /S /GoTo /D (section.3) >> +endobj +59 0 obj +(Return and risk metrics) +endobj +60 0 obj +<< /S /GoTo /D (subsection.3.1) >> +endobj +63 0 obj +(Time-Varying Conditional alpha and beta) +endobj +64 0 obj +<< /S /GoTo /D (subsection.3.2) >> +endobj +67 0 obj +(Market timing metrics: henriksson1981market and treynor1966can models) +endobj +68 0 obj +<< /S /GoTo /D (subsection.3.3) >> +endobj +71 0 obj +(modigliani1997risk measure) +endobj +72 0 obj +<< /S /GoTo /D [73 0 R /Fit] >> +endobj +83 0 obj << +/Length 2331 +/Filter /FlateDecode +>> +stream +x?????8??_???:? +$}?m?f??V???0???U$????????I???????#?w8NA??.?????w?}J? 
?#??&x<*?"e???#????:?=????B;??\v?}??X?8M?;???;:?'????>?H?A?DE?*$??Q??`?u???I???9@b??b?KL?????$l,M??????C???`@????????i?/????1N?????$???P?F????4?&}?????D?????Z??????_?o1??pY???{?T?T?v?(@L?*??l2<9?sWG4.?Q@@?V??z%????D??o??N?i??=??3?}??Q ????/?jk?&Ud? ??^??5_??v?m?????q??z^?m???#???Z#?/i +f??u??????#??Bf??}??E?Yd h?e??a???r?????+;??G^(??c???G?r???]O a????3??RC ?G????D????KS?a???5?@??? +?Rz4(0? t?1?vo????@?a??? +??9??z?!??or?=??j?dr?Z? ?*?J??,+?#??~@??1?EX???~?1E6)???fKf?`?????p?}Z?-?x?[bp??{7i??T??#???W???f??/z???i??vpq? +Ja%D-??7??HL"???h???DL$&)r?????p ?]?)[?}?e???M??ed?o?a?A,?5?1Nv@???????L?e?4?o?px@ ?R?)?? xJ??d?O??$?O????6? ??'?????d??? 5?Z?"??;z'?]w???U?g??K%jD@ Q?? ?H g??b????$X5 ? G?(?}??k???r"24???%?? zmb????[??????4???#?Fs???`??l ?aAw +\???3????$?>l? ?oR6Lgf+8p??)? ????0H???Z?s;?]9?5X?%??P||?S6@???B???*p ??F???S!???;5>???Jg? +,??R???????h/???6???w???`?T?T???.??F???p?1???0?&??~??p???}?:wbi?l?:??? ???x??V(?,?a?&7?*?2???Z??K????Un????Ct?/:C ?R?s?\????7JZ????3????Cn*b%t??$????(????????I??x?{???AN"?$?w???H?B?? ?2????A?*??Q??0?Rd?????hO??r?6?? ?k??H?'?Q?????5?:??i???? [?????Z-??#??C?{[???P??(1???vv?.L+n%???? 
+??ns??a?,???f9?IpDz??> endobj +74 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [241.639 540.822 333.865 552.777] +/A << /S /GoTo /D (cite.christopherson2009portfolio) >> +>> endobj +75 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [338.44 540.822 360.358 552.777] +/A << /S /GoTo /D (cite.christopherson2009portfolio) >> +>> endobj +76 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [383.676 540.822 412.65 552.777] +/A << /S /GoTo /D (cite.bacon2008practical) >> +>> endobj +77 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [417.225 540.822 439.143 552.777] +/A << /S /GoTo /D (cite.bacon2008practical) >> +>> endobj +78 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [477.971 362.043 506.175 374.945] +/A << /S /GoTo /D (cite.fama1972components) >> +>> endobj +79 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [512.199 362.043 536.009 374.945] +/A << /S /GoTo /D (cite.fama1972components) >> +>> endobj +80 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [316.468 329.173 418.642 342.074] +/A << /S /GoTo /D (cite.christopherson2009portfolio) >> +>> endobj +81 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [424.528 329.173 448.339 342.074] +/A << /S /GoTo /D (cite.christopherson2009portfolio) >> +>> endobj +84 0 obj << +/D [73 0 R /XYZ 84.039 794.712 null] +>> endobj +85 0 obj << +/D [73 0 R /XYZ 85.039 756.85 null] +>> endobj +5 0 obj << +/D [73 0 R /XYZ 85.039 513.368 null] +>> endobj +82 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F16 86 0 R /F17 87 0 R /F35 88 0 R /F8 89 0 R /F37 90 0 R /F38 91 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +118 0 obj << +/Length 2402 +/Filter /FlateDecode +>> +stream +x??Z[o??~???r(4????,???m???E?r?@Q? 
+?H????;?3K?2m?>??"@?????|3?r??Z???B??x????U??b??T..?,t"B?/?8:?????@.WQ$???n_??i?* +???l?\?, ???[C?=|???_l?+??G??o???????Otr$?$q'????D&??"?c/????L_?????(y??%b?d>"??f??,(??`bL? +?,? ?7??;? +??? 0%}???wi?????m????YJ?g???0?#?? +??{?W?KD??UB?(???u??????# [L[??????YF??????0??K)?xe?Z`?I0??"?J?Lk???>'ehF:?+:?j?YZ+?D????E?????Ug"?B?P ?\[?`n???drp????% ?????9|??,cZ????? >.z7C +??%????t"?XN???,??-??5???? +??U???B?|?w?????4?O?i%????N?????"-'??fN???v~??j?:sZ*R???>???,O=M ????%,u????e??$??g-?? ??? ?h???????g@?$ ~l??s.?x3??????<?]*lk?2Bq??k?j??D?NNv?%???????LO?Z??MJ???l???k??3??i????e???r?-W??i +$?1&?"{,K?g\?T???*???\?b'?6S?0??S????B,\??????Tp#?G????{????kC?_m,??7?P?Q?f???V???U?????-??? ?J`c?a ???0???????f?S8;?/?? +*\f?n???8????N??1????(?bmG?J?t}{(???)?g?2???6????????1????t????????l?HC Z??????s???]???Uc&??7A(@ +Jr at l?;??c?rAm?N70????u?3??Wf?:G????@?qu+|=??i??u????????F +???#u+??X?i> endobj +96 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [122.376 601.34 235.051 614.242] +/A << /S /GoTo /D (cite.brinson1985measuring) >> +>> endobj +97 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [242.119 601.34 269.203 614.242] +/A << /S /GoTo /D (cite.brinson1985measuring) >> +>> endobj +98 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [292.022 601.34 368.811 614.242] +/A << /S /GoTo /D (cite.brinson1986determinants) >> +>> endobj +99 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [375.879 601.34 402.962 614.242] +/A << /S /GoTo /D (cite.brinson1986determinants) >> +>> endobj +100 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [84.043 561.424 150.824 574.326] +/A << /S /GoTo /D (cite.brinson1986determinants) >> +>> endobj +101 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [156.71 561.424 180.521 574.326] +/A << /S /GoTo /D (cite.brinson1986determinants) >> +>> endobj +102 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 
0]/H/I/C[0 1 0] +/Rect [169.339 485.385 266.328 498.286] +/A << /S /GoTo /D (cite.brinson1985measuring) >> +>> endobj +103 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [271.621 485.385 295.432 498.286] +/A << /S /GoTo /D (cite.brinson1985measuring) >> +>> endobj +104 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [174.498 468.95 241.278 481.851] +/A << /S /GoTo /D (cite.brinson1986determinants) >> +>> endobj +105 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [247.164 468.95 270.975 481.851] +/A << /S /GoTo /D (cite.brinson1986determinants) >> +>> endobj +106 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [111.316 213.814 145.46 226.715] +/A << /S /GoTo /D (cite.carino1999combining) >> +>> endobj +107 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [151.346 213.814 175.157 226.715] +/A << /S /GoTo /D (cite.carino1999combining) >> +>> endobj +108 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [111.316 197.379 159.399 210.28] +/A << /S /GoTo /D (cite.menchero2000optimized) >> +>> endobj +109 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [165.286 197.379 189.096 210.28] +/A << /S /GoTo /D (cite.menchero2000optimized) >> +>> endobj +110 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [111.316 180.944 145.505 193.845] +/A << /S /GoTo /D (cite.grap1997) >> +>> endobj +111 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [152.114 180.944 175.925 193.845] +/A << /S /GoTo /D (cite.grap1997) >> +>> endobj +112 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [111.316 148.073 157.127 160.975] +/A << /S /GoTo /D (cite.frongello2002linking) >> +>> endobj +113 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [163.824 148.073 187.635 160.975] +/A << /S /GoTo /D 
(cite.frongello2002linking) >> +>> endobj +114 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [111.316 115.203 198.695 128.105] +/A << /S /GoTo /D (cite.davies2001multiple) >> +>> endobj +115 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [205.714 115.203 229.524 128.105] +/A << /S /GoTo /D (cite.davies2001multiple) >> +>> endobj +119 0 obj << +/D [117 0 R /XYZ 84.039 794.712 null] +>> endobj +9 0 obj << +/D [117 0 R /XYZ 85.039 756.85 null] +>> endobj +13 0 obj << +/D [117 0 R /XYZ 85.039 732.299 null] +>> endobj +17 0 obj << +/D [117 0 R /XYZ 85.039 614.242 null] +>> endobj +21 0 obj << +/D [117 0 R /XYZ 85.039 424.494 null] +>> endobj +25 0 obj << +/D [117 0 R /XYZ 85.039 321.072 null] +>> endobj +116 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F37 90 0 R /F8 89 0 R /F41 120 0 R /F21 121 0 R /F38 91 0 R /F42 122 0 R /F18 123 0 R /F35 88 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +133 0 obj << +/Length 1411 +/Filter /FlateDecode +>> +stream +x??XmO?F?~?"BU?H????Oj???: +Q?*?Lb?%????_?}?wB I?!?<3?;;?????E???~@?{8?ppB??8A Ln???&?x??e6??H?c6a?Qt??>.?Y=???zp??P4s&???CE?l8??F?2?9???&??&u?:k?P????pDdTV?m9?KgK ??F7?(+?C??L?g??fYul??~N??8????X?@?8?6?????i??FK??2}1?9"+?/?????1???j?Y?G???sr????8/???"N?\Y???KZC???b:_?t?F?&Q??uSZ<-??3Z?M????0?i??V?eb?*K?e????E??????>g?^H? u$/?b?=????????j???q6y*?2??CE??k??? /???????[?h???)?|DT??Y?}??????Us_.?????f?F#?????M?o?? k??r???????s?i?b?=?)?~?w???{?+??9??.??c??M????????t?{??S?B +??0???8??[H? +?Z&c$???????X/L?nG|?? ?<:9?VjN??c?????7??????????l??/???C)???? B???;q??SJ??|?P?)?@-H&4?`? + +a,?A<1?2??D??? +??C/?p?T`2LDhIX?Ip?H? Q? ????? @? ZD$Phh????? ??eP(??9 ??= ????????@I at L????4Aa???{?? pj?vR??@P?q?"8???D?H???\?0?? `?p@??9?dU?e*?`?@c +??%?d0@*@?"p?!?!? ????c?q?Y????oD?"!???" ??????#?/???r???b??B?B?????"?"??krH?.?=??H???6?Jn>????.@????????2Av6????T??D|????-??T???$?????Tq +????/??Tl?H?M? +??a????JZ??F???? ??^??x????u?????OF_???jb???o??Z?? 
+_?f?R+?N???^?V$?I??h'1???m\7?????? {b??$?Jm?W?0?+)]??????M?????e?/???i???m???????sco???????}?x??????'P??u?n???q??G?e?_\?lQu??a\?????m?????E??Yh*/_p??d????)w??yA?n???wa^x?W6m????*}?YS?_,?[??????? +????g+?{M??g?S????????v??U^?{o??????y???????f{*KJVqW?????J????T?m,??EDR???+ ?|+?z?6????B_n7?/??????u"??I?[?????X???????=???r?KV???0 ?jk?%?R?9?+#?'??o? +endstream +endobj +132 0 obj << +/Type /Page +/Contents 133 0 R +/Resources 131 0 R +/MediaBox [0 0 595.276 841.89] +/Parent 92 0 R +>> endobj +134 0 obj << +/D [132 0 R /XYZ 84.039 794.712 null] +>> endobj +29 0 obj << +/D [132 0 R /XYZ 85.039 756.85 null] +>> endobj +131 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F35 88 0 R /F8 89 0 R /F46 135 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +138 0 obj << +/Length 2184 +/Filter /FlateDecode +>> +stream +x??YK?????? ?? ???????8?c9Z?,?j???pMr$???U]?a?Kj!_?_??_UW}U??m"??W"?_?\}??????<7?????L?"qZf???????]?????????o?q?p:?J?Y?H??u??z?U???????J?l??E????????????????^<}{?|{{?~bJ +??R?p?Io??<? +I(?n????{;?!2!d??`?Br1M{?8????!?qVL??????bj4W3?c/?8???U??????`:`????>??a??T1??p1?A????nC: ??\+oV??#\??53xx?p???k????Y????v??j?cT??H?_YM?]? +???a??WXv?rb?????u9?"??3???N???o.?5Z??u??Y? +???\???W????dp?????K +3? ?k0 +??????ji?B??z??? +o?H????>?f1>\"eVX???f??????*?NBf}^uCz???3?*z~Ik?X??????O?)]C?!?Aj9?L??xF +?lT='???nG?8*)??? H#???Cbj??;(???N????J???????????<|??]}?EJ??r0a?$?3I??8??7???iC?D??k??????S Z?ZeAt??????!P??1?n? ?l?'L&??]=????J?"? 
0>8?N?q?/`w???g??alJ?wA???AQ ?C???{?'??F?,.?r??k???????1U?'??9GR??R?T????????;\???O?Z????`?xm e??q[???L??????B?k?c?FR0?4~?????????ja?'x???y?W ~fF?>???Z at N?Nk?L.QY^??w?_&?r??P (???+????????r?PE0????????&bu?3k??K?P??`?6&}??> endobj +139 0 obj << +/D [137 0 R /XYZ 84.039 794.712 null] +>> endobj +33 0 obj << +/D [137 0 R /XYZ 85.039 250.048 null] +>> endobj +136 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F46 135 0 R /F8 89 0 R /F41 120 0 R /F38 91 0 R /F37 90 0 R /F21 121 0 R /F42 122 0 R /F18 123 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +142 0 obj << +/Length 1545 +/Filter /FlateDecode +>> +stream +x??XMo?6??W??-P)?uh????p?&?6 at S ???Uh%w?M???Ej5??l??D?og?C?du?"???_m^???rEI????6?+-??U*d??d??3b K?:?T???1?=T?]???????S?*f4?"u???u???>?Y???3????+??l???7??Fm ?eQvV?? +6??;??L?/ '?;h?,?l???x{u?????s(???\?h[?w???UT:rf?="?????D? ??c?y???WD???l?@a???????P+?dJ??W<J;????d2:W??H???['?44?? ????a??????o??@?0g?????i[??7?a_?zM-???p??GV#T<"#$uh??u?rH?????2_?JC#mN?Y?-?6crA[{m???P L? mMN at c?YFO@????>??C^????G?? l8???(?b?????#yVUM?SU? Iy???Yr}??????y???^????N??Nc"?????=>???????? ???RG???-?T4?jO???a??Ht?~?"?5???%????#???J?p??Z????{???>??g???x\???????????_?O:zE?IdZJ?C?+([??????J2??b?????o??*Lon`????? ?=Y??kZ?R??{h??Q=9?n?r???????I????WtN???? +c?=?/`? +?? q/? {???>?/?-y??y?uG??L? +.????ZE;?s??2?(i?b?J\???/??x?o?]??+s?9?u^???7?I??F???s?}?;s>????????|?Y?Lp?#.x?Ai?u?c??[-????9????g??q?5?????tt???Y???)??Ez??O9?Rb%w?S???n?9{??@? 
??)?V?h_???/;?Fk??@?[??z> endobj +143 0 obj << +/D [141 0 R /XYZ 84.039 794.712 null] +>> endobj +37 0 obj << +/D [141 0 R /XYZ 85.039 756.85 null] +>> endobj +41 0 obj << +/D [141 0 R /XYZ 85.039 115.729 null] +>> endobj +140 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F35 88 0 R /F8 89 0 R /F46 135 0 R /F37 90 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +146 0 obj << +/Length 1629 +/Filter /FlateDecode +>> +stream +x??X?o?6?????Aj?oR6 m????f??X?E?[?,y??????XnZ?(*yw???w???|?Fg?P???>{y?F???hz3R~]v_7ug3??\???,lVb??????zI ???>?????M?~R?v??9?J B2B???h?}zq??R?5E???2?%g'?S0AaX Z???o_?P)(???bQ7U?QKB???????????k???u?_W6hvK????? ?/N.??- !??l??*??????-_?>?-?x&%?AC?p?????W?(?-?e +F!??.??%?uS??"{????^????????V?E?Q*? ?Sw?????&]??Ey????H???|Y?|?W>???????+??r?;???3???????0 hn] +???\???mB????&??Z??;??8(??0?-??2{???y8??G$?/lv???f?N?|??C?M?u?@q ???x??X?T?e]?Mol vO???????P???d?n$l?? ????v??%??@O+m ? N?8i?5????z??X?????'6{???f?b????n^{??E??'3????&P??i???=????Sw!??Z?V??B?s.4>??~?3V???N????J??u./l?q?? ?? :???t)??oz(Q????&??@Q? #?6???d??0??9,??>?]?d;??)R?)??S?wL6rg?8v???i?p?{?s??? |?/>\?;?b??8??Q?9???????ioz +}~ur??n%Zm?m?]9N?RI +.g?Yj????X??I??WA$???R??R??0\? p??q???~@? +?8?????n5[?'?c?Y???v??!??u??9f????0??F??r$K?}$???)'.??)???)????.?_??8d'> endobj +147 0 obj << +/D [145 0 R /XYZ 84.039 794.712 null] +>> endobj +144 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F8 89 0 R /F47 148 0 R /F46 135 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +151 0 obj << +/Length 703 +/Filter /FlateDecode +>> +stream +x??W?n?0??+x??F!ewIJ????@Q?[Q?N??F] ?????"?,???'??????]? ???g??w?????D??,S??()E$?+?E?C|[?????{?I$*O!?"A?j???/MzqMnL ?V????,L*?G0?P.<8?R.?9?4e?3h???K????v???L???026;_?m?????l??]7n???????z?????g;??h???d???T? `??iIf[Fc?l'?izH??8??f?O?k???l??yF??DbD?A?7?????????jN??t2?????zaT????tqU????;t'Hb???{w?n0.???c?? 
+endstream +endobj +150 0 obj << +/Type /Page +/Contents 151 0 R +/Resources 149 0 R +/MediaBox [0 0 595.276 841.89] +/Parent 153 0 R +>> endobj +152 0 obj << +/D [150 0 R /XYZ 84.039 794.712 null] +>> endobj +149 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F46 135 0 R /F8 89 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +160 0 obj << +/Length 2117 +/Filter /FlateDecode +>> +stream +x??YKs?6????at?? @???V?lf???ac????? +MB2c?????????(R??T??@????Zl?^???o??]????LY???\\???Y?/?Tq???u??]?C?9[?Th-?Y2y???b????????-8?s??V ??y??$??????,K"s?Lx??l:STzX?zS?qxM ??pK???m??????????J?_M????|S{????<Y??=???4??wf?wM|??I]{?y??Ko&?2Qd????b????G?;?`?6??O|??%?to +`;??@???????BGe1?m?h????f????I`??V????6??l?q??h?D67?#???6?LEc??9?+?m?~???T1E??y?+%0? +?@????????7{???`n"?eF????/u?????????4/A??2????L?h?)6?? ?i2{{42?(*??9????79'?u???????X??Q?nlZ??aH[7+N9??????2ai????,????B???o?=?????|?X? S??:?i??7?L????;?,?I"?y?l?q?;? _???0???\?Y=.??X ???0??yH???e??#?d? +?N,z?CZ?9`????S>?S???? +?????3??j?!5????%?,?i6v?b?)??L?D? j??']?LD????o?e????3l}?????????? +?7??|??h?znp??D???@j?:????zS +]???#??5??EwG?n??wK8?S???????????????2??#???jY??-)??W??2??C??1?m???6?1&/w???nry?W??ru?c?O??????? +pi? ?o?z?< +~??????????.N??>?:?;???? +??k??&N? 
(?`f?q?tH??8?q??d??"jB,????[ p?1??2}?s?Yg/Ot?????w+?1??og.?>+?????7???> +endstream +endobj +159 0 obj << +/Type /Page +/Contents 160 0 R +/Resources 158 0 R +/MediaBox [0 0 595.276 841.89] +/Parent 153 0 R +/Annots [ 154 0 R 155 0 R 156 0 R 157 0 R ] +>> endobj +154 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [193.835 482.249 265.688 495.151] +/A << /S /GoTo /D (cite.van2000fixed) >> +>> endobj +155 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [271.858 482.249 295.669 495.151] +/A << /S /GoTo /D (cite.van2000fixed) >> +>> endobj +156 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [412.503 482.249 444.041 495.151] +/A << /S /GoTo /D (cite.bacon2008practical) >> +>> endobj +157 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [450.211 482.249 474.022 495.151] +/A << /S /GoTo /D (cite.bacon2008practical) >> +>> endobj +161 0 obj << +/D [159 0 R /XYZ 84.039 794.712 null] +>> endobj +45 0 obj << +/D [159 0 R /XYZ 85.039 657.853 null] +>> endobj +49 0 obj << +/D [159 0 R /XYZ 85.039 382.579 null] +>> endobj +158 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F46 135 0 R /F8 89 0 R /F37 90 0 R /F41 120 0 R /F21 121 0 R /F38 91 0 R /F24 162 0 R /F35 88 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +168 0 obj << +/Length 1956 +/Filter /FlateDecode +>> +stream +x??YKo?6??W??-P+|?,?I??n`?mm?$@?]??z??????C?P+?IO=I$??y|? +?Kf732{{B???????f:7J???z?eN??B????b5??1B??T2??*????? d?y? ?9e&'?{???\??m?y?r??/?????8a^?J??)?$A??????4SY????p?????(!???A????0???0}yv9??y???????? +?E?S2?-?]?q??&????"???vp?H??< ?@{(G???%{?a?@????>%2?#af? ??.?t?2???H??K?J-??G??????#)QL1???o?s???b&0????'?Kz????#*???q/??:??d????}?1???????c?????}???^?????(???0?b?}?Pdl???'??,G)+??j?M1??ZC???m +????I??????&??>??????K??cc??%A????9?? ? +??R?td??????!?7M LLd?????B?,???]??=?{%?0??mP?C*??????6,??~?tV?ww=[????uX(??[o??^?q?5???7???0?M?^?d???@???OV???*?l?6+??r???Wm???]>?Le?u????????? 
O??w??l??jc?n?7g,???_V~v??:????)??x?%D?=??????"{??;?Nsm;L?-?i:???\?wm???j5 ??r?o??'??47R2?e?,?:?Z?[t?Svq?t?i? +?v??>?m?4??????Be????[?OW???r?????%???0?z f??}{??#0????:/w?M??lL?????j6??R??' ?'?f?1?iwQJQh{??Z??????}??7??m'?r???T??w????u?BPCd^5???c??4??d!d?(?? ????????a??u?k?YA?q?M?R??:?]?&@?D? +?? h +(?|???6???y_ ??{??j???a?????]?--'??6???~?P?H?}8D?*??GUR?{?!??????V?z[6????&x???????]?$c?J?? ??m#r????U???? ?}e[*om?2??r+??n5??&??Dh?????Mcvr?q?'?Gs?'???A;?"[??Y???7'????????SG)???ns>'"!??.XUvU?'???G?????s????'7Y?]i?C?V?:??????1|%?M?}U????#??O??t???I?@?$M?q?SN]W?Ry?&?q?8?.?; +endstream +endobj +167 0 obj << +/Type /Page +/Contents 168 0 R +/Resources 166 0 R +/MediaBox [0 0 595.276 841.89] +/Parent 153 0 R +/Annots [ 164 0 R 165 0 R ] +>> endobj +164 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [242.927 166.115 339.862 179.016] +/A << /S /GoTo /D (cite.ankrim1994multicurrency) >> +>> endobj +165 0 obj << +/Type /Annot +/Subtype /Link +/Border[0 0 0]/H/I/C[0 1 0] +/Rect [346.961 166.115 370.772 179.016] +/A << /S /GoTo /D (cite.ankrim1994multicurrency) >> +>> endobj +169 0 obj << +/D [167 0 R /XYZ 84.039 794.712 null] +>> endobj +53 0 obj << +/D [167 0 R /XYZ 85.039 237.307 null] +>> endobj +166 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F46 135 0 R /F8 89 0 R /F37 90 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +173 0 obj << +/Length 1275 +/Filter /FlateDecode +>> +stream +x??X?o?6~?_!{??Z?#?"e`?" ?!h?[??@GI?9r`+???~?(Yw?Tyo}?H?x???_?!x ?8???f~??????qQ0?? A??%???]?q??xb?]????g?X??&? +7I????_??ZOI}??ZX? ?Zjn?QCzjZjTK??j??F?Es??d???$??[????rs??*???? ?v?????D??"????Dk5Zg?o????O?:???????c?F_w?k?b???~???0??J?y%??q?U?j#?t?O?"=?Q???E?xH??l???YrZ??JV?N?)?\l?l}Z?????????d???U?~)%?V+)>???c%]z??LE??^??jD??}*t???j?^.vaY.?-Lh???,????B???>?'h?s?_?o5?.???wo.?W???^]?]U??/?????? +?? +B??F?u?X??B!#??Q +????????2~??B@?&p?J??B??n? D??uj {L?"4L"[??Ol?h?l??@J ??3?c??????#?+`????P'????????;NA?8(&?y?D|??8m???9\?R +??? 
:?e?C1??b.7@?n??OL???J |?PU?-5??4?(?"???Zi??G????A?n!`?+?? r??i?*?D????)???!?L5_?|-?i??qF"?6^?@?I??Zl?V:P7a z?M?*n=??????Y/^??Jr?Q?I?*??,????D????x /????E??0?v??+??A???@??#?O"?z??r??=?P???;??)9x??F(N-??L??jt=pD?:??v?? ?4 ?kc??u???.b????G?h??8???8??;V$j??? ???????????d@?Z?K??P?Z^zK ?f]?u> endobj +174 0 obj << +/D [172 0 R /XYZ 84.039 794.712 null] +>> endobj +171 0 obj << + /ColorSpace 3 0 R /Pattern 2 0 R /ExtGState 1 0 R +/Font << /F46 135 0 R /F8 89 0 R >> +/ProcSet [ /PDF /Text ] +>> endobj +181 0 obj << +/Length 1955 +/Filter /FlateDecode +>> +stream +x??XYo?F~???C(4??}H??[)??J?? P??m??@RI?_?Y?R??)??P0w???o?Y??.????h|~?{??\?????f??U? +???u,?????Sj'S?U????????LU4???h?Ed? +??@F9Cj??x? ?a???????g?d?$Q?!\??`?????e?*??e?j????&???g?.&S?????dXuF ??c?.q?Qiq??xc>???P??y??*?G??8??????wr????fF??!iI??V??q?h +?j?????J63?l??Q?l(??????#?I-?r??;R??????%?S?Pa??????7{Uc???RF????GJ?%5qz#p?3?T/l]HpM+?Ut?2????? +??v??%??. ?$?8?3?@1g?D???xB????p?6?????????J???;?r?,a?:^{G?H???&)?l???????5b*q??M???????7???????+?)D?9Ce p????9:?'????? r????yb?E???p?u???d?WH??-? K????aN?/?v[?&S??X???????jY?uu? qJ? ?!?G? ?"?Y?,??M????s?? ?N??y?V???*t???bO?5j?)2???8???Z$????Q?^????v)?$???D)?M}$??a[?*?????Dh1d7?#?4?)?F?? + ??P?4a???x??rk??Y?????b???6>?ux?]?m?e?????=???#t???co???k +~?l? +^??J??u??n???ux?}???x??b?l}???? +?[F?M??? 
Author: xavierv Date: 2013-09-12 21:10:50 +0200 (Thu, 12 Sep 2013) New Revision: 3072 Added: pkg/Meucci/R/ButterflyTradingFunctions.R pkg/Meucci/R/RankingInformationFunctions.R pkg/Meucci/data/factorsDistribution.rda pkg/Meucci/man/HorizonPricing.Rd pkg/Meucci/man/ViewCurveSlope.Rd pkg/Meucci/man/ViewImpliedVol.Rd pkg/Meucci/man/ViewRealizedVol.Rd pkg/Meucci/man/factorsDistribution.Rd Modified: pkg/Meucci/DESCRIPTION pkg/Meucci/NAMESPACE pkg/Meucci/R/BlackScholesCallPrice.R pkg/Meucci/R/data.R pkg/Meucci/data/butterfliesAnalytics.rda pkg/Meucci/demo/ButterflyTrading.R pkg/Meucci/man/BlackScholesCallPrice.Rd pkg/Meucci/man/butterfliesAnalytics.Rd pkg/Meucci/man/returnsDistribution.Rd Log: - documented some functions for the ButterflyAnalitics paper and changed the datafiles Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2013-09-12 18:10:48 UTC (rev 3071) +++ pkg/Meucci/DESCRIPTION 2013-09-12 19:10:50 UTC (rev 3072) @@ -41,7 +41,6 @@ MASS, reshape2, Hmisc, - fOptions, moments, nloptr, ggplot2, Modified: pkg/Meucci/NAMESPACE =================================================================== --- pkg/Meucci/NAMESPACE 2013-09-12 18:10:48 UTC (rev 3071) +++ pkg/Meucci/NAMESPACE 2013-09-12 19:10:50 UTC (rev 3072) @@ -1,5 +1,7 @@ export(BlackLittermanFormula) export(BlackScholesCallPrice) +export(BlackScholesCallPutPrice) +export(BlackScholesPutPrice) export(Central2Raw) export(CentralAndStandardizedStatistics) export(CMAcombination) @@ -25,6 +27,7 @@ export(GenerateLogNormalDistribution) export(GenerateUniformDrawsOnUnitSphere) export(hermitePolynomial) +export(HorizonPricing) export(integrateSubIntervals) export(InterExtrapolate) export(LeastInfoKernel) @@ -64,3 +67,6 @@ export(SummStats) export(Tweak) export(TwoDimEllipsoid) +export(ViewCurveSlope) +export(ViewImpliedVol) +export(ViewRealizedVol) Modified: pkg/Meucci/R/BlackScholesCallPrice.R 
=================================================================== --- pkg/Meucci/R/BlackScholesCallPrice.R 2013-09-12 18:10:48 UTC (rev 3071) +++ pkg/Meucci/R/BlackScholesCallPrice.R 2013-09-12 19:10:50 UTC (rev 3072) @@ -1,5 +1,5 @@ -#' Compute the Black-Scholes price of a European call option -#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. +#' Compute the Black-Scholes price of a European call or put option +#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. #' #' @param spot : [scalar] spot price of underlying #' @param K : [scalar] strike of the call optioon @@ -8,7 +8,8 @@ #' @param T : [scalar] time to maturity in years #' #' @return c : [scalar] price of European call(s) -#' @return delta : [scalar] delta of the call(s) +#' @return p : [scalar] price of European put(s) +#' @return delta : [scalar] delta of the call(s) or put(s) #' @return cash : [scalar] cash held in a replicating portfolio #' #' @note @@ -21,13 +22,41 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -BlackScholesCallPrice = function(spot, K, r, vol, T) +BlackScholesCallPrice = function( spot, K, r, vol, T ) { - d1 = ( log( spot / K ) + ( r + vol * vol / 2) * T) / (vol * sqrt(T)); - d2 = d1 - vol * sqrt(T); + d1 = ( log( spot / K ) + ( r + vol * vol / 2) * T) / (vol * sqrt(T)); + d2 = d1 - vol * sqrt(T); delta = pnorm(d1); - cash = -K * exp( -r * T ) * pnorm( d2 ); - c = spot * delta + cash; + cash = -K * exp( -r * T ) * pnorm( d2 ); + c = spot * delta + cash; return( list( c = c, delta = delta, cash = cash ) ); -} \ No newline at end of file +} + +#' @rdname BlackScholesCallPrice +#' @export + +BlackScholesPutPrice = function( spot, K, r, vol, T ) +{ + d1 = ( log( spot / K ) + ( r + vol * vol / 2) * T) / (vol * sqrt(T)); + d2 = d1 - vol * sqrt(T); + delta = pnorm( -d1 ); + cash = -K * exp( -r * T ) * pnorm( d2 ); + p = -( spot * delta + cash ); + + return( list( put = p, delta = delta, cash = cash ) ); +} + +#' @rdname 
BlackScholesCallPrice +#' @export + +BlackScholesCallPutPrice = function( spot, K, r, vol, T ) +{ + d1 = ( log( spot / K ) + ( r + vol * vol / 2) * T) / (vol * sqrt(T)); + d2 = d1 - vol * sqrt(T); + cash = -K * exp( -r * T ) * pnorm( d2 ); + c = spot * pnorm( d1 ) + cash; + p = -( spot * pnorm( -d1 ) + cash); + + return( list( call = c, put = p, cash = cash ) ); +} Added: pkg/Meucci/R/ButterflyTradingFunctions.R =================================================================== --- pkg/Meucci/R/ButterflyTradingFunctions.R (rev 0) +++ pkg/Meucci/R/ButterflyTradingFunctions.R 2013-09-12 19:10:50 UTC (rev 3072) @@ -0,0 +1,394 @@ +# In order of appearance in the demo script ButterflyTrading.R + +MapVol = function( sig , y , K , T ) +{ + # in real life a and b below should be calibrated to security-specific time series + + a = -0.00000000001 + b = 0.00000000001 + + s = sig + a/sqrt(T) * ( log(K) - log(y) ) + b/T*( log(K) - log(y) )^2 + + return( s ) +} + +#' Compute the pricing in the horizon, as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", +#' The Risk Magazine, October 2008, p 100-106. +#' +#' @param Butterflies : List of securities with some analytics computed. +#' @param X : Panel of joint factors realizations +#' +#' @return PnL : Matrix of profit and loss scenarios +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "ButterflyTrading/HorizonPricing.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} +#' @export + +HorizonPricing = function( Butterflies , X ) +{ + r = 0.04 # risk-free rate + tau = 1/252 # investment horizon + + # factors: 1. 'MSFT_close' 2. 'MSFT_vol_30' 3. 'MSFT_vol_91' 4. 'MSFT_vol_182' + # securities: 1. 'MSFT_vol_30' 2. 'MSFT_vol_91' 3. 'MSFT_vol_182' + + # create a new row called DlnY and Dsig + # create a new row called 'DlnY'. 
Assign the first row (vector) of X to this DlnY for the 1:3 securities + for ( s in 1:3 ) { Butterflies[[s]]$DlnY = X[ , 1 ] } + + # assign the 2nd row of X to a new element called Dsig + Butterflies[[1]]$Dsig=X[ , 2 ] + Butterflies[[2]]$Dsig=X[ , 3 ] + Butterflies[[3]]$Dsig=X[ , 4 ] + + # factors: 5. 'YHOO_close' 6. 'YHOO_vol_30' 7. 'YHOO_vol_91' 8. 'YHOO_vol_182' + # securities: 4. 'YHOO_vol_30' 5. 'YHOO_vol_91' 6. 'YHOO_vol_182' + for ( s in 4:6 ) { Butterflies[[s]]$DlnY=X[ , 5 ] } + + Butterflies[[4]]$Dsig=X[ , 6 ] + Butterflies[[5]]$Dsig=X[ , 7 ] + Butterflies[[6]]$Dsig=X[ , 8 ] + + # factors: # 9. 'GOOG_close' 10. 'GOOG_vol_30' 11. 'GOOG_vol_91' 12. 'GOOG_vol_182' + # securities: 7. 'GOOG_vol_30' 8. 'GOOG_vol_91' 9. 'GOOG_vol_182' + for ( s in 7:9 ) { Butterflies[[s]]$DlnY=X[ , 9 ] } + + Butterflies[[7]]$Dsig=X[ , 10 ] + Butterflies[[8]]$Dsig=X[ , 11 ] + Butterflies[[9]]$Dsig=X[ , 12 ] + + PnL = matrix( NA , nrow = nrow(X) ) + + for ( s in 1:length(Butterflies) ) + { + Y = Butterflies[[s]]$Y_0 * exp(Butterflies[[s]]$DlnY) + ATMsig = apply( cbind( Butterflies[[s]]$sig_0 + Butterflies[[s]]$Dsig , 10^-6 ) , 1 , max ) + t = Butterflies[[s]]$T - tau + K = Butterflies[[s]]$K + sig = MapVol(ATMsig , Y , K , t ) + + ############# Ram's Code: Substituted with package's own functions ################################# + # + ## library(RQuantLib) # this function can only operate on one option at a time, so we use fOptions + ##C = EuropeanOption( type = "call" , underlying = Y , strike = K , dividendYield = 0 , riskFreeRate = r , maturity = t , volatility = sig )$value + ## P = EuropeanOption( type = "put" , underlying = Y , strike = K , dividendYield = 0 , riskFreeRate = r , maturity = t , volatility = sig )$value + + ## use fOptions to value options + #library( fOptions ) + #C = GBSOption( TypeFlag = "c" , S = Y , X = K , r = r , b = 0 , Time = t , sigma = sig ) + #P = GBSOption( TypeFlag = "p" , S = Y , X = K , r = r , b = 0 , Time = t , sigma = sig ) + # + 
#################################################################################################### + + BS = BlackScholesCallPutPrice( Y, K, r, sig, t ) + + Butterflies[[s]]$P_T = BS$call + BS$put + PnL = cbind( PnL , Butterflies[[s]]$P_T ) + } + + PnL = PnL[ , -1 ] + + return( PnL ) +} + +ViewCurveSlopeTest = function( X , p ) +{ + J = nrow( X ) ; K = ncol( X ) + + # constrain probabilities to sum to one... + Aeq = matrix( 1, 1 , J ) + beq = matrix( 1 , nrow = 1 , ncol = 1 ) + browser() + # ...constrain the expectation... + V = matrix( , nrow = nrow( X ) , ncol = 0 ) + # Add 3 equality views + V = cbind( V , X[ , 14 ] - X[ , 13 ] ) # View 1: spread on treasuries + V = cbind( V , X[ , 14 ] - X[ , 13 ] ) # View 2: identical view (spread on treasuries) + V = cbind( V , X[ , 6 ] - X[ , 5 ] ) # View 3: difference in YHOO Vol + v = matrix( c( .0005 , 0 ) , nrow = ncol( V ) , ncol = 1 ) + + Aeq = rbind( Aeq , t(V) ) + + beq = rbind( beq , v ) + + # add an inequality view + # ...constrain the median... + V = abs( X[ , 1 ] ) # absolute value of the log of changes in MSFT close prices (definition of realized volatility) + V_Sort = sort( V , decreasing = FALSE ) # sorting of the abs value of log changes in prices from smallest to largest + I_Sort = order( V ) + + F = cumsum( p[ I_Sort ] ) # represents the cumulative sum of probabilities from ~0 to 1 + + I_Reference = max( matlab:::find( F <= 3/5 ) ) # finds the (max) index corresponding to element with value <= 3/5 along the empirical cumulative density function for the abs log-changes in price + V_Reference = V_Sort[ I_Reference ] # returns the corresponding abs log of change in price at the 3/5 of the cumulative density function + + I_Select = find( V <= V_Reference ) # finds all indices with value of abs log-change in price less than the reference value + a = zeros( 1 , J ) + a[ I_Select ] = 1 # select those cases where the abs log-change in price is less than the 3/5 of the empirical cumulative density... 
+ + A = a + b = 0.5 # ... and assign the probability of these cases occuring as 50%. This moves the media of the distribution + + # ...compute posterior probabilities + p_ = EntropyProg( p , A , b , Aeq ,beq ) + return( p_ ) +} + + +#' Process the inequality view, as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", +#' The Risk Magazine, October 2008, p 100-106. +#' +#' @param X : panel of joint factors realizations +#' @param p : vector of probabilities +#' +#' @return p_ : vector of posterior probabilities +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "ButterflyTrading/ViewRealizedVol.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} +#' @export + +ViewImpliedVol = function( X , p ) +{ + # View 1 (inequality view): bearish on on 2m-6m implied volaility spread for Google + + J = nrow( X ) ; + K = ncol( X ); + + # constrain probabilities to sum to one... + Aeq = matrix( 1, 1 , J ) + beq = 1 + + # ...constrain the expectation... + V = X[ , 12 ] - X[ , 11 ] # GOOG_vol_182 (6m implied vol) - GOOG_vol_91 (2m implied vol) + m = mean( V ) + s = std( V ) + + A = t( V ) + b = m - s + + # ...compute posterior probabilities + p_ = EntropyProg( p , A , b , Aeq , beq )$p_ + + return( p_ ) +} + +#' Process the relative inequality view on median, as it appears in A. Meucci, +#' "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, +#' p 100-106 +#' +#' @param X : panel of joint factors realizations +#' @param p : vector of probabilities +#' +#' @return p_ : vector of posterior probabilities +#' +#' @references +#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "ButterflyTrading/ViewRealizedVol.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} +#' @export + +ViewRealizedVol = function( X , p ) +{ + # view 2 bullish on realized volatility of MSFT (i.e. absolute log-change in the underlying). + # This is the variable such that, if larger than a threshold, a long position in the butterfly turns into a profit (e.g. Rachev 2003) + # we issue a relative statement on the media comparing it with the third quintile implied by the reference market model + + library( matlab ) + J = nrow( X ) ; K = ncol( X ) + + # constrain probabilities to sum to one... + Aeq = matrix( 1, 1 , J ) + beq = 1 + + # ...constrain the median... + V = abs( X[ , 1 ] ) # absolute value of the log of changes in MSFT close prices (definition of realized volatility) + + V_Sort = sort( V , decreasing = FALSE ) # sorting of the abs value of log changes in prices from smallest to largest + I_Sort = order( V ) + + F = cumsum( p[ I_Sort ] ) # represents the cumulative sum of probabilities from ~0 to 1 + + I_Reference = max( matlab:::find( F <= 3/5 ) ) # finds the (max) index corresponding to element with value <= 3/5 along the empirical cumulative density function for the abs log-changes in price + V_Reference = V_Sort[ I_Reference ] # returns the corresponding abs log of change in price at the 3/5 of the cumulative density function + + I_Select = find( V <= V_Reference ) # finds all indices with value of abs log-change in price less than the reference value + + a = zeros( 1 , J ) + a[ I_Select ] = 1 # select those cases where the abs log-change in price is less than the 3/5 of the empirical cumulative density... + + A = a + b = .5 # ... and assign the probability of these cases occuring as 50%. 
This moves the media of the distribution + + # ...compute posterior probabilities + p_ = EntropyProg( p , A , b , Aeq , beq )$p_ + + return( p_ ) +} + +#' Process views for the expectations and binding constraints as it appears in A. Meucci, +#' "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, +#' p 100-106 +#' +#' @param X : panel of joint factors realizations +#' @param p : vector of probabilities +#' +#' @return p_ : vector of posterior probabilities +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "ButterflyTrading/ViewCurveSlope.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @export + +ViewCurveSlope = function( X , p ) +{ + # view 3 + + J = nrow( X ); + K = ncol( X ); + + # constrain probabilities to sum to one... + Aeq = matrix( 1, 1 , J ); + beq = 1; + + # ...constrain the expectation... + V = X[ , 14 ] - X[ , 13 ]; + v = 0.0005; + + Aeq = rbind( Aeq , t(V) ); + + beq = rbind( beq , v ); + + A = b = matrix( nrow = 0 , ncol = 0 ); + + # ...compute posterior probabilities + p_ = EntropyProg( p , A , b , Aeq ,beq )$p_; + + return( p_ ); +} + +ComputeCVaR = function( Units , Scenarios , Conf ) +{ + PnL = Scenarios %*% Units + Sort_PnL = PnL[ order( PnL , decreasing = FALSE ) ] + + J = length( PnL ) + Cut = round( J %*% ( 1 - Conf ) , 0 ) + + CVaR = -mean( Sort_PnL[ 1:Cut ] ) + + return( CVaR ) +} + +LongShortMeanCVaRFrontier = function( PnL , Probs , Butterflies , Options ) +{ + library( matlab ) + library( quadprog ) + library( limSolve ) + + # setup constraints + J = nrow(PnL); N = ncol(PnL) + P_0s = matrix( , nrow = 1 , ncol = 0 ) + D_s = matrix( , nrow = 1 , ncol = 0 ) + emptyMatrix = matrix( nrow = 0 , ncol = 0 ) + + for ( n in 1:N ) + { + P_0s = cbind( P_0s , Butterflies[[n]]$P_0 ) # 1x9 matrix + D_s = cbind( D_s , Butterflies[[n]]$Delta ) # 1x9 matrix + } + + Constr = list() + Constr$Aeq = P_0s # linear coefficients 
in the constraints Aeq*X = beq (equality constraints) + Constr$beq = Options$Budget # the constant vector in the constraints Aeq*x = beq + + if ( Options$DeltaNeutral == TRUE ) + { + Constr$Aeq = rbind( Constr$Aeq , D_s ) # 2x9 matrix + Constr$beq = rbind( Constr$beq , 0 ) # 2x9 matrix + } + + Constr$Aleq = rbind( diag( as.vector( P_0s ) ) , -diag( as.vector( P_0s ) ) ) # linear coefficients in the constraints A*x <= b. an 18x9 matrix + Constr$bleq = rbind( Options$Limit * matrix( 1,N,1) , Options$Limit * matrix( 1,N,1) ) # constant vector in the constraints A*x <= b. an 18x1 matrix + + # determine expectation of minimum-variance portfolio + Exps = t(PnL) %*% Probs + Scnd_Mom = t(PnL) %*% (PnL * (Probs %*% matrix( 1,1,N) ) ) + Scnd_Mom = ( Scnd_Mom + t(Scnd_Mom) ) / 2 + Covs = Scnd_Mom - Exps %*% t(Exps) + + Amat = rbind( Constr$Aeq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints + bvec = rbind( Constr$beq , Constr$bleq ) # stack the equality constraints on top of the inequality constraints + + #if ( nrow(Covs) != length( zeros(N,1) ) ) stop("Dmat and dvec are incompatible!") + #if ( nrow(Covs) != nrow(Amat)) stop("Amat and dvec are incompatible!") + + MinSDev_Units = solve.QP( Dmat = Covs , dvec = -1 * zeros(N,1) , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( Constr$beq) ) # TODO: Check this + MinSDev_Exp = t( MinSDev_Units$solution ) %*% Exps + + # determine expectation of maximum-expectation portfolio + + MaxExp_Units = linp( E = Constr$Aeq , F = Constr$beq , G = -1*Constr$Aleq , H = -1*Constr$bleq , Cost = -Exps , ispos = FALSE )$X + + MaxExp_Exp = t( MaxExp_Units ) %*% Exps + + # slice efficient frontier in NumPortf equally thick horizontal sections + Grid = t( seq( from = Options$FrontierSpan[1] , to = Options$FrontierSpan[2] , length.out = Options$NumPortf ) ) + TargetExp = as.numeric( MinSDev_Exp ) + Grid * as.numeric( ( MaxExp_Exp - MinSDev_Exp ) ) + + # compute composition, expectation, s.dev. 
and CVaR of the efficient frontier + Composition = matrix( , ncol = N , nrow = 0 ) + Exp = matrix( , ncol = 1 , nrow = 0 ) + SDev = matrix( , ncol = 1 , nrow = 0 ) + CVaR = matrix( , ncol = 1 , nrow = 0 ) + + for (i in 1:Options$NumPortf ) + { + # determine least risky portfolio for given expectation + AEq = rbind( Constr$Aeq , t(Exps) ) # equality constraint: set expected return for each asset... + bEq = rbind( Constr$beq , TargetExp[i] ) + + Amat = rbind( AEq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints + bvec = rbind( bEq , Constr$bleq ) # ...and target portfolio return for i'th efficient portfolio + + # Why is FirstDegree "expected returns" set to 0? + # Becasuse we capture the equality view in the equality constraints matrix + # In other words, we have a constraint that the Expected Returns by Asset %*% Weights = Target Return + Units = solve.QP( Dmat = Covs , dvec = -1*zeros(N,1) , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( bEq ) ) + + # store results + Composition = rbind( Composition , t( Units$solution ) ) + + Exp = rbind( Exp , t( Units$solution ) %*% Exps ) + SDev = rbind( SDev , sqrt( t( Units$solution ) %*% Covs %*% Units$solution ) ) + CVaR = rbind( CVaR , ComputeCVaR( Units$solution , PnL , Options$Quant ) ) + } + + colnames( Composition ) = c( "MSFT_vol_30" , "MSFT_vol_91" , "MSFT_vol_182" , + "YHOO_vol_30" , "YHOO_vol_91" , "YHOO_vol_182" , + "GOOG_vol_30" , "GOOG_vol_91" , "GOOG_vol_182" ) + + return( list( Exp = Exp , SDev = SDev , CVaR = CVaR , Composition = Composition ) ) +} + + +MapVol = function( sig , y , K , T ) +{ + # in real life a and b below should be calibrated to security-specific time series + + a = -0.00000000001 + b = 0.00000000001 + + s = sig + a/sqrt(T) * ( log(K) - log(y) ) + b/T*( log(K) - log(y) )^2 + + return( s ) +} + Added: pkg/Meucci/R/RankingInformationFunctions.R =================================================================== --- 
pkg/Meucci/R/RankingInformationFunctions.R (rev 0) +++ pkg/Meucci/R/RankingInformationFunctions.R 2013-09-12 19:10:50 UTC (rev 3072) @@ -0,0 +1,224 @@ +# TODO: add max weights constraint to EfficientFrontier() +# TODO: add computeCVaR to EfficientFrontier() +# TODO: confirm QuadProg does not have a bug (i.e. it can optimize expected returns without use dvec by adding an equality constraint) + +#' Plots the efficient frontier, as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +#' October 2008, p 100-106. +#' +#' @param e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier +#' @param s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier +#' @param w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/PlotFrontier.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @export + + +PlotFrontier = function( e, s, w ) +{ + xx = dim( w )[ 1 ]; + N = dim( w )[ 2 ]; + Data = t( apply( w, 1, cumsum ) ); + + plot( c(min(s), 0), xlim = c( min(s) , max(s) ), ylim = c( 0, max(Data) ), + main= "frontier", xlab = " Portfolio # risk propensity", ylab = "Portfolio composition" ); + + for( n in 1 : N ) + { + x = rbind( min(s), s, max(s) ); + y = rbind( 0, matrix( Data[ , N-n+1 ] ), 0 ); + polygon( x, y, col = rgb( 0.9 - mod(n,3)*0.2, 0.9 - mod(n,3)*0.2, 0.9 - mod(n,3)*0.2) ); + } +} + +#' Plots the results of computing the efficient frontier (Expected returns and frontier), as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, +#' October 2008, p 100-106. 
+#' +#' @param e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier +#' @param s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier +#' @param w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier +#' @param M the NumPortf x 1 vector of expected returns for each asset +#' @param Lower constraints +#' @param Upper constraints +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/PlotResults.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} + +PlotResults = function( e, s, w, M, Lower = NULL , Upper = NULL) +{ + N = length( M ); + dev.new(); + par( mfrow = c( 1, 2 ) ); + h1 = hist( M*100, plot = F ) + barplot( h1$density, horiz = T, main = "expected returns", xlab = "", ylab = "" ); + if(length(Lower) || length(Upper)) + { + Changed = array( 0, N ); + Changed[ union( Lower, Upper ) ] = M[ union( Lower, Upper ) ] * 100; + h2 = hist(Changed, plot = F ); + barplot( h2$density, horiz = T, col = "red", add = T ); + } + + PlotFrontier( e*100, s*100, w ); +} + + + +#' Computes posterior probabilities to view the rankings, as it appears in A. Meucci, +#' "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, p 100-106. +#' +#' @param X a vector containing returns for all the asset classes +#' @param p a vector containing the prior probability values +#' @param Lower a vector of indexes indicating which column is lower than the corresponding column number in Upper +#' @param Upper a vector of indexes indicating which column is lower than the corresponding column number in Upper +#' +#' @references +#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/ViewRanking.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} +#' @export EntropyProg + +# example ViewRanking( X , p , Lower = c(3,4) , Upper = c(4,5) ) # two inequality views: asset 3 < asset 4 returns, and asset 4 < asset 5 returns + +ViewRanking = function( X , p , Lower , Upper ) +{ + library( matlab ) + J = nrow( X ) + N = ncol( X ) + + K = length( Lower ) + + # constrain probabilities to sum to one across all scenarios... + Aeq = ones( 1 , J ) + beq = 1 + + # ...constrain the expectations... A*x <= 0 + # X[,Lower] refers to the column of returns for Asset-lower + # X[,Upper] refers to the column of returns for Asset-lower + # X[ , Lower ] - X[ , Upper ] is vector returns of the "lower"" asset less the returns of the "higher" asset + V = X[ , Lower ] - X[ , Upper ] # Jx1 vector. Expectation is assigned to each scenario + + A = t( V ) + b = 0 # The expectation is that (Lower - Upper)x <= 0. (i.e. The returns of upper are greater than zero for each scenario) + + # ...compute posterior probabilities + p_ = EntropyProg( p , A , as.matrix(b) , Aeq , as.matrix(beq) ) + + return( p_ ) +} + +#' Generates an efficient frontier based on Meucci's Ranking Information version and returns a A list with +#' NumPortf efficient portfolios whos returns are equally spaced along the whole range of the efficient frontier, +#' as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, +#' p 100-106. 
+#' +#' Most recent version of article and MATLAB code available at +#' http://www.symmys.com/node/158 +#' +#' @param X a matrix with the joint-scenario probabilities by asset (rows are joint-scenarios, columns are assets) +#' @param p a vector of probabilities associated with each scenario in matrix X +#' @param Options a list of options....TBD +#' +#' @return Exps the NumPortf x 1 vector of expected returns for each asset +#' @return Covs the NumPortf x N vector of security volatilities along the efficient frontier +#' @return w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier +#' @return e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier +#' @return s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "RankingInformation/EfficientFrontier.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} +#' @export + +RIEfficientFrontier = function( X , p , Options) +{ + + if( !require("limSolve") ) stop("This script requieres the limSolve package installed") + + + library( matlab ) + + J = nrow( X ) # number of scenarios + N = ncol( X ) # number of assets + + Exps = t(X) %*% p # probability-weighted expected return of each asset + + Scnd_Mom = t(X) %*% (X * ( p %*% matrix( 1, 1 , N ) ) ) + Scnd_Mom = ( Scnd_Mom + t(Scnd_Mom) ) / 2 # an N*N matrix + Covs = Scnd_Mom - Exps %*% t( Exps ) + + Constr = list() + + # constrain the sum of weights to 1 + Constr$Aeq = matrix( 1, 1 , N ) + Constr$beq = 1 + + # constrain the weight of any security to between 0 and 1 + Constr$Aleq = rbind( diag( 1, N ) , - diag( 1, N ) ) # linear coefficients matrix A in the inequality constraint A*x <= b + Constr$bleq = rbind( matrix( 1, N, 1 ) , 
matrix( 0, N, 1 ) ) # constraint vector b in the inequality constraint A*x <= b + + Amat = rbind( Constr$Aeq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints + bvec = rbind( Constr$beq , Constr$bleq ) # stack the equality constraints on top of the inequality constraints + + ############################################################################################ + # determine return of minimum-risk portfolio + FirstDegree = matrix( 0, N , 1 ) # TODO: assumes that securities have zero expected returns when computing efficient frontier? + SecondDegree = Covs + # Why is FirstDegree "expected returns" set to 0? + # We capture the equality view in the equality constraints matrix + # In other words, we have a constraint that the Expected Returns by Asset %*% Weights = Target Return + MinVol_Weights = solve.QP( Dmat = SecondDegree , dvec = -1*FirstDegree , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( Constr$beq ) ) + MinSDev_Exp = t( MinVol_Weights$solution ) %*% Exps + + ############################################################################################ + # determine return of maximum-return portfolio + FirstDegree = -Exps + MaxRet_Weights = linp( E = Constr$Aeq , F = Constr$beq , G = -1*Constr$Aleq , H = -1*Constr$bleq , Cost = FirstDegree , ispos = FALSE )$X + MaxExp_Exp = t( MaxRet_Weights) %*% Exps + + ############################################################################################ + # slice efficient frontier in NumPortf equally thick horizontal sections + Grid = matrix( , ncol = 0 , nrow = 0 ) + Grid = t( seq( from = Options$FrontierSpan[1] , to = Options$FrontierSpan[2] , length.out = Options$NumPortf ) ) + + # the portfolio return varies from a minimum of MinSDev_Exp up to a maximum of MaxExp_Exp + # We establish equally-spaced portfolio return targets and use this find efficient portfolios + # in the next step + Targets = as.numeric( MinSDev_Exp ) + Grid * as.numeric( ( MaxExp_Exp - MinSDev_Exp ) 
) + + ############################################################################################ + # compute the NumPortf compositions and risk-return coordinates + FirstDegree = matrix( 0, N , 1 ) + + w = matrix( , ncol = N , nrow = 0 ) + e = matrix( , ncol = 1 , nrow = 0 ) + s = matrix( , ncol = 1 , nrow = 0 ) + + for ( i in 1:Options$NumPortf ) + { + # determine least risky portfolio for given expected return + # Ax = b ; Exps %*% weights = Target Return + AEq = rbind( Constr$Aeq , t( Exps ) ) # equality constraint: set expected return for each asset... + bEq = rbind( Constr$beq , Targets[ i ] ) # ...and target portfolio return for i'th efficient portfolio + + Amat = rbind( AEq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints + bvec = rbind( bEq , Constr$bleq ) + + Weights = solve.QP( Dmat = SecondDegree , dvec = -1*FirstDegree , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( bEq ) ) + + w = rbind( w , Weights$solution ) + s = rbind( s , sqrt( t(Weights$solution) %*% Covs %*% Weights$solution ) ) + e = rbind( e , Weights$solution %*% Exps ) + } + + return( list( e = e , Sdev = s , Composition = w , Exps = Exps , Covs = Covs ) ) +} Modified: pkg/Meucci/R/data.R =================================================================== --- pkg/Meucci/R/data.R 2013-09-12 18:10:48 UTC (rev 3071) +++ pkg/Meucci/R/data.R 2013-09-12 19:10:50 UTC (rev 3072) @@ -210,7 +210,7 @@ #' @keywords data NULL -#' @title Panel X of joint returns realizations and vector p of respective probabilities +#' @title Panel X of joint returns realizations and vector p of respective probabilities for returns #' #' @name returnsDistribution #' @docType data @@ -220,9 +220,9 @@ #' @keywords data NULL -#' @title Factor Distribution Butterflies +#' @title Panel X of joint factors realizations and vector p of respective probabilities for factors #' -#' @name FDButterflies +#' @name factorsDistribution #' @docType data #' @author Xavier 
Valls\email{flamejat@@gmail.com} #' @references A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, @@ -230,7 +230,7 @@ #' @keywords data NULL -#' @title Butterflies Analytics +#' @title list of securities with analytics computed. #' #' @name butterfliesAnalytics #' @docType data Modified: pkg/Meucci/data/butterfliesAnalytics.rda =================================================================== (Binary files differ) Added: pkg/Meucci/data/factorsDistribution.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/factorsDistribution.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/Meucci/demo/ButterflyTrading.R =================================================================== --- pkg/Meucci/demo/ButterflyTrading.R 2013-09-12 18:10:48 UTC (rev 3071) +++ pkg/Meucci/demo/ButterflyTrading.R 2013-09-12 19:10:50 UTC (rev 3072) @@ -9,27 +9,25 @@ #' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} #' See Meucci script for "ButterflyTrading/S_MAIN.m" #' -#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} +#' @author Xavier Valls \email{flamejat@@gmail.com} and Ram Ahluwalia \email{ram@@wingedfootcapital.com} +########################################################################################################### +# Load panel X of joint factors realizations and vector p of respective probabilities +# In real life, these are provided by the estimation process +########################################################################################################### -################################################################### -#' Load panel X of joint factors realizations and vector p of respective probabilities -#' In real life, these are provided by the estimation process -################################################################### -load("butterflyTradingX.rda") +load( "../data/factorsDistribution.rda" ) -#library( R.matlab ) -#library( matlab ) [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3072 From noreply at r-forge.r-project.org Thu Sep 12 21:30:31 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Sep 2013 21:30:31 +0200 (CEST) Subject: [Returnanalytics-commits] r3073 - in pkg/PortfolioAnalytics/sandbox: . 
symposium2013 symposium2013/data Message-ID: <20130912193031.49AB6185E37@r-forge.r-project.org> Author: peter_carl Date: 2013-09-12 21:30:30 +0200 (Thu, 12 Sep 2013) New Revision: 3073 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/ pkg/PortfolioAnalytics/sandbox/symposium2013/R/ pkg/PortfolioAnalytics/sandbox/symposium2013/README.md pkg/PortfolioAnalytics/sandbox/symposium2013/cache/ pkg/PortfolioAnalytics/sandbox/symposium2013/data/ pkg/PortfolioAnalytics/sandbox/symposium2013/data/EDHEC-index-history.csv pkg/PortfolioAnalytics/sandbox/symposium2013/logs/ pkg/PortfolioAnalytics/sandbox/symposium2013/results/ pkg/PortfolioAnalytics/sandbox/symposium2013/src/ Log: - initial project structure Added: pkg/PortfolioAnalytics/sandbox/symposium2013/README.md =================================================================== Added: pkg/PortfolioAnalytics/sandbox/symposium2013/data/EDHEC-index-history.csv =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/data/EDHEC-index-history.csv (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/data/EDHEC-index-history.csv 2013-09-12 19:30:30 UTC (rev 3073) @@ -0,0 +1,201 @@ +date;Convertible Arbitrage;CTA Global;Distressed Securities;Emerging Markets;Equity Market Neutral;Event Driven;Fixed Income Arbitrage;Global Macro;Long/Short Equity;Merger Arbitrage;Relative Value;Short Selling;Funds Of Funds +31/01/1997;1.19%;3.93%;1.78%;7.91%;1.89%;2.13%;1.91%;5.73%;2.81%;1.50%;1.80%;-1.66%;3.17% +28/02/1997;1.23%;2.98%;1.22%;5.25%;1.01%;0.84%;1.22%;1.75%;-0.06%;0.34%;1.18%;4.26%;1.06% +31/03/1997;0.78%;-0.21%;-0.12%;-1.20%;0.16%;-0.23%;1.09%;-1.19%;-0.84%;0.60%;0.10%;7.78%;-0.77% +30/04/1997;0.86%;-1.70%;0.30%;1.19%;1.19%;-0.05%;1.30%;1.72%;0.84%;-0.01%;1.22%;-1.29%;0.09% +31/05/1997;1.56%;-0.15%;2.33%;3.15%;1.89%;3.46%;1.18%;1.08%;3.94%;1.97%;1.73%;-7.37%;2.75% +30/06/1997;2.12%;0.85%;2.17%;5.81%;1.65%;2.58%;1.08%;2.18%;2.23%;2.31%;1.98%;-0.65%;2.25% 
+31/07/1997;1.93%;5.91%;2.34%;5.60%;2.47%;3.07%;0.95%;7.38%;4.54%;2.00%;1.81%;-4.29%;4.35% +31/08/1997;1.34%;-4.73%;1.47%;-0.66%;0.17%;0.71%;0.87%;-1.80%;1.07%;0.79%;1.03%;-0.72%;0.51% +30/09/1997;1.22%;1.98%;3.50%;2.29%;2.02%;3.29%;1.19%;2.90%;4.29%;1.97%;1.83%;-1.55%;3.34% +31/10/1997;1.00%;-0.98%;-0.64%;-5.72%;0.95%;0.61%;-0.32%;-1.42%;0.10%;0.94%;0.79%;5.72%;-0.99% +30/11/1997;0.00%;1.33%;0.54%;-3.78%;0.41%;1.34%;0.53%;1.06%;-0.26%;2.23%;1.11%;2.17%;-0.34% +31/12/1997;0.68%;2.86%;0.73%;1.60%;0.66%;1.54%;0.79%;2.64%;1.04%;1.58%;0.82%;1.61%;0.89% +31/01/1998;1.45%;1.04%;0.95%;-4.29%;0.60%;0.55%;-0.26%;-0.50%;0.13%;0.55%;1.32%;0.14%;-0.36% +28/02/1998;1.46%;-0.65%;2.27%;3.39%;1.35%;2.94%;0.98%;1.28%;3.42%;2.12%;1.30%;1.55%;2.56% +31/03/1998;1.44%;1.22%;2.52%;3.18%;1.79%;2.63%;1.28%;5.70%;3.36%;1.64%;1.45%;6.37%;3.73% +30/04/1998;1.26%;-2.96%;1.65%;0.41%;0.67%;1.04%;0.75%;0.34%;1.20%;1.39%;1.45%;6.57%;1.25% +31/05/1998;0.56%;1.93%;0.06%;-8.25%;0.80%;-0.83%;0.40%;0.95%;-0.87%;-0.09%;0.53%;14.37%;-0.72% +30/06/1998;-0.06%;0.51%;-0.47%;-4.22%;1.08%;0.02%;-0.80%;1.20%;1.67%;0.72%;0.26%;-0.53%;0.21% +31/07/1998;0.60%;-0.10%;-0.69%;0.19%;0.12%;-0.37%;1.06%;0.58%;-0.06%;0.07%;0.11%;3.43%;-0.07% +31/08/1998;-3.19%;6.91%;-8.36%;-19.22%;-1.07%;-8.86%;-1.43%;-2.63%;-5.52%;-5.44%;-3.41%;24.63%;-6.16% +30/09/1998;-1.96%;4.54%;-2.15%;-3.95%;0.61%;-1.10%;-3.62%;-0.59%;2.06%;0.76%;0.05%;-3.76%;-0.37% +31/10/1998;-2.14%;0.04%;-0.29%;1.40%;0.52%;0.91%;-8.01%;-2.23%;1.69%;1.59%;-1.40%;-10.77%;-0.02% +30/11/1998;2.69%;-0.89%;1.64%;4.30%;1.58%;2.44%;0.52%;1.94%;2.91%;2.20%;1.98%;-7.56%;2.20% +31/12/1998;1.13%;2.21%;1.08%;-0.98%;2.09%;2.19%;1.20%;2.33%;4.08%;2.24%;1.64%;-5.31%;2.22% +31/01/1999;2.19%;-1.67%;1.81%;-1.20%;1.01%;2.01%;1.58%;0.86%;2.58%;1.12%;1.95%;-6.65%;2.02% +28/02/1999;0.82%;1.97%;-0.21%;1.02%;0.23%;-0.42%;2.08%;-1.11%;-1.69%;0.36%;0.85%;8.33%;-0.63% +31/03/1999;1.36%;-0.65%;1.59%;5.85%;0.33%;1.93%;1.60%;0.24%;2.29%;1.33%;1.16%;-1.54%;2.13% 
+30/04/1999;2.43%;2.10%;4.18%;6.30%;1.07%;4.29%;1.06%;3.29%;3.12%;2.18%;2.38%;-3.75%;4.00% +31/05/1999;1.66%;-1.50%;2.07%;0.61%;0.89%;2.15%;0.72%;-0.55%;0.95%;2.10%;1.46%;0.09%;1.19% +30/06/1999;1.02%;2.34%;2.73%;6.54%;1.68%;2.97%;0.88%;2.14%;3.15%;2.22%;1.48%;-4.12%;2.82% +31/07/1999;1.01%;-0.51%;0.84%;-0.61%;1.35%;0.96%;0.51%;-0.18%;1.77%;1.47%;1.10%;0.92%;0.88% +31/08/1999;0.48%;-0.27%;0.20%;-1.47%;0.95%;-0.27%;-0.28%;-0.61%;0.22%;0.50%;0.62%;4.68%;0.28% +30/09/1999;0.96%;0.64%;-0.41%;-0.69%;0.95%;0.90%;0.92%;-0.02%;1.13%;1.16%;1.05%;4.01%;0.52% +31/10/1999;0.45%;-3.54%;0.27%;2.88%;0.66%;0.54%;0.87%;0.73%;2.12%;0.96%;0.70%;-1.30%;1.30% +30/11/1999;1.24%;1.66%;2.20%;6.92%;1.33%;2.84%;1.06%;4.05%;4.81%;2.37%;1.37%;-12.39%;4.83% +31/12/1999;1.40%;1.42%;3.00%;12.30%;1.98%;2.86%;0.97%;6.12%;7.45%;0.90%;1.83%;-11.37%;6.22% +31/01/2000;2.27%;1.28%;0.88%;0.77%;0.75%;0.88%;0.41%;0.21%;0.75%;1.43%;1.73%;4.27%;1.69% +29/02/2000;2.67%;-0.22%;4.21%;5.28%;2.53%;3.46%;0.97%;4.08%;6.99%;2.39%;1.85%;-13.40%;6.66% +31/03/2000;2.43%;-1.38%;1.03%;3.18%;1.34%;0.69%;-0.61%;-1.04%;0.06%;1.31%;1.63%;-2.30%;0.39% +30/04/2000;2.23%;-2.41%;-1.01%;-5.41%;1.68%;-0.59%;-0.06%;-3.04%;-2.01%;1.88%;0.92%;10.28%;-2.69% +31/05/2000;1.49%;1.14%;-1.32%;-4.33%;0.62%;-0.34%;1.07%;-0.70%;-0.97%;1.46%;0.80%;7.04%;-1.22% +30/06/2000;1.79%;-1.24%;2.03%;3.34%;1.71%;2.68%;0.58%;1.54%;3.49%;1.67%;1.76%;-11.07%;3.11% +31/07/2000;0.93%;-1.31%;0.64%;0.25%;0.63%;0.57%;0.18%;0.37%;0.06%;1.16%;0.84%;5.53%;-0.22% +31/08/2000;1.62%;1.89%;1.40%;3.68%;2.10%;1.73%;1.07%;2.48%;3.45%;1.57%;1.57%;-11.35%;2.67% +30/09/2000;1.41%;-2.08%;-0.19%;-4.62%;0.58%;0.48%;0.76%;-1.49%;-0.16%;1.37%;0.75%;12.04%;-0.69% +31/10/2000;0.52%;0.75%;-0.73%;-2.56%;0.40%;-0.68%;0.06%;-0.24%;-0.84%;0.26%;-0.04%;7.84%;-1.04% +30/11/2000;-0.81%;4.25%;-2.09%;-3.85%;0.45%;-1.36%;0.66%;1.25%;-1.53%;1.02%;0.06%;16.57%;-2.05% +31/12/2000;-0.02%;6.82%;0.01%;1.16%;1.60%;1.27%;0.48%;4.72%;2.48%;1.25%;0.75%;0.63%;1.33% 
+31/01/2001;3.44%;0.25%;3.08%;5.86%;0.75%;2.98%;1.63%;2.14%;1.65%;1.11%;3.33%;-2.71%;2.23% +28/02/2001;1.82%;-0.16%;1.00%;-2.21%;1.20%;0.45%;0.54%;-0.72%;-2.64%;0.54%;0.30%;10.21%;-0.89% +31/03/2001;1.62%;4.38%;-0.37%;-1.75%;1.08%;-0.42%;0.51%;0.38%;-1.99%;-0.61%;-0.11%;6.20%;-0.68% +30/04/2001;1.57%;-3.62%;0.48%;1.14%;0.75%;1.10%;0.94%;0.49%;2.46%;0.58%;1.74%;-9.91%;1.04% +31/05/2001;0.33%;0.81%;2.35%;2.78%;0.77%;1.85%;0.68%;0.32%;0.43%;1.61%;1.41%;-1.30%;0.80% +30/06/2001;0.12%;-0.77%;3.60%;1.60%;0.17%;0.63%;0.17%;0.17%;0.19%;-0.87%;0.19%;1.10%;0.13% +31/07/2001;0.91%;-0.40%;0.73%;-2.86%;0.31%;0.49%;0.54%;-0.40%;-1.44%;0.79%;0.10%;3.53%;-0.40% +31/08/2001;1.42%;1.53%;1.06%;0.30%;0.94%;0.90%;1.05%;0.06%;-0.96%;0.99%;-0.31%;7.52%;0.19% +30/09/2001;0.78%;2.46%;-0.14%;-4.25%;0.23%;-2.54%;-0.13%;-0.70%;-3.48%;-2.67%;-2.21%;9.41%;-1.42% +31/10/2001;1.17%;3.36%;1.03%;2.78%;0.58%;1.48%;1.34%;2.08%;0.99%;0.85%;1.64%;-2.98%;0.95% +30/11/2001;0.80%;-5.43%;0.86%;4.83%;0.55%;1.05%;-0.24%;0.21%;2.00%;0.14%;1.36%;-6.55%;0.58% +31/12/2001;-0.94%;1.48%;0.15%;4.21%;0.56%;1.07%;0.53%;1.38%;1.80%;0.45%;0.97%;-2.51%;0.99% +31/01/2002;1.48%;-0.72%;1.86%;2.73%;0.65%;0.78%;0.86%;0.69%;-0.37%;0.77%;0.97%;3.43%;0.30% +28/02/2002;-0.49%;-2.02%;-0.33%;1.81%;-0.07%;-0.71%;0.56%;-0.35%;-1.23%;-0.44%;-0.11%;3.90%;-0.15% +31/03/2002;0.53%;0.09%;0.52%;3.31%;0.47%;1.53%;0.45%;0.64%;1.55%;0.73%;1.45%;-4.46%;0.90% +30/04/2002;0.96%;-1.04%;1.39%;1.44%;0.76%;0.46%;1.13%;0.98%;-0.42%;-0.13%;0.70%;4.83%;0.52% +31/05/2002;0.33%;2.70%;0.91%;0.01%;0.53%;0.01%;0.99%;1.23%;-0.34%;0.00%;0.31%;3.46%;0.50% +30/06/2002;0.04%;6.55%;-1.17%;-2.92%;0.22%;-2.83%;0.69%;-0.22%;-2.49%;-1.70%;-1.07%;5.48%;-0.95% +31/07/2002;-1.59%;4.13%;-1.33%;-3.09%;-0.13%;-3.00%;0.57%;-0.78%;-3.89%;-1.74%;-1.85%;6.44%;-1.40% +31/08/2002;0.50%;2.20%;0.09%;1.19%;0.69%;0.60%;0.97%;0.63%;0.41%;0.61%;0.58%;0.15%;0.37% +30/09/2002;1.46%;2.84%;-0.44%;-2.52%;0.15%;-0.70%;-0.33%;0.54%;-1.60%;-0.28%;-1.10%;7.31%;-0.33% 
+31/10/2002;1.04%;-3.76%;-0.31%;1.54%;0.16%;0.31%;-0.63%;-0.86%;1.23%;0.32%;0.84%;-4.05%;-0.31% +30/11/2002;2.51%;-1.64%;2.39%;1.90%;0.25%;2.16%;0.54%;0.47%;2.24%;0.54%;1.85%;-5.47%;1.06% +31/12/2002;1.57%;4.89%;2.22%;0.48%;0.94%;0.44%;1.53%;1.92%;-1.49%;0.46%;0.23%;4.43%;0.77% +31/01/2003;2.83%;4.41%;2.43%;0.12%;0.83%;1.54%;1.06%;1.82%;0.05%;0.40%;0.67%;1.62%;0.72% +28/02/2003;1.33%;4.02%;0.92%;0.84%;0.24%;0.26%;0.79%;1.66%;-0.37%;0.18%;-0.04%;1.30%;0.31% +31/03/2003;0.89%;-4.45%;1.13%;0.19%;0.15%;0.83%;0.19%;-1.22%;0.20%;-0.07%;0.49%;-0.75%;-0.04% +30/04/2003;1.50%;0.65%;3.45%;4.50%;0.31%;2.72%;0.91%;1.17%;2.98%;0.99%;1.86%;-6.56%;1.34% +31/05/2003;1.36%;4.90%;2.70%;4.33%;1.07%;3.01%;2.07%;3.97%;3.62%;1.54%;2.12%;-4.99%;2.05% +30/06/2003;-0.58%;-1.92%;2.67%;2.68%;0.34%;1.81%;0.44%;0.56%;1.28%;0.48%;0.71%;-1.62%;0.68% +31/07/2003;-0.72%;-1.71%;1.17%;1.04%;-0.06%;1.19%;-0.92%;-0.35%;1.18%;0.53%;0.41%;-3.61%;0.25% +31/08/2003;-0.87%;0.78%;1.37%;3.74%;0.31%;1.33%;0.43%;2.02%;1.79%;0.70%;0.58%;-3.54%;0.78% +30/09/2003;1.71%;-0.19%;2.42%;2.64%;0.78%;1.33%;1.05%;2.15%;0.94%;0.77%;0.86%;1.36%;1.21% +31/10/2003;1.46%;1.04%;2.67%;2.59%;1.15%;1.91%;0.35%;1.11%;2.99%;1.11%;1.59%;-6.56%;1.52% +30/11/2003;0.92%;0.18%;1.54%;0.96%;0.46%;1.16%;0.69%;0.31%;1.30%;0.44%;1.02%;-1.36%;0.70% +31/12/2003;0.54%;3.81%;1.98%;4.03%;0.54%;1.72%;1.01%;2.93%;1.91%;0.98%;1.27%;-1.78%;1.39% +31/01/2004;1.19%;1.99%;3.01%;2.51%;1.09%;2.34%;0.92%;1.17%;1.92%;0.97%;1.46%;-0.90%;1.56% +29/02/2004;0.17%;5.29%;0.75%;2.53%;0.63%;1.13%;0.84%;1.50%;1.23%;0.51%;0.57%;0.18%;1.11% +31/03/2004;0.61%;-0.51%;0.46%;1.72%;0.32%;0.16%;0.03%;0.64%;0.41%;0.17%;0.38%;-1.48%;0.43% +30/04/2004;0.20%;-5.32%;0.93%;-2.52%;-0.82%;0.02%;0.62%;-1.78%;-1.65%;-0.39%;-0.45%;3.84%;-0.68% +31/05/2004;-1.28%;-1.18%;-0.10%;-1.81%;0.24%;-0.23%;0.40%;-0.81%;-0.35%;0.00%;-0.37%;-0.24%;-0.82% +30/06/2004;-1.06%;-3.16%;2.02%;0.20%;0.42%;1.13%;0.55%;-0.19%;0.91%;0.17%;0.22%;-0.51%;0.34% 
+31/07/2004;0.13%;-1.19%;0.19%;-0.27%;0.06%;-0.82%;0.62%;-0.14%;-1.54%;-0.92%;0.07%;6.38%;-0.49% +31/08/2004;0.40%;-0.84%;0.88%;1.33%;-0.09%;0.35%;0.36%;-0.39%;-0.22%;0.11%;0.31%;1.26%;-0.10% +30/09/2004;-0.17%;2.20%;1.04%;2.80%;0.85%;1.03%;0.12%;0.08%;2.10%;0.42%;0.52%;-2.16%;0.99% +31/10/2004;-0.44%;3.58%;1.43%;1.85%;-0.05%;1.24%;0.28%;1.38%;0.74%;0.74%;0.40%;-0.92%;0.68% +30/11/2004;0.81%;4.75%;3.37%;3.28%;1.40%;3.06%;0.75%;2.80%;3.08%;1.64%;1.49%;-5.74%;2.44% +31/12/2004;0.56%;0.00%;2.66%;2.01%;0.58%;2.44%;0.60%;0.33%;1.78%;1.33%;0.99%;-3.91%;1.45% +31/01/2005;-0.96%;-4.38%;0.37%;1.43%;0.81%;0.04%;0.44%;-0.47%;-0.17%;0.00%;0.12%;3.87%;0.06% +28/02/2005;-0.58%;0.05%;1.34%;3.46%;0.80%;1.44%;0.85%;1.71%;2.10%;0.65%;0.81%;1.18%;1.36% +31/03/2005;-1.40%;-0.06%;0.32%;-1.97%;0.19%;-0.04%;0.24%;-0.27%;-0.96%;0.32%;-0.42%;2.44%;-0.44% +30/04/2005;-3.16%;-3.54%;-0.52%;-0.49%;-0.30%;-1.28%;-0.03%;-0.80%;-1.84%;-1.05%;-1.08%;3.93%;-1.41% +31/05/2005;-1.33%;2.32%;0.06%;0.72%;0.47%;0.65%;-0.10%;0.88%;1.15%;0.95%;-0.02%;-4.75%;0.18% +30/06/2005;1.07%;2.60%;1.33%;1.60%;0.81%;1.33%;0.10%;1.16%;1.95%;0.85%;0.95%;-0.32%;1.31% +31/07/2005;1.64%;-0.13%;1.73%;2.57%;0.78%;2.15%;0.81%;1.19%;2.65%;1.15%;1.49%;-2.42%;1.34% +31/08/2005;0.66%;1.00%;1.24%;1.52%;0.62%;0.92%;0.36%;0.83%;0.97%;0.61%;0.53%;2.59%;0.79% +30/09/2005;1.42%;0.79%;1.12%;4.02%;0.87%;1.00%;0.62%;2.69%;2.22%;0.35%;1.22%;1.98%;1.47% +31/10/2005;-0.15%;-0.92%;-0.32%;-2.30%;0.01%;-1.73%;0.57%;-0.74%;-1.74%;-1.45%;-0.38%;2.33%;-1.49% +30/11/2005;0.04%;3.79%;1.00%;2.79%;0.61%;1.25%;0.15%;1.64%;2.11%;1.12%;0.67%;-3.00%;1.60% +31/12/2005;0.92%;-1.53%;1.22%;2.84%;0.68%;1.42%;0.54%;1.35%;2.49%;1.38%;1.26%;-0.35%;1.91% +31/01/2006;2.50%;1.74%;2.53%;5.26%;1.15%;3.41%;0.93%;2.58%;3.81%;2.72%;2.38%;-2.88%;2.86% +28/02/2006;1.16%;-1.86%;0.65%;1.61%;0.46%;0.51%;0.41%;0.02%;0.16%;1.04%;0.73%;0.64%;0.37% +31/03/2006;1.07%;2.84%;1.72%;1.22%;0.98%;1.85%;0.55%;0.94%;2.38%;1.44%;1.57%;-1.39%;1.64% 
+30/04/2006;0.64%;3.87%;1.93%;3.65%;1.02%;1.64%;1.21%;2.38%;1.72%;1.19%;1.26%;-0.12%;1.71% +31/05/2006;0.91%;-1.46%;0.86%;-3.89%;0.02%;0.08%;0.59%;-1.55%;-2.48%;0.09%;-0.25%;2.46%;-1.33% +30/06/2006;0.12%;-1.42%;-0.15%;-0.97%;0.63%;0.12%;0.36%;-0.15%;-0.62%;0.87%;0.21%;1.18%;-0.28% +31/07/2006;0.66%;-2.16%;0.09%;0.67%;0.51%;-0.11%;0.64%;0.06%;-0.31%;0.58%;0.17%;1.73%;-0.05% +31/08/2006;0.98%;0.20%;0.99%;1.33%;-0.09%;1.12%;0.37%;-0.39%;1.14%;0.53%;0.92%;-1.56%;0.66% +30/09/2006;0.93%;-0.55%;0.33%;0.11%;0.09%;0.35%;0.14%;-0.67%;0.05%;0.41%;0.40%;-2.36%;-0.03% +31/10/2006;0.54%;1.02%;1.94%;2.57%;0.65%;2.06%;0.67%;0.97%;1.94%;1.32%;1.32%;-3.80%;1.63% +30/11/2006;0.92%;2.26%;1.79%;3.23%;0.75%;1.82%;0.60%;1.99%;2.00%;1.42%;1.29%;-2.68%;1.85% +31/12/2006;1.27%;1.46%;1.65%;2.91%;1.07%;1.68%;0.72%;1.16%;1.53%;1.33%;1.28%;0.39%;1.75% +31/01/2007;1.30%;1.13%;1.50%;0.79%;0.83%;2.01%;0.69%;0.61%;1.21%;1.91%;1.35%;-1.07%;1.21% +28/02/2007;1.17%;-1.44%;1.45%;1.00%;0.51%;2.07%;1.06%;0.18%;0.82%;2.55%;1.14%;0.28%;0.96% +31/03/2007;0.60%;-1.41%;1.08%;1.85%;1.01%;1.46%;0.60%;0.27%;1.15%;0.63%;0.81%;-0.51%;0.96% +30/04/2007;0.26%;2.41%;1.64%;2.55%;0.89%;1.97%;0.71%;1.52%;1.98%;1.60%;1.34%;-2.65%;1.63% +31/05/2007;1.10%;2.30%;1.80%;2.70%;1.21%;2.13%;0.55%;1.92%;2.24%;1.71%;1.56%;-1.99%;2.04% +30/06/2007;0.11%;2.29%;0.27%;2.36%;0.77%;-0.07%;0.48%;1.07%;0.77%;-0.53%;1.00%;2.36%;0.82% +31/07/2007;-0.53%;-1.22%;-0.56%;2.75%;0.51%;-0.32%;0.07%;1.16%;0.09%;-0.54%;0.04%;4.86%;0.41% +31/08/2007;-1.45%;-2.80%;-1.18%;-2.74%;-0.94%;-1.44%;-0.48%;-1.16%;-1.60%;0.01%;-0.77%;0.92%;-2.22% +30/09/2007;1.61%;4.69%;0.95%;4.28%;1.23%;1.34%;1.64%;3.30%;2.56%;1.31%;1.53%;-2.07%;1.99% +31/10/2007;1.77%;2.80%;1.75%;4.85%;1.68%;2.14%;1.14%;3.04%;2.81%;1.91%;2.00%;-0.26%;3.03% +30/11/2007;-1.31%;-0.16%;-1.69%;-2.37%;-0.18%;-2.02%;-0.94%;-0.63%;-2.25%;-1.49%;-1.12%;7.19%;-1.48% +31/12/2007;-0.77%;1.17%;0.02%;1.30%;0.54%;0.07%;0.36%;1.04%;0.43%;-0.25%;0.22%;0.56%;0.40% 
+31/01/2008;-0.09%;2.55%;-2.33%;-5.03%;-1.12%;-2.71%;-0.12%;-0.10%;-4.00%;-1.26%;-1.18%;5.56%;-2.72% +29/02/2008;-0.83%;6.20%;0.14%;2.80%;1.20%;0.84%;-0.49%;3.12%;1.40%;0.60%;0.64%;3.00%;1.42% +31/03/2008;-3.17%;-0.56%;-1.26%;-3.79%;-0.49%;-1.68%;-3.06%;-1.69%;-2.36%;-0.45%;-1.62%;1.92%;-2.62% +30/04/2008;0.76%;-0.78%;0.88%;1.90%;0.59%;1.18%;1.87%;0.78%;2.23%;1.49%;1.30%;-4.61%;0.97% +31/05/2008;1.07%;1.62%;1.37%;1.63%;1.26%;1.76%;1.03%;1.14%;2.27%;1.36%;1.59%;-1.42%;1.72% +30/06/2008;-0.81%;3.30%;-0.31%;-2.74%;1.56%;-1.13%;-0.27%;0.30%;-1.64%;-1.09%;-0.84%;7.51%;-0.68% +31/07/2008;-1.88%;-3.33%;-1.82%;-3.30%;-1.00%;-1.66%;-0.23%;-2.13%;-2.61%;0.11%;-1.25%;0.72%;-2.64% +31/08/2008;-0.66%;-1.14%;-0.72%;-3.36%;-1.35%;-0.25%;-0.03%;-1.33%;-1.46%;0.51%;-0.23%;-2.15%;-1.56% +30/09/2008;-10.27%;0.10%;-5.18%;-9.82%;-2.85%;-6.27%;-5.06%;-3.13%;-6.75%;-2.76%;-5.38%;3.78%;-6.18% +31/10/2008;-12.37%;3.45%;-7.75%;-13.31%;-0.44%;-6.25%;-8.67%;-1.57%;-6.29%;-2.45%;-6.92%;11.70%;-6.00% +30/11/2008;-2.76%;2.14%;-4.35%;-3.91%;-5.87%;-3.01%;-3.08%;0.33%;-1.88%;0.06%;-2.09%;4.28%;-1.92% +31/12/2008;1.77%;1.40%;-1.97%;-0.10%;0.05%;-0.71%;-0.35%;1.18%;0.81%;1.62%;0.31%;-1.46%;-1.19% +31/01/2009;4.91%;-0.16%;0.82%;-1.12%;0.79%;1.32%;1.12%;0.29%;-0.17%;0.56%;1.00%;2.82%;0.60% +28/02/2009;1.64%;-0.31%;-1.22%;-1.33%;-0.46%;-0.91%;0.65%;-0.55%;-1.61%;0.06%;-0.16%;3.28%;-0.37% +31/03/2009;2.35%;-1.80%;0.22%;3.50%;0.21%;1.17%;0.57%;0.48%;1.88%;1.25%;1.00%;-4.62%;0.08% +30/04/2009;5.00%;-1.40%;3.87%;6.63%;-0.12%;3.37%;2.21%;1.27%;3.75%;0.81%;3.42%;-8.20%;0.92% +31/05/2009;5.78%;2.13%;5.04%;8.84%;1.46%;4.42%;3.65%;3.48%;5.16%;1.07%;3.92%;0.08%;3.12% +30/06/2009;2.41%;-1.47%;1.98%;0.13%;0.36%;1.23%;1.26%;-0.76%;0.09%;1.04%;1.01%;-0.94%;0.24% +31/07/2009;6.11%;-0.12%;3.11%;4.51%;0.42%;2.91%;3.22%;1.66%;2.77%;0.68%;2.60%;-5.96%;1.53% +31/08/2009;3.15%;0.54%;2.44%;1.66%;0.70%;2.07%;2.02%;0.50%;1.57%;1.02%;1.62%;-1.65%;1.13% 
+30/09/2009;3.68%;1.51%;4.10%;5.36%;0.85%;3.36%;3.25%;2.31%;2.85%;1.10%;2.20%;-3.94%;1.71% +31/10/2009;1.19%;-1.47%;1.39%;1.08%;-0.05%;0.43%;1.85%;-0.04%;-0.86%;0.26%;0.32%;3.14%;-0.21% +30/11/2009;0.80%;3.32%;2.02%;1.51%;0.09%;1.52%;0.96%;1.65%;1.30%;0.68%;0.89%;-2.27%;0.82% +31/12/2009;2.15%;-2.53%;3.24%;2.29%;0.72%;2.35%;1.41%;-0.28%;1.86%;1.02%;1.61%;-3.73%;0.66% +31/01/2010;0.53%;-2.78%;1.87%;-0.78%;0.28%;0.77%;1.72%;-0.84%;-0.95%;0.48%;0.60%;1.83%;-0.36% +28/02/2010;0.36%;0.90%;0.31%;-0.19%;0.50%;0.76%;0.23%;0.47%;0.84%;0.57%;0.57%;-2.61%;0.13% +31/03/2010;2.29%;2.75%;3.36%;4.21%;0.77%;2.73%;1.36%;1.71%;2.87%;1.09%;1.79%;-4.96%;1.71% +30/04/2010;1.99%;1.00%;2.51%;1.33%;0.18%;1.64%;1.13%;0.76%;1.00%;0.38%;1.22%;-2.87%;0.85% +31/05/2010;-2.44%;-2.48%;-2.50%;-4.80%;-0.50%;-2.57%;-1.02%;-1.28%;-3.71%;-1.15%;-1.86%;4.45%;-2.72% +30/06/2010;0.17%;0.11%;-1.05%;-0.39%;-0.45%;-1.06%;0.78%;-0.23%;-1.61%;0.23%;-0.34%;4.19%;-0.79% +31/07/2010;2.27%;-0.80%;1.43%;2.96%;0.90%;1.70%;1.00%;0.33%;1.91%;1.27%;1.74%;-4.26%;0.69% +31/08/2010;1.21%;3.10%;-0.49%;0.28%;-0.44%;-0.32%;0.88%;1.08%;-0.96%;0.70%;0.31%;3.89%;0.06% +30/09/2010;1.89%;2.67%;2.17%;4.29%;1.60%;2.72%;1.32%;2.63%;4.08%;1.34%;2.15%;-8.26%;2.19% +31/10/2010;2.14%;3.11%;2.06%;2.32%;1.05%;1.87%;1.07%;1.59%;2.08%;0.47%;1.49%;-1.79%;1.48% +30/11/2010;-0.12%;-2.24%;0.65%;-0.49%;0.44%;0.28%;0.56%;-0.47%;0.66%;-0.25%;0.37%;-1.62%;-0.09% +31/12/2010;1.45%;4.36%;3.11%;2.26%;0.94%;2.60%;0.55%;2.50%;3.42%;1.16%;1.57%;-5.07%;2.05% +31/01/2011;1.81%;-0.63%;1.73%;-0.65%;0.56%;1.41%;1.81%;-0.55%;0.52%;0.93%;0.97%;-0.76%;0.13% +28/02/2011;1.62%;1.77%;1.44%;0.11%;0.59%;1.29%;1.03%;0.95%;1.39%;0.62%;1.12%;-3.14%;0.83% +31/03/2011;0.38%;-1.64%;0.43%;1.54%;0.80%;0.20%;0.22%;-0.28%;0.21%;0.31%;0.34%;-1.70%;-0.18% +30/04/2011;0.20%;3.67%;1.29%;1.40%;1.05%;1.14%;0.97%;2.11%;1.39%;0.89%;0.72%;-1.97%;1.14% +31/05/2011;-0.17%;-3.39%;-0.14%;-1.91%;-0.37%;-0.52%;0.33%;-1.50%;-1.08%;-0.11%;-0.05%;1.34%;-1.16% 
+30/06/2011;-0.91%;-2.39%;-0.68%;-1.00%;-0.14%;-1.38%;-0.10%;-1.30%;-1.17%;-0.14%;-0.56%;2.70%;-1.38% +31/07/2011;-0.33%;2.58%;-0.15%;0.77%;-0.03%;-0.30%;0.18%;1.29%;-0.30%;-0.49%;-0.06%;1.99%;0.34% +31/08/2011;-1.95%;0.07%;-4.02%;-3.90%;-1.61%;-4.01%;-0.85%;-0.63%;-4.21%;-1.19%;-2.00%;6.69%;-2.52% +30/09/2011;-1.90%;-0.57%;-3.70%;-6.95%;-1.59%;-3.41%;-0.48%;-1.42%;-4.61%;-1.02%;-1.97%;7.77%;-2.62% +31/10/2011;1.29%;-3.17%;2.37%;3.65%;1.61%;2.98%;0.63%;0.53%;3.90%;1.72%;1.47%;-7.21%;0.94% +30/11/2011;-0.94%;0.14%;-1.08%;-2.72%;0.02%;-0.58%;-0.34%;-0.66%;-1.31%;0.17%;-0.24%;1.31%;-0.93% +31/12/2011;0.29%;0.34%;0.50%;-1.81%;0.06%;-0.34%;0.45%;-0.22%;-0.56%;0.56%;0.12%;0.41%;-0.54% +31/01/2012;2.36%;0.46%;3.24%;3.95%;0.95%;2.88%;1.16%;1.90%;3.27%;0.95%;2.02%;-6.79%;1.47% +29/02/2012;2.18%;0.91%;2.02%;3.41%;0.71%;1.70%;1.19%;1.34%;2.48%;1.11%;1.72%;-5.22%;1.40% +31/03/2012;0.65%;-2.04%;0.91%;-1.33%;0.23%;0.58%;0.61%;-0.61%;0.33%;0.24%;0.55%;-1.23%;0.01% +30/04/2012;-0.17%;-0.03%;-0.12%;-0.27%;-0.13%;-0.18%;0.34%;-0.59%;-0.68%;-0.11%;-0.20%;0.57%;-0.33% +31/05/2012;-0.77%;2.59%;-1.86%;-5.31%;-1.15%;-1.91%;-0.03%;-0.67%;-3.75%;-0.48%;-1.00%;6.77%;-1.40% +30/06/2012;0.72%;-2.72%;-0.09%;0.26%;0.06%;0.07%;0.46%;-0.63%;0.60%;0.23%;0.70%;-2.58%;-0.37% +31/07/2012;0.92%;3.12%;0.89%;0.81%;0.41%;0.48%;1.25%;1.44%;0.39%;0.06%;1.17%;0.87%;0.76% +31/08/2012;0.76%;-0.82%;1.61%;0.75%;0.49%;1.29%;0.82%;0.37%;1.08%;0.44%;0.95%;-3.67%;0.68% +30/09/2012;0.57%;-1.04%;1.69%;2.73%;0.43%;1.20%;0.76%;0.51%;1.57%;0.06%;1.02%;-3.85%;0.73% +31/10/2012;0.17%;-3.10%;1.18%;0.28%;0.28%;0.48%;0.58%;-0.83%;0.09%;-0.79%;0.30%;0.05%;-0.25% +30/11/2012;0.41%;-0.04%;0.82%;1.31%;0.37%;0.80%;0.54%;0.27%;0.52%;1.00%;0.56%;-1.50%;0.46% +31/12/2012;0.98%;0.57%;2.59%;3.30%;0.37%;1.93%;0.73%;1.11%;1.45%;1.45%;1.17%;-3.87%;1.08% +31/01/2013;1.58%;1.86%;2.19%;3.03%;1.35%;2.14%;1.16%;1.65%;3.08%;-0.16%;2.04%;-4.52%;2.06% +28/02/2013;0.09%;-0.99%;0.52%;-0.12%;0.33%;0.41%;0.56%;-0.12%;0.37%;0.39%;0.47%;-0.35%;0.32% 
+31/03/2013;0.77%;0.89%;1.49%;0.68%;0.35%;1.43%;1.13%;0.64%;1.44%;0.69%;1.30%;-1.89%;0.93% +30/04/2013;0.52%;1.92%;1.75%;0.63%;0.08%;0.78%;0.50%;1.14%;0.64%;0.61%;0.55%;-2.86%;0.76% +31/05/2013;1.34%;-2.61%;2.23%;0.33%;0.24%;1.87%;0.50%;-0.23%;1.35%;0.59%;1.01%;-2.45%;0.67% +30/06/2013;-0.81%;-2.73%;-1.05%;-3.21%;0.28%;-1.13%;-1.22%;-1.76%;-0.94%;-0.22%;-1.02%;0.56%;-1.33% +31/07/2013;0.60%;-0.76%;1.40%;0.75%;1.00%;1.49%;0.67%;0.16%;2.15%;1.03%;1.17%;-3.69%;0.91% + From noreply at r-forge.r-project.org Thu Sep 12 21:38:05 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 12 Sep 2013 21:38:05 +0200 (CEST) Subject: [Returnanalytics-commits] r3074 - in pkg/PortfolioAnalytics/sandbox/symposium2013: . docs Message-ID: <20130912193805.37F54185043@r-forge.r-project.org> Author: peter_carl Date: 2013-09-12 21:38:04 +0200 (Thu, 12 Sep 2013) New Revision: 3074 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd Log: - initial project setup - draft of slides without any code Added: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-12 19:38:04 UTC (rev 3074) @@ -0,0 +1,314 @@ +% Constructing Portfolios of Dynamic Strategies using Downside Risk Measures +% Peter Carl, Hedge Fund Strategies, William Blair & Co. 
+% November 11, 2013 + + + +# Introduction + +- Discuss the challenges of constructing hedge fund portfolios +- Offer a framework for considering strategic allocation of dynamic strategies +- Show the relative performance of multiple objectives +- Discuss extensions to the framework + +# Objectives + +- Visualization can help you build intuition about your objectives and constraints +- Rebalancing periodically and examining out of sample performance will help refine objectives +- Analytic solvers and parallel computation are valuable as problems get more complex + +# Process +Insert process diagram here? Optional + +# Strategic allocation +...broadly described as periodically reallocating the portfolio to achieve a long-term goal + +- Understanding the nature and sources of investment risk within the portfolio +- Manage the resulting balance of risk and return of the portfolio +- Apply within the cotext of the current economic and market situation +- Think systematically about preferences and constraints + +# Selected strategies +Daily data from the ... 
+ + + +# Performance of strategies +Cumulative returns and drawdowns chart + +# Performance of strategies +BarVaR charts + +# Performance of strategies +Rolling 36-month Performance + +# Performance of strategies +Pair of scatterplots since inception/last 36 months + +# Performance of strategies +Comparison of distributions + +# Correlation of strategies +Correlation charts, from inception and trailing 36-months + +# Portfolio issues +Markowitz (1952) described an investor's objectives as: + +* maximizing some measure of gain while +* minimizing some measure of risk + +Many approaches follow Markowitz by using mean return and standard devation of returns for "risk" + +# Portfolio issues +Most investors would prefer: + +* to be approximately correct rather than precisely wrong +* to define risk as potential loss rather than volatility +* the flexibility to define any kind of objective and combine the constraints +* a framework for considering different sets of portfolio constraints for comparison through time +* to intuitively understand optimization through visualization + +# Portfolio issues +Construct a portfolio that: + +* maximizes return, +* with per-asset conditional constraints, +* with a specific univariate risk limit, +* while minimizing component risk concentration, +* and limiting drawdowns to a threshhold value. + +Not a quadratic (or linear, or conical) problem any more. 
+ +# Risk rather than volatility + +* Expected Tail Loss (ETL) is also called Conditional Value-at-Risk (CVaR) and Expected Shortfall (ES) +* ETL is the mean expected loss when the loss exceeds the VaR +* ETL has all the properties a risk measure should have to be coherent and is a convex function of the portfolio weights +* Returns are skewed and/or kurtotic, so we use Cornish-Fisher (or "modified") estimates of ETL instead + +# Use Random Portfolios +[Burns (2009)](http://www.portfolioprobe.com/blog/) describes Random Portfolios + +* From a portfolio seed, generate random pemutations of weights that meet your constraints on each asset +* add more here + +Sampling can help provide insight into the goals and constraints of the optimization + +* Covers the 'edge case' (min/max) constraints well +* Covers the 'interior' portfolios +* Useful for finding the search space for an optimizer +* Allows arbitrary number o fsamples +* Allows massively parallel execution + +# Add general constraints +Constraints specified for each asset in the portfolio: + +* Maximum position: +* Minimum position: +* Weights sum to 100% +* Weights step size of 0.5% + +Other settings: + +* Confidence for VaR/ETL set at +* Random portfolios with X000 permutations +* Rebalancing quarterly (or monthly?) 
+ +# Estimation + +* Optimizer chooses portfolios based on forward looking estimates of risk and return based on the portfolio moments +* Estimates use the first four moments and co-moments +* Historical sample moments work fine as predictors in normal market regimes, but poorly when the market regime shifts + +One of the largest challenges in optimization is improving the estimates of return and volatility + +# Forecasting +## Returns + +## Volatility + + +# Forecasting correlation + +# Equal-weight portfolio + +* Provides a benchmark to evaluate the performance of an optimized portfolio against +* Each asset in the portfolio is purchased in the same quantity at the beginning of the period +* The portfolio is rebalanced back to equal weight at the beginning of the next period +* Implies no information about return or risk +* Is the re-weighting adding or subtracting value? +* Do we have a useful view of return and risk? + +# Sampled portfolios +scatter chart with equal weight portfolio + +# Turnover from equal-weight +scatter chart colored by degree of turnover + +# Multiple objectives + +Equal contribution to: + +* weight +* variance +* risk + +Reward to risk: + +* mean-variance +* mean-modified ETL + +Minimum: + +* variance +* modified ETL + +# Equal contribution... +...to Weight + +* Implies diversification but has nothing to say about returns or risk + +...to Variance + +* Allocates portfolio variance equally across the portfolio components + +...to Risk + +* Use (percentage) ETL contributions to directly diversify downside risk among components +* Actually the minimum component risk contribution concentration portfolio + +# Reward to risk ratios... +...mean/variance + +* A traditional reward to risk objective that penalizes upside volatility as risk + +...mean/modified ETL + +* A reward-to-downside-risk objective that uses higher moments to estimate the tail + +# Minimum... 
+...variance + +* The portfolio with the forecasted variance of return + +...ETL + +* The portfolio with the minimum forecasted ETL + +Minimum risk portfolios generally suffer from the drawback of portfolio concentration. + +# Ex-ante results +Unstacked bar chart comparing allocations across objectives + +# Ex-ante results +scatter plot with objectives + +# Ex-ante vs. ex-post results +scatter plot with both overlaid + +# Out-of-sample results +timeseries charts for cumulative return and drawdown + +# Risk contribution +stacked bar chart of risk contribution through time (ex ante and ex post) + +# Conclusions +As a framework for strategic allocation: + +* Random Portfolios can help you build intuition about your objectives and constraints +* Rebalancing periodically and examining out of sample performance can help you refine objectives +* Differential Optimization and parallelization are valuable as objectives get more complicated + +# References +Figure out bibtex links in markup + +# Appendix +Slides after this point are not likely to be included in the final presentation + +# _PortfolioAnalytics_ + +- Provides numerical solutions to portfolios with complex constraints and objectives +- Unifies the interface across different numerical and closed-form optimizers +- Implements a front-end to two analytical solvers: **Differential Evolution** and **Random Portfolios** +- Preserves the flexibility to define any kind of objective and constraint +- Work-in-progress, available on R-Forge in the _ReturnAnalytics_ project + +# Other packages + +* _PerformanceAnalytics_ + * Library of econometric functions for performance and risk analysis of financial instruments and portfolios + +* _rugarch_ and _rmgarch_ + * By Alexios Ghalanos + * The univariate and multivariate GARCH parts of the rgarch project on R-Forge + +* _xts_ + * By Jeff Ryan and Jush Ulrich + * Time series package specifically for finance + +# Scratch +Slides likely to be deleted after this point \ No newline 
at end of file From noreply at r-forge.r-project.org Fri Sep 13 04:15:25 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 04:15:25 +0200 (CEST) Subject: [Returnanalytics-commits] r3075 - pkg/PortfolioAnalytics/R Message-ID: <20130913021525.CE969185DA7@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-13 04:15:21 +0200 (Fri, 13 Sep 2013) New Revision: 3075 Modified: pkg/PortfolioAnalytics/R/charts.multiple.R pkg/PortfolioAnalytics/R/charts.risk.R Log: Adding function for risk budget barplot. Modified: pkg/PortfolioAnalytics/R/charts.multiple.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.multiple.R 2013-09-12 19:38:04 UTC (rev 3074) +++ pkg/PortfolioAnalytics/R/charts.multiple.R 2013-09-13 02:15:21 UTC (rev 3075) @@ -79,7 +79,7 @@ # data to plot dat <- na.omit(obj[, c(risk.col, return.col)]) - if(nrow(dat) < 1) stop("No data to plot after na.omit") + if(ncol(dat) < 1) stop("No data to plot after na.omit") dat_names <- rownames(dat) # colors to plot Modified: pkg/PortfolioAnalytics/R/charts.risk.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-12 19:38:04 UTC (rev 3074) +++ pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-13 02:15:21 UTC (rev 3075) @@ -234,18 +234,127 @@ chart.RiskBudget.opt.list <- function(object, ..., match.col="ES", risk.type="absolute", main="Risk Budget", plot.type="line", cex.axis=0.8, cex.lab=0.8, element.color="darkgray", las=3, ylim=NULL, colorset=NULL, legend.loc=NULL, cex.legend=0.8){ if(!inherits(object, "opt.list")) stop("object must be of class 'opt.list'") + if(plot.type %in% c("bar", "barplot")){ + barplotRiskBudget(object=object, ...=..., match.col=match.col, risk.type=risk.type, main=main, ylim=ylim, cex.axis=cex.axis, cex.lab=cex.lab, element.color=element.color, las=las, colorset=colorset, legend.loc=legend.loc, cex.legend=cex.legend) + } 
else if(plot.type == "line"){ + + xtract <- extractObjectiveMeasures(object) + + if(risk.type == "absolute"){ + # get the index of columns with risk budget + rbcols <- grep(paste(match.col, "contribution", sep="."), colnames(xtract)) + dat <- na.omit(xtract[, rbcols]) + if(ncol(dat) < 1) stop("No data to plot after na.omit") + opt_names <- rownames(dat) + # remove everything up to the last dot (.) to extract the names + colnames(dat) <- gsub("(.*)\\.", "", colnames(dat)) + + # set the colors + if(is.null(colorset)) colorset <- 1:nrow(dat) + columnnames <- colnames(dat) + numassets <- length(columnnames) + + xlab <- NULL + if(is.null(xlab)) + minmargin <- 3 + else + minmargin <- 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin <- 10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + if(is.null(ylim)) ylim <- range(dat) + + plot(dat[1,], type="n", ylim=ylim, xlab='', ylab=paste(match.col, "Contribution", sep=" "), main=main, cex.lab=cex.lab, axes=FALSE) + for(i in 1:nrow(dat)){ + points(dat[i, ], type="b", col=colorset[i], ...) 
# add dots here + } + + # set the axis + axis(2, cex.axis=cex.axis, col=element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) + box(col=element.color) + + # Add a legend + if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, col=colorset, lty=1, bty="n", cex=cex.legend) + } + + if(risk.type %in% c("percent", "percentage", "pct_contrib")){ + # get the index of columns with risk budget + rbcols <- grep(paste(match.col, "pct_contrib", sep="."), colnames(xtract)) + dat <- na.omit(xtract[, rbcols]) + if(ncol(dat) < 1) stop("No data to plot after na.omit") + opt_names <- rownames(dat) + # remove everything up to the last dot (.) to extract the names + colnames(dat) <- gsub("(.*)\\.", "", colnames(dat)) + + # set the colors + if(is.null(colorset)) colorset <- 1:nrow(dat) + + columnnames <- colnames(dat) + numassets <- length(columnnames) + + xlab <- NULL + if(is.null(xlab)) + minmargin <- 3 + else + minmargin <- 5 + if(main=="") topmargin=1 else topmargin=4 + if(las > 1) {# set the bottom border to accommodate labels + bottommargin = max(c(minmargin, (strwidth(columnnames,units="in"))/par("cin")[1])) * cex.lab + if(bottommargin > 10 ) { + bottommargin <- 10 + columnnames<-substr(columnnames,1,19) + # par(srt=45) #TODO figure out how to use text() and srt to rotate long labels + } + } + else { + bottommargin = minmargin + } + par(mar = c(bottommargin, 4, topmargin, 2) +.1) + + if(is.null(ylim)) ylim <- range(dat) + + plot(dat[1,], type="n", ylim=ylim, xlab='', ylab=paste(match.col, "% Contribution", sep=" "), main=main, cex.lab=cex.lab, axes=FALSE) + for(i in 1:nrow(dat)){ + points(dat[i, ], type="b", col=colorset[i], ...) 
# add dots here + } + + axis(2, cex.axis=cex.axis, col=element.color) + axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) + box(col=element.color) + + # Add a legend + if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, col=colorset, lty=1, bty="n", cex=cex.legend) + } + } +} + +# This function is called inside chart.RiskBudget.opt.list when plot.type == "bar" or "barplot" +barplotRiskBudget <- function(object, ..., match.col="ES", risk.type="absolute", main="Risk Budget", cex.axis=0.8, cex.lab=0.8, element.color="darkgray", las=3, colorset=NULL, legend.loc=NULL, cex.legend=0.8){ + if(!inherits(object, "opt.list")) stop("object must be of class 'opt.list'") + xtract <- extractObjectiveMeasures(object) if(risk.type == "absolute"){ # get the index of columns with risk budget rbcols <- grep(paste(match.col, "contribution", sep="."), colnames(xtract)) dat <- na.omit(xtract[, rbcols]) + if(ncol(dat) < 1) stop("No data to plot after na.omit") opt_names <- rownames(dat) # remove everything up to the last dot (.) to extract the names colnames(dat) <- gsub("(.*)\\.", "", colnames(dat)) - # set the colors - if(is.null(colorset)) colorset <- 1:nrow(dat) columnnames <- colnames(dat) numassets <- length(columnnames) @@ -268,33 +377,30 @@ } par(mar = c(bottommargin, 4, topmargin, 2) +.1) - if(is.null(ylim)) ylim <- range(dat) + # set the colors + if(is.null(colorset)) colorset <- 1:nrow(dat) - plot(dat[1,], type="n", ylim=ylim, xlab='', ylab=paste(match.col, "Contribution", sep=" "), main=main, cex.lab=cex.lab, axes=FALSE) - for(i in 1:nrow(dat)){ - points(dat[i, ], type="b", col=colorset[i], ...) # add dots here - } + # plot the data + barplot(dat, names.arg=columnnames, las=las, cex.names=cex.axis, xlab='', col=colorset, main=main, ylab=paste(match.col, "Contribution", sep=" "), cex.lab=cex.lab, cex.axis=cex.axis, ...) 
# set the axis - axis(2, cex.axis=cex.axis, col=element.color) - axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) + #axis(2, cex.axis=cex.axis, col=element.color) + #axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) box(col=element.color) # Add a legend - if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, col=colorset, lty=1, bty="n", cex=cex.legend) + if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, fill=colorset, bty="n", cex=cex.legend) } if(risk.type %in% c("percent", "percentage", "pct_contrib")){ # get the index of columns with risk budget rbcols <- grep(paste(match.col, "pct_contrib", sep="."), colnames(xtract)) dat <- na.omit(xtract[, rbcols]) + if(ncol(dat) < 1) stop("No data to plot after na.omit") opt_names <- rownames(dat) # remove everything up to the last dot (.) to extract the names colnames(dat) <- gsub("(.*)\\.", "", colnames(dat)) - # set the colors - if(is.null(colorset)) colorset <- 1:nrow(dat) - columnnames <- colnames(dat) numassets <- length(columnnames) @@ -317,18 +423,17 @@ } par(mar = c(bottommargin, 4, topmargin, 2) +.1) - if(is.null(ylim)) ylim <- range(dat) + # set the colors + if(is.null(colorset)) colorset <- 1:nrow(dat) - plot(dat[1,], type="n", ylim=ylim, xlab='', ylab=paste(match.col, "% Contribution", sep=" "), main=main, cex.lab=cex.lab, axes=FALSE) - for(i in 1:nrow(dat)){ - points(dat[i, ], type="b", col=colorset[i], ...) # add dots here - } + # plot the data + barplot(dat, names.arg=columnnames, las=las, cex.names=cex.axis, col=colorset, main=main, ylab=paste(match.col, "% Contribution", sep=" "), cex.lab=cex.lab, cex.axis=cex.axis, ...) 
- axis(2, cex.axis=cex.axis, col=element.color) - axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) + #axis(2, cex.axis=cex.axis, col=element.color) + #axis(1, labels=columnnames, at=1:numassets, las=las, cex.axis=cex.axis, col=element.color) box(col=element.color) # Add a legend - if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, col=colorset, lty=1, bty="n", cex=cex.legend) + if(!is.null(legend.loc)) legend(legend.loc, legend=opt_names, fill=colorset, bty="n", cex=cex.legend) } } From noreply at r-forge.r-project.org Fri Sep 13 07:18:31 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 07:18:31 +0200 (CEST) Subject: [Returnanalytics-commits] r3076 - in pkg/PortfolioAnalytics: . R man Message-ID: <20130913051831.7D847185DF3@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-13 07:18:31 +0200 (Fri, 13 Sep 2013) New Revision: 3076 Added: pkg/PortfolioAnalytics/man/rp_simplex.Rd Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/random_portfolios.R Log: Adding rp_simplex function to generate random portfolios based on Shaw method. 
Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-13 02:15:21 UTC (rev 3075) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-13 05:18:31 UTC (rev 3076) @@ -61,6 +61,7 @@ export(return_constraint) export(return_objective) export(risk_budget_objective) +export(rp_simplex) export(rp_transform) export(scatterFUN) export(set.portfolio.moments_v1) Modified: pkg/PortfolioAnalytics/R/random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 02:15:21 UTC (rev 3075) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 05:18:31 UTC (rev 3076) @@ -358,6 +358,68 @@ #' @export random_portfolios <- random_portfolios_v2 +#' Generate random portfolios using the simplex method +#' +#' This function generates random portfolios based on the method outlined in the +#' Shaw paper. Need to add reference. +#' +#' @details +#' The simplex method is useful to generate random portfolios with the full +#' investment constraint where the sum of the weights is equal to 1 and min and +#' max box constraints. All other constraints such as group and position limit +#' constraints will be handled by elimination. If the constraints are very +#' restrictive, this may result in very few feasible portfolios remaining. +#' +#' The random portfolios are created by first generating a set of uniform +#' random numbers. +#' \deqn{U \sim [0, 1]} +#' The portfolio weights are then transformed to satisfy the min of the +#' box constraints. +#' \deqn{w_{i} = min_{i} + (1 - \sum_{j=1}^{N} min_{j}) \frac{log(U_{i}^{q}}{\sum_{k=1}^{N}log(U_{k}^{q}}} +#' +#' \code{p} controls the Face-Edge-Vertex (FEV) biasing where \deqn{q=2^p}. As +#' \code{q} approaches infinity, the set of weights will be concentrated in a +#' single asset. To sample the interior and exterior, \code{p} can be passed +#' in as a vector. 
The number of portfolios, \code{permutations}, and the +#' length of \code{p} affect how the random portfolios are generated. For +#' example if \code{permutations=10000} and \code{p=0:4}, 2000 portfolios will +#' be generated for each value of \code{p}. +#' +#' @param portfolio an object of type "portfolio" specifying the constraints for the optimization, see \code{\link{portfolio.spec}} +#' @param permutations integer: number of unique constrained random portfolios to generate +#' @param p scalar or vector for FEV biasing +#' @param \dots any other passthru parameters +#' @return a matrix of random portfolios +#' @export +rp_simplex <- function(portfolio, permutations, p=0:5, ...){ + # get the assets from the portfolio + assets <- portfolio$assets + nassets <- length(assets) + + # get the constraints + # the simplex method for generating random portfolios requires that the sum of weights is equal to 1 + constraints <- get_constraints(portfolio) + L <- constraints$min + + # number of portfolios for each p to generate + k <- floor(permutations / length(p)) + + # generate uniform[0, 1] random numbers + U <- runif(n=k*permutations, 0, 1) + Umat <- matrix(data=U, nrow=k, ncol=nassets) + + # do the transformation to the set of weights to satisfy lower bounds + stopifnot("package:foreach" %in% search() || require("foreach",quietly = TRUE)) + out <- foreach(j = 1:length(p), .combine=c) %:% foreach(i=1:nrow(Umat)) %dopar% { + q <- 2^p[j] + tmp <- L + (1 - sum(L)) * log(Umat[i,])^q / sum(log(Umat[i,])^q) + tmp + } + # the foreach loop returns a list of each random portfolio + out <- do.call(rbind, out) + return(out) +} + # EXAMPLE: start_t<- Sys.time(); x=random_walk_portfolios(rep(1/5,5), generatesequence(min=0.01, max=0.30, by=0.01), max_permutations=500, permutations=5000, min_sum=.99, max_sum=1.01); end_t<-Sys.time(); end_t-start_t; # > nrow(unique(x)) # [1] 4906 Added: pkg/PortfolioAnalytics/man/rp_simplex.Rd 
=================================================================== --- pkg/PortfolioAnalytics/man/rp_simplex.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/rp_simplex.Rd 2013-09-13 05:18:31 UTC (rev 3076) @@ -0,0 +1,53 @@ +\name{rp_simplex} +\alias{rp_simplex} +\title{Generate random portfolios using the simplex method} +\usage{ + rp_simplex(portfolio, permutations, p = 0:5, ...) +} +\arguments{ + \item{portfolio}{an object of type "portfolio" specifying + the constraints for the optimization, see + \code{\link{portfolio.spec}}} + + \item{permutations}{integer: number of unique constrained + random portfolios to generate} + + \item{p}{scalar or vector for FEV biasing} + + \item{\dots}{any other passthru parameters} +} +\value{ + a matrix of random portfolios +} +\description{ + This function generates random portfolios based on the + method outlined in the Shaw paper. Need to add reference. +} +\details{ + The simplex method is useful to generate random + portfolios with the full investment constraint where the + sum of the weights is equal to 1 and min and max box + constraints. All other constraints such as group and + position limit constraints will be handled by + elimination. If the constraints are very restrictive, + this may result in very few feasible portfolios + remaining. + + The random portfolios are created by first generating a + set of uniform random numbers. \deqn{U \sim [0, 1]} The + portfolio weights are then transformed to satisfy the min + of the box constraints. \deqn{w_{i} = min_{i} + (1 - + \sum_{j=1}^{N} min_{j}) + \frac{log(U_{i}^{q}}{\sum_{k=1}^{N}log(U_{k}^{q}}} + + \code{p} controls the Face-Edge-Vertex (FEV) biasing + where \deqn{q=2^p}. As \code{q} approaches infinity, the + set of weights will be concentrated in a single asset. To + sample the interior and exterior, \code{p} can be passed + in as a vector. The number of portfolios, + \code{permutations}, and the length of \code{p} affect + how the random portfolios are generated. 
For example if + \code{permutations=10000} and \code{p=0:4}, 2000 + portfolios will be generated for each value of \code{p}. +} + From noreply at r-forge.r-project.org Fri Sep 13 11:29:05 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 11:29:05 +0200 (CEST) Subject: [Returnanalytics-commits] r3077 - in pkg/Meucci: R demo Message-ID: <20130913092905.865FD185DF5@r-forge.r-project.org> Author: xavierv Date: 2013-09-13 11:29:05 +0200 (Fri, 13 Sep 2013) New Revision: 3077 Removed: pkg/Meucci/R/RankingInformation.R Modified: pkg/Meucci/demo/ButterflyTrading.R Log: -deleted missing svn files from the R folder Deleted: pkg/Meucci/R/RankingInformation.R =================================================================== --- pkg/Meucci/R/RankingInformation.R 2013-09-13 05:18:31 UTC (rev 3076) +++ pkg/Meucci/R/RankingInformation.R 2013-09-13 09:29:05 UTC (rev 3077) @@ -1,224 +0,0 @@ -# TODO: add max weights constraint to EfficientFrontier() -# TODO: add computeCVaR to EfficientFrontier() -# TODO: confirm QuadProg does not have a bug (i.e. it can optimize expected returns without use dvec by adding an equality constraint) - -#' Plots the efficient frontier, as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, -#' October 2008, p 100-106. -#' -#' @param e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier -#' @param s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier -#' @param w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier -#' -#' @references -#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} -#' See Meucci script for "RankingInformation/PlotFrontier.m" -#' -#' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export - - -PlotFrontier = function( e, s, w ) -{ - xx = dim( w )[ 1 ]; - N = dim( w )[ 2 ]; - Data = t( apply( w, 1, cumsum ) ); - - plot( c(min(s), 0), xlim = c( min(s) , max(s) ), ylim = c( 0, max(Data) ), - main= "frontier", xlab = " Portfolio # risk propensity", ylab = "Portfolio composition" ); - - for( n in 1 : N ) - { - x = rbind( min(s), s, max(s) ); - y = rbind( 0, matrix( Data[ , N-n+1 ] ), 0 ); - polygon( x, y, col = rgb( 0.9 - mod(n,3)*0.2, 0.9 - mod(n,3)*0.2, 0.9 - mod(n,3)*0.2) ); - } -} - -#' Plots the results of computing the efficient frontier (Expected returns and frontier), as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, -#' October 2008, p 100-106. -#' -#' @param e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier -#' @param s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier -#' @param w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier -#' @param M the NumPortf x 1 vector of expected returns for each asset -#' @param Lower constraints -#' @param Upper constraints -#' -#' @references -#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} -#' See Meucci script for "RankingInformation/PlotResults.m" -#' -#' @author Xavier Valls \email{flamejat@@gmail.com} - -PlotResults = function( e, s, w, M, Lower = NULL , Upper = NULL) -{ - N = length( M ); - dev.new(); - par( mfrow = c( 1, 2 ) ); - h1 = hist( M*100, plot = F ) - barplot( h$density, horiz = T, main = "expected returns", xlab = "", ylab = "" ); - if(length(Lower) || length(Upper)) - { - Changed = array( 0, N ); - Changed[ union( Lower, Upper ) ] = M[ union( Lower, Upper ) ] * 100; - h2 = hist(Changed, plot = F ); - barplot( h2$density, horiz = T, col = "red", add = T ); - } - - PlotFrontier( e*100, s*100, w ); -} - - - -#' Computes posterior probabilities to view the rankings, as it appears in A. Meucci, -#' "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, p 100-106. -#' -#' @param X a vector containing returns for all the asset classes -#' @param p a vector containing the prior probability values -#' @param Lower a vector of indexes indicating which column is lower than the corresponding column number in Upper -#' @param Upper a vector of indexes indicating which column is lower than the corresponding column number in Upper -#' -#' @references -#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} -#' See Meucci script for "RankingInformation/ViewRanking.m" -#' -#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} -#' @export EntropyProg - -# example ViewRanking( X , p , Lower = c(3,4) , Upper = c(4,5) ) # two inequality views: asset 3 < asset 4 returns, and asset 4 < asset 5 returns - -ViewRanking = function( X , p , Lower , Upper ) -{ - library( matlab ) - J = nrow( X ) - N = ncol( X ) - - K = length( Lower ) - - # constrain probabilities to sum to one across all scenarios... - Aeq = ones( 1 , J ) - beq = 1 - - # ...constrain the expectations... 
A*x <= 0 - # X[,Lower] refers to the column of returns for Asset-lower - # X[,Upper] refers to the column of returns for Asset-lower - # X[ , Lower ] - X[ , Upper ] is vector returns of the "lower"" asset less the returns of the "higher" asset - V = X[ , Lower ] - X[ , Upper ] # Jx1 vector. Expectation is assigned to each scenario - - A = t( V ) - b = 0 # The expectation is that (Lower - Upper)x <= 0. (i.e. The returns of upper are greater than zero for each scenario) - - # ...compute posterior probabilities - p_ = EntropyProg( p , A , as.matrix(b) , Aeq , as.matrix(beq) ) - - return( p_ ) -} - -#' Generates an efficient frontier based on Meucci's Ranking Information version and returns a A list with -#' NumPortf efficient portfolios whos returns are equally spaced along the whole range of the efficient frontier, -#' as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, -#' p 100-106. -#' -#' Most recent version of article and MATLAB code available at -#' http://www.symmys.com/node/158 -#' -#' @param X a matrix with the joint-scenario probabilities by asset (rows are joint-scenarios, columns are assets) -#' @param p a vector of probabilities associated with each scenario in matrix X -#' @param Options a list of options....TBD -#' -#' @return Exps the NumPortf x 1 vector of expected returns for each asset -#' @return Covs the NumPortf x N vector of security volatilities along the efficient frontier -#' @return w the NumPortf x N matrix of compositions (security weights) for each portfolio along the efficient frontier -#' @return e the NumPortf x 1 matrix of expected returns for each portfolio along the efficient frontier -#' @return s the NumPortf x 1 matrix of standard deviation of returns for each portfolio along the efficient frontier -#' -#' @references -#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} -#' See Meucci script for "RankingInformation/EfficientFrontier.m" -#' -#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} -#' @export - -RIEfficientFrontier = function( X , p , Options) -{ - - if( !require("limSolve") ) stop("This script requieres the limSolve package installed") - - - library( matlab ) - - J = nrow( X ) # number of scenarios - N = ncol( X ) # number of assets - - Exps = t(X) %*% p # probability-weighted expected return of each asset - - Scnd_Mom = t(X) %*% (X * ( p %*% matrix( 1, 1 , N ) ) ) - Scnd_Mom = ( Scnd_Mom + t(Scnd_Mom) ) / 2 # an N*N matrix - Covs = Scnd_Mom - Exps %*% t( Exps ) - - Constr = list() - - # constrain the sum of weights to 1 - Constr$Aeq = matrix( 1, 1 , N ) - Constr$beq = 1 - - # constrain the weight of any security to between 0 and 1 - Constr$Aleq = rbind( diag( 1, N ) , - diag( 1, N ) ) # linear coefficients matrix A in the inequality constraint A*x <= b - Constr$bleq = rbind( matrix( 1, N, 1 ) , matrix( 0, N, 1 ) ) # constraint vector b in the inequality constraint A*x <= b - - Amat = rbind( Constr$Aeq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints - bvec = rbind( Constr$beq , Constr$bleq ) # stack the equality constraints on top of the inequality constraints - - ############################################################################################ - # determine return of minimum-risk portfolio - FirstDegree = matrix( 0, N , 1 ) # TODO: assumes that securities have zero expected returns when computing efficient frontier? - SecondDegree = Covs - # Why is FirstDegree "expected returns" set to 0? 
- # We capture the equality view in the equality constraints matrix - # In other words, we have a constraint that the Expected Returns by Asset %*% Weights = Target Return - MinVol_Weights = solve.QP( Dmat = SecondDegree , dvec = -1*FirstDegree , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( Constr$beq ) ) - MinSDev_Exp = t( MinVol_Weights$solution ) %*% Exps - - ############################################################################################ - # determine return of maximum-return portfolio - FirstDegree = -Exps - MaxRet_Weights = linp( E = Constr$Aeq , F = Constr$beq , G = -1*Constr$Aleq , H = -1*Constr$bleq , Cost = FirstDegree , ispos = FALSE )$X - MaxExp_Exp = t( MaxRet_Weights) %*% Exps - - ############################################################################################ - # slice efficient frontier in NumPortf equally thick horizontal sections - Grid = matrix( , ncol = 0 , nrow = 0 ) - Grid = t( seq( from = Options$FrontierSpan[1] , to = Options$FrontierSpan[2] , length.out = Options$NumPortf ) ) - - # the portfolio return varies from a minimum of MinSDev_Exp up to a maximum of MaxExp_Exp - # We establish equally-spaced portfolio return targets and use this find efficient portfolios - # in the next step - Targets = as.numeric( MinSDev_Exp ) + Grid * as.numeric( ( MaxExp_Exp - MinSDev_Exp ) ) - - ############################################################################################ - # compute the NumPortf compositions and risk-return coordinates - FirstDegree = matrix( 0, N , 1 ) - - w = matrix( , ncol = N , nrow = 0 ) - e = matrix( , ncol = 1 , nrow = 0 ) - s = matrix( , ncol = 1 , nrow = 0 ) - - for ( i in 1:Options$NumPortf ) - { - # determine least risky portfolio for given expected return - # Ax = b ; Exps %*% weights = Target Return - AEq = rbind( Constr$Aeq , t( Exps ) ) # equality constraint: set expected return for each asset... 
- bEq = rbind( Constr$beq , Targets[ i ] ) # ...and target portfolio return for i'th efficient portfolio - - Amat = rbind( AEq , Constr$Aleq ) # stack the equality constraints on top of the inequality constraints - bvec = rbind( bEq , Constr$bleq ) - - Weights = solve.QP( Dmat = SecondDegree , dvec = -1*FirstDegree , Amat = -1*t(Amat) , bvec = -1*bvec , meq = length( bEq ) ) - - w = rbind( w , Weights$solution ) - s = rbind( s , sqrt( t(Weights$solution) %*% Covs %*% Weights$solution ) ) - e = rbind( e , Weights$solution %*% Exps ) - } - - return( list( e = e , Sdev = s , Composition = w , Exps = Exps , Covs = Covs ) ) -} Modified: pkg/Meucci/demo/ButterflyTrading.R =================================================================== --- pkg/Meucci/demo/ButterflyTrading.R 2013-09-13 05:18:31 UTC (rev 3076) +++ pkg/Meucci/demo/ButterflyTrading.R 2013-09-13 09:29:05 UTC (rev 3077) @@ -63,7 +63,7 @@ PlotFrontier( optimalPortfolios$Exp , optimalPortfolios$CVaR , optimalPortfolios$Composition ) -[Exp,SDev,CVaR,w] = LongShortMeanCVaRFrontier(PnL,p,butterfliesAnalytics,Options); +#[Exp,SDev,CVaR,w] = LongShortMeanCVaRFrontier(PnL,p,butterfliesAnalytics,Options); #PlotEfficientFrontier(Exp,CVaR,w) ########################################################################################################### From noreply at r-forge.r-project.org Fri Sep 13 11:47:18 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 11:47:18 +0200 (CEST) Subject: [Returnanalytics-commits] r3078 - in pkg/Meucci: data demo Message-ID: <20130913094719.110BC185322@r-forge.r-project.org> Author: braverock Date: 2013-09-13 11:47:18 +0200 (Fri, 13 Sep 2013) New Revision: 3078 Removed: pkg/Meucci/data/bondAttribution.Rda pkg/Meucci/data/covNRets.Rda pkg/Meucci/data/db.Rda pkg/Meucci/data/dbFFP.Rda pkg/Meucci/data/derivatives.Rda pkg/Meucci/data/equities.Rda pkg/Meucci/data/fILMR.Rda pkg/Meucci/data/fX.Rda pkg/Meucci/data/fixedIncome.Rda 
pkg/Meucci/data/highYieldIndices.Rda pkg/Meucci/data/implVol.Rda pkg/Meucci/data/linearModel.Rda pkg/Meucci/data/sectorsTS.Rda pkg/Meucci/data/securitiesIndustryClassification.Rda pkg/Meucci/data/securitiesTS.Rda pkg/Meucci/data/stockSeries.Rda pkg/Meucci/data/swap2y4y.Rda pkg/Meucci/data/swapParRates.Rda pkg/Meucci/data/swaps.Rda pkg/Meucci/data/timeSeries.Rda pkg/Meucci/data/usSwapRates.Rda pkg/Meucci/demo/00index Log: remove files with duplicate case-insensitive names Deleted: pkg/Meucci/data/bondAttribution.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/covNRets.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/db.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/dbFFP.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/derivatives.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/equities.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/fILMR.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/fX.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/fixedIncome.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/highYieldIndices.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/implVol.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/linearModel.Rda =================================================================== 
(Binary files differ) Deleted: pkg/Meucci/data/sectorsTS.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/securitiesIndustryClassification.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/securitiesTS.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/stockSeries.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/swap2y4y.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/swapParRates.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/swaps.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/timeSeries.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/usSwapRates.Rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/demo/00index =================================================================== --- pkg/Meucci/demo/00index 2013-09-13 09:29:05 UTC (rev 3077) +++ pkg/Meucci/demo/00index 2013-09-13 09:47:18 UTC (rev 3078) @@ -1,20 +0,0 @@ -AnalyticalvsNumerical This example script compares the numerical and the analytical solution of entropy-pooling -ButterflyTrading This example script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci -DetectOutliersviaMVE This example script detects outliers in two-asset and multi-asset case -FullyFlexibleBayesNets This case study uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management -HermiteGrid_CaseStudy This script estimates the prior of a hedge fund return and 
processes extreme views on CVaR according to Entropy Pooling -HermiteGrid_CVaR_Recursion This script illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling -HermiteGrid_demo This script compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views -InvariantProjection This script projects summary statistics to arbitrary horizons under i.i.d. assumption -logToArithmeticCovariance This example script generates arithmetric returns and arithmetric covariance matrix given a distribution of log returns -Prior2Posterior This example script compares the numerical and the analytical solution of entropy-pooling -RankingInformation This script performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci -RobustBayesianAllocation This script replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M -S_plotGaussHermite This example script displays mesh points based on Gaussian-Hermite quadrature -S_SnPCaseStudy This script replicates the example from Meucci's MATLAB script S_SnPCaseStudy.M -S_ToyExample This toy example illustrates the use of Entropy Pooling to compute Fully Flexible Bayesian networks -S_FitProjectRates This script fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution -S_CheckDiagonalization This script verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the transition matrix of an OU process -S_CovarianceEvolution This script represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid -S_DeterministicEvolution This script animates the evolution of the determinstic component of an OU process -MeanDiversificationFrontier This script computes the mean-diversification efficient frontier \ No newline at end of file From noreply at r-forge.r-project.org Fri Sep 13 11:51:55 2013 
From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 11:51:55 +0200 (CEST) Subject: [Returnanalytics-commits] r3079 - pkg/Meucci/data Message-ID: <20130913095155.38D8B1855D8@r-forge.r-project.org> Author: braverock Date: 2013-09-13 11:51:54 +0200 (Fri, 13 Sep 2013) New Revision: 3079 Removed: pkg/Meucci/data/FactorDistributions.rda pkg/Meucci/data/ReturnsDistribution.rda Log: - remove files with duplicate case-insensitive names Deleted: pkg/Meucci/data/FactorDistributions.rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/ReturnsDistribution.rda =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Fri Sep 13 11:58:18 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 11:58:18 +0200 (CEST) Subject: [Returnanalytics-commits] r3080 - pkg/Meucci/data Message-ID: <20130913095818.EEA401855D8@r-forge.r-project.org> Author: xavierv Date: 2013-09-13 11:58:18 +0200 (Fri, 13 Sep 2013) New Revision: 3080 Removed: pkg/Meucci/data/00Index pkg/Meucci/data/00index Log: - deleted two useless files from the data folder Deleted: pkg/Meucci/data/00Index =================================================================== --- pkg/Meucci/data/00Index 2013-09-13 09:51:54 UTC (rev 3079) +++ pkg/Meucci/data/00Index 2013-09-13 09:58:18 UTC (rev 3080) @@ -1,10 +0,0 @@ -butterflyAnalytics X p FactorNames -FactorDistributions Butterflies -ghq1000 ghqx -MeucciFreaqEst DY Data Dates X p Names -MeucciTweakTest A_ Aeq_ b_ beq_ db_ g_ lb_ ub_ -pseudodata data -ReturnsDistribution P X -SectorsSnP500 DP P -MeanDiversificationFrontier S Mu w_b -DB_SwapParRates Rates Dates \ No newline at end of file Deleted: pkg/Meucci/data/00index =================================================================== --- pkg/Meucci/data/00index 2013-09-13 09:51:54 UTC (rev 3079) +++ 
pkg/Meucci/data/00index 2013-09-13 09:58:18 UTC (rev 3080) @@ -1,10 +0,0 @@ -butterflyAnalytics X p FactorNames -FactorDistributions Butterflies -ghq1000 ghqx -MeucciFreaqEst DY Data Dates X p Names -MeucciTweakTest A_ Aeq_ b_ beq_ db_ g_ lb_ ub_ -pseudodata data -ReturnsDistribution P X -SectorsSnP500 DP P -MeanDiversificationFrontier S Mu w_b -DB_SwapParRates Rates Dates \ No newline at end of file From noreply at r-forge.r-project.org Fri Sep 13 12:03:50 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 12:03:50 +0200 (CEST) Subject: [Returnanalytics-commits] r3081 - pkg/Meucci/demo Message-ID: <20130913100350.D6B9D181299@r-forge.r-project.org> Author: braverock Date: 2013-09-13 12:03:49 +0200 (Fri, 13 Sep 2013) New Revision: 3081 Removed: pkg/Meucci/demo/covNRets.Rda Log: - remove file present in data/ dir from demo/ Deleted: pkg/Meucci/demo/covNRets.Rda =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Fri Sep 13 12:14:28 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 12:14:28 +0200 (CEST) Subject: [Returnanalytics-commits] r3082 - pkg/Meucci/data Message-ID: <20130913101428.753E81833B9@r-forge.r-project.org> Author: xavierv Date: 2013-09-13 12:14:28 +0200 (Fri, 13 Sep 2013) New Revision: 3082 Removed: pkg/Meucci/data/MeucciReturnsDistribution.rda pkg/Meucci/data/butterflyTradingX.rda pkg/Meucci/data/factorDistributions.rda Log: - removed two data files not needed anymore from the data folder Deleted: pkg/Meucci/data/MeucciReturnsDistribution.rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/butterflyTradingX.rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/factorDistributions.rda 
=================================================================== (Binary files differ) From noreply at r-forge.r-project.org Fri Sep 13 12:29:37 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 12:29:37 +0200 (CEST) Subject: [Returnanalytics-commits] r3083 - in pkg/Meucci: demo man Message-ID: <20130913102937.7BCD61833B9@r-forge.r-project.org> Author: xavierv Date: 2013-09-13 12:29:37 +0200 (Fri, 13 Sep 2013) New Revision: 3083 Removed: pkg/Meucci/demo/S_EquitiesInvariance.R pkg/Meucci/man/StackedBarChart.Rd Log: - deleted a duplicated and a documentation file for a non existing anyore function Deleted: pkg/Meucci/demo/S_EquitiesInvariance.R =================================================================== --- pkg/Meucci/demo/S_EquitiesInvariance.R 2013-09-13 10:14:28 UTC (rev 3082) +++ pkg/Meucci/demo/S_EquitiesInvariance.R 2013-09-13 10:29:37 UTC (rev 3083) @@ -1,38 +0,0 @@ -#' This file performs the quest for invariance in the stock market, as described in -#' A. Meucci "Risk and Asset Allocation", Springer, 2005, chapter 3. 
-#' -#' @references -#' \url{http://} -#' See Meucci's script for "S_EquitiesInvariance.m" -#' -#' @author Xavier Valls \email{flamejat@@gmail.com} - - - -################################################################################################################## -### Load daily stock prices from the utility sector in the S&P 500 -load("../data/equities.Rda"); - -################################################################################################################## -### Pick one stock from database -Stock_Index = 20; -P = Equities$Prices[ 632 : nrow( Equities$Prices ), Stock_Index ]; # select data after 1/8 rule - -################################################################################################################## -### Quest for invariance -# first invariant -X = P[ -1 ] / P[ -length( P )]; -PerformIidAnalysis( 1 : length( X ), X, 'Analysis for X' ); - -# second invariant -Y = P[ -1 ] / P[ -length( P )]; -PerformIidAnalysis(1 : length( Y ), Y, 'Analysis for Y' ); - -# third invariant -Z = X ^ 2; -PerformIidAnalysis( 1 : length(Z), Z, 'Analysis for Z' ); - -# fourth invariant -W = P[ 3 : length( P ) ] - 2 * P[ 2: ( length( P ) -1 ) ] + P[ 1 : ( length( P ) -2 ) ]; -PerformIidAnalysis( 1 : length( W ), W, 'Analysis for W' ); - Deleted: pkg/Meucci/man/StackedBarChart.Rd =================================================================== --- pkg/Meucci/man/StackedBarChart.Rd 2013-09-13 10:14:28 UTC (rev 3082) +++ pkg/Meucci/man/StackedBarChart.Rd 2013-09-13 10:29:37 UTC (rev 3083) @@ -1,16 +0,0 @@ -\name{StackedBarChart} -\alias{StackedBarChart} -\title{Generate a Stacked Bar Chart based on the frontier weights matrix} -\usage{ - StackedBarChart(weightsMatrix) -} -\arguments{ - \item{weightsMatrix}{a matrix of weights where rows are - efficient portfolios summing to one, and columns are - assets} -} -\description{ - Generate a Stacked Bar Chart based on the frontier - weights matrix -} - From noreply at r-forge.r-project.org Fri Sep 13 
13:12:05 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 13:12:05 +0200 (CEST) Subject: [Returnanalytics-commits] r3084 - in pkg/Meucci: R data demo man Message-ID: <20130913111205.2606F184CC5@r-forge.r-project.org> Author: xavierv Date: 2013-09-13 13:12:03 +0200 (Fri, 13 Sep 2013) New Revision: 3084 Removed: pkg/Meucci/data/MeucciFreaqEst.rda pkg/Meucci/data/MeucciTweakTest.rda Modified: pkg/Meucci/R/RobustBayesianAllocation.R pkg/Meucci/R/data.R pkg/Meucci/demo/FullyFlexibleBayesNets.R pkg/Meucci/demo/RobustBayesianAllocation.R pkg/Meucci/demo/S_SnPCaseStudy.R pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd Log: - fixed variable scope in robustBayesianPortfolioOptimization and two datafiles Modified: pkg/Meucci/R/RobustBayesianAllocation.R =================================================================== --- pkg/Meucci/R/RobustBayesianAllocation.R 2013-09-13 10:29:37 UTC (rev 3083) +++ pkg/Meucci/R/RobustBayesianAllocation.R 2013-09-13 11:12:03 UTC (rev 3084) @@ -87,6 +87,7 @@ #' @param mean_post the posterior vector of means (after blending prior and sample data) #' @param cov_post the posterior covariance matrix (after blending prior and sample data) #' @param nu_post a numeric with the relative confidence in the prior vs. the sample data. A value of 2 indicates twice as much weight to assign to the prior vs. the sample data. Must be greater than or equal to zero +#' @param time_post a numeric #' @param riskAversionMu risk aversion coefficient for estimation of means. #' @param riskAversionSigma risk aversion coefficient for estimation of Sigma. #' @param discretizations an integer with the number of portfolios to generate along efficient frontier (equally distanced in return space). Parameter must be an integer greater or equal to 1. 
@@ -111,7 +112,7 @@ #' \url{ http://papers.ssrn.com/sol3/papers.cfm?abstract_id=681553 } #' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} #' @export -robustBayesianPortfolioOptimization = function( mean_post , cov_post , nu_post , riskAversionMu = .1 , riskAversionSigma = .1 , discretizations = 10 , longonly = FALSE , volatility ) +robustBayesianPortfolioOptimization = function( mean_post , cov_post , nu_post , time_post, riskAversionMu = .1 , riskAversionSigma = .1 , discretizations = 10 , longonly = FALSE , volatility ) { # parameter checks N = length( mean ) # number of assets Modified: pkg/Meucci/R/data.R =================================================================== --- pkg/Meucci/R/data.R 2013-09-13 10:29:37 UTC (rev 3083) +++ pkg/Meucci/R/data.R 2013-09-13 11:12:03 UTC (rev 3084) @@ -238,4 +238,13 @@ #' @references A. Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, #' October 2008, p 100-106. \url{http://symmys.com/node/158} #' @keywords data +NULL + +#' @title scenarios table and prior distribution for changes in SWAP2YR SWAP10YR CDXIG S&P500 DollarIndex Crude Gold VIX 10YRInflationSwapRate +#' +#' @name freaqEst +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references A. 
Meucci, "Stress-Testing with Fully Flexible Causal Inputs" \url{http://symmys.com/node/152} +#' @keywords data NULL \ No newline at end of file Deleted: pkg/Meucci/data/MeucciFreaqEst.rda =================================================================== (Binary files differ) Deleted: pkg/Meucci/data/MeucciTweakTest.rda =================================================================== (Binary files differ) Modified: pkg/Meucci/demo/FullyFlexibleBayesNets.R =================================================================== --- pkg/Meucci/demo/FullyFlexibleBayesNets.R 2013-09-13 10:29:37 UTC (rev 3083) +++ pkg/Meucci/demo/FullyFlexibleBayesNets.R 2013-09-13 11:12:03 UTC (rev 3084) @@ -8,15 +8,14 @@ # upload scenarios table and prior distribution for changes in # SWAP2YR SWAP10YR CDXIG S&P500 DollarIndex Crude Gold VIX 10YRInflationSwapRate library( matlab ) -data( "MeucciFreaqEst" ) -Names = unlist( Names ) -colnames( X ) = colnames( DY ) = colnames( Data ) = Names -rownames( Data ) = Dates +data( "freaqEst" ) +colnames( freaqEst$X ) = colnames( freaqEst$DY ) = colnames( freaqEst$Data ) = freaqEst$Names +rownames( freaqEst$Data ) = freaqEst$Dates -J = nrow( X ) ; N = ncol( X ) -e = .01 +J = nrow( freaqEst$X ) ; N = ncol( freaqEst$X ) +e = 0.01 p = ( 1 - e ) * p + e * ones( J , 1 ) / J # assigns a minimum probability to each scenario of e * number of scenarios -moments = ComputeMoments( X , p ) +moments = ComputeMoments( freaqEst$X , p ) m = moments$means ; s = moments$sd ; C = moments$correlationMatrix ; rm( moments ) ################################################################################## @@ -76,12 +75,12 @@ View[[k]]$c = .1 # create linear constraint representation of views on probabilities -constraints = CondProbViews( View , X ) +constraints = CondProbViews( View , freaqEst$X ) A = constraints$A ; b = constraints$b ; g = constraints$g ; rm( constraints ) # add constraint for view on correlation C_12_ = .6 -New_A = t( X[ , 1 ] * X[ , 2 ] ) +New_A = 
t( freaqEst$X[ , 1 ] * freaqEst$X[ , 2 ] ) New_b = s[1] * s[2] * C_12_ + m[1] * m[2] New_g = -log( 1 - .1 ) Modified: pkg/Meucci/demo/RobustBayesianAllocation.R =================================================================== --- pkg/Meucci/demo/RobustBayesianAllocation.R 2013-09-13 10:29:37 UTC (rev 3083) +++ pkg/Meucci/demo/RobustBayesianAllocation.R 2013-09-13 11:12:03 UTC (rev 3084) @@ -76,6 +76,7 @@ frontierResults = robustBayesianPortfolioOptimization( mean_post = mean_post , cov_post = cov_post , nu_post = nu_post , + time_post = time_post, riskAversionMu = p_m , riskAversionSigma = p_s , discretizations = NumPortf , Modified: pkg/Meucci/demo/S_SnPCaseStudy.R =================================================================== --- pkg/Meucci/demo/S_SnPCaseStudy.R 2013-09-13 10:29:37 UTC (rev 3083) +++ pkg/Meucci/demo/S_SnPCaseStudy.R 2013-09-13 11:12:03 UTC (rev 3084) @@ -6,8 +6,8 @@ load("SectorsSnP500") -p_m = .1 # aversion to estimation risk for mu -p_s = .1 # aversion to estimation risk for sigma +p_m = 0.1 # aversion to estimation risk for mu +p_s = 0.1 # aversion to estimation risk for sigma Ps <- P[seq(from=1, to=nrow(P),by=5),] R <- data.frame((Ps[2:nrow(Ps),]/Ps[1:nrow(Ps)-1,]) - 1) Modified: pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd =================================================================== --- pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd 2013-09-13 10:29:37 UTC (rev 3083) +++ pkg/Meucci/man/robustBayesianPortfolioOptimization.Rd 2013-09-13 11:12:03 UTC (rev 3084) @@ -1,83 +1,86 @@ -\name{robustBayesianPortfolioOptimization} -\alias{robustBayesianPortfolioOptimization} -\title{Construct a Bayesian mean-variance efficient frontier and identifies the most robust portfolio} -\usage{ - robustBayesianPortfolioOptimization(mean_post, cov_post, - nu_post, riskAversionMu = 0.1, riskAversionSigma = 0.1, - discretizations = 10, longonly = FALSE, volatility) -} -\arguments{ - \item{mean_post}{the posterior vector of means 
(after - blending prior and sample data)} - - \item{cov_post}{the posterior covariance matrix (after - blending prior and sample data)} - - \item{nu_post}{a numeric with the relative confidence in - the prior vs. the sample data. A value of 2 indicates - twice as much weight to assign to the prior vs. the - sample data. Must be greater than or equal to zero} - - \item{riskAversionMu}{risk aversion coefficient for - estimation of means.} - - \item{riskAversionSigma}{risk aversion coefficient for - estimation of Sigma.} - - \item{discretizations}{an integer with the number of - portfolios to generate along efficient frontier (equally - distanced in return space). Parameter must be an integer - greater or equal to 1.} - - \item{longonly}{a boolean for suggesting whether an asset - in a portfolio can be shorted or not} - - \item{volatility}{a numeric with the volatility used to - calculate gamma-m. gamma-m acts as a constraint on the - maximum volatility of the robust portfolio. A higher - volatility means a higher volatile robust portfolio may - be identified.} -} -\value{ - a list of portfolios along the frontier from least risky - to most risky bayesianFrontier a list with portfolio - along the Bayesian efficient frontier. Specifically: - returns: the expected returns of each portfolio along the - Bayesian efficient frontier volatility: the expected - volatility of each portfolio along the Bayesian efficient - frontier weights: the weights of each portfolio along the - Bayesian efficient frontier robustPortfolio the most - robust portfolio along the Bayesian efficient frontier. 
- Specifically: returns: the expected returns of each - portfolio along the Bayesian efficient frontier - volatility: the expected volatility of each portfolio - along the Bayesian efficient frontier weights: the - weights of each portfolio along the Bayesian efficient - frontier - - \deqn{ w_{rB}^{(i)} = argmax_{w \in C, w' \Sigma_{1} w - \leq \gamma_{\Sigma}^{(i)} } \big\{w' \mu^{1} - \gamma - _{\mu} \sqrt{w' \Sigma_{1} w} \big\}, \gamma_{\mu} \equiv - \sqrt{ \frac{q_{\mu}^{2}}{T_{1}} \frac{v_{1}}{v_{1} - 2} - } \gamma_{\Sigma}^{(i)} \equiv \frac{v^{(i)}}{ \frac{ - \nu_{1}}{\nu_{1}+N+1} + \sqrt{ - \frac{2\nu_{1}^{2}q_{\Sigma}^{2}}{ (\nu_{1}+N+1)^{3} } } - } } -} -\description{ - Construct a collection of portfolios along the Bayesian - mean-variance efficient frontier where each portfolio is - equally distanced in return space. The function also - returns the most robust portfolio along the Bayesian - efficient frontier -} -\author{ - Ram Ahluwalia \email{ram at wingedfootcapital.com} -} -\references{ - A. Meucci - Robust Bayesian Allocation - See formula (19) - - (21) \url{ - http://papers.ssrn.com/sol3/papers.cfm?abstract_id=681553 - } -} - +\name{robustBayesianPortfolioOptimization} +\alias{robustBayesianPortfolioOptimization} +\title{Construct a Bayesian mean-variance efficient frontier and identifies the most robust portfolio} +\usage{ + robustBayesianPortfolioOptimization(mean_post, cov_post, + nu_post, time_post, riskAversionMu = 0.1, + riskAversionSigma = 0.1, discretizations = 10, + longonly = FALSE, volatility) +} +\arguments{ + \item{mean_post}{the posterior vector of means (after + blending prior and sample data)} + + \item{cov_post}{the posterior covariance matrix (after + blending prior and sample data)} + + \item{nu_post}{a numeric with the relative confidence in + the prior vs. the sample data. A value of 2 indicates + twice as much weight to assign to the prior vs. the + sample data. 
Must be greater than or equal to zero} + + \item{time_post}{a numeric} + + \item{riskAversionMu}{risk aversion coefficient for + estimation of means.} + + \item{riskAversionSigma}{risk aversion coefficient for + estimation of Sigma.} + + \item{discretizations}{an integer with the number of + portfolios to generate along efficient frontier (equally + distanced in return space). Parameter must be an integer + greater or equal to 1.} + + \item{longonly}{a boolean for suggesting whether an asset + in a portfolio can be shorted or not} + + \item{volatility}{a numeric with the volatility used to + calculate gamma-m. gamma-m acts as a constraint on the + maximum volatility of the robust portfolio. A higher + volatility means a higher volatile robust portfolio may + be identified.} +} +\value{ + a list of portfolios along the frontier from least risky + to most risky bayesianFrontier a list with portfolio + along the Bayesian efficient frontier. Specifically: + returns: the expected returns of each portfolio along the + Bayesian efficient frontier volatility: the expected + volatility of each portfolio along the Bayesian efficient + frontier weights: the weights of each portfolio along the + Bayesian efficient frontier robustPortfolio the most + robust portfolio along the Bayesian efficient frontier. 
+ Specifically: returns: the expected returns of each + portfolio along the Bayesian efficient frontier + volatility: the expected volatility of each portfolio + along the Bayesian efficient frontier weights: the + weights of each portfolio along the Bayesian efficient + frontier + + \deqn{ w_{rB}^{(i)} = argmax_{w \in C, w' \Sigma_{1} w + \leq \gamma_{\Sigma}^{(i)} } \big\{w' \mu^{1} - \gamma + _{\mu} \sqrt{w' \Sigma_{1} w} \big\}, \gamma_{\mu} \equiv + \sqrt{ \frac{q_{\mu}^{2}}{T_{1}} \frac{v_{1}}{v_{1} - 2} + } \gamma_{\Sigma}^{(i)} \equiv \frac{v^{(i)}}{ \frac{ + \nu_{1}}{\nu_{1}+N+1} + \sqrt{ + \frac{2\nu_{1}^{2}q_{\Sigma}^{2}}{ (\nu_{1}+N+1)^{3} } } + } } +} +\description{ + Construct a collection of portfolios along the Bayesian + mean-variance efficient frontier where each portfolio is + equally distanced in return space. The function also + returns the most robust portfolio along the Bayesian + efficient frontier +} +\author{ + Ram Ahluwalia \email{ram at wingedfootcapital.com} +} +\references{ + A. 
Meucci - Robust Bayesian Allocation - See formula (19) + - (21) \url{ + http://papers.ssrn.com/sol3/papers.cfm?abstract_id=681553 + } +} + From noreply at r-forge.r-project.org Fri Sep 13 13:48:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 13:48:13 +0200 (CEST) Subject: [Returnanalytics-commits] r3085 - in pkg/Meucci: R data demo man Message-ID: <20130913114813.B090F18469D@r-forge.r-project.org> Author: xavierv Date: 2013-09-13 13:48:13 +0200 (Fri, 13 Sep 2013) New Revision: 3085 Added: pkg/Meucci/data/freaqEst.rda pkg/Meucci/data/sectorsSnP500.rda pkg/Meucci/man/freaqEst.Rd pkg/Meucci/man/sectorsSnP500.Rd Removed: pkg/Meucci/data/SectorsSnP500.rda Modified: pkg/Meucci/R/data.R pkg/Meucci/demo/S_SnPCaseStudy.R Log: - fixed documentation for the last two undocumented datafiles Modified: pkg/Meucci/R/data.R =================================================================== --- pkg/Meucci/R/data.R 2013-09-13 11:12:03 UTC (rev 3084) +++ pkg/Meucci/R/data.R 2013-09-13 11:48:13 UTC (rev 3085) @@ -247,4 +247,14 @@ #' @author Xavier Valls\email{flamejat@@gmail.com} #' @references A. Meucci, "Stress-Testing with Fully Flexible Causal Inputs" \url{http://symmys.com/node/152} #' @keywords data +NULL + + +#' @title data from the sectors in the S&P 500 +#' +#' @name sectorsSnP500 +#' @docType data +#' @author Xavier Valls\email{flamejat@@gmail.com} +#' @references Attilio Meucci, 2011, "Robust Bayesian Allocation". 
\url{http://symmys.com/node/102} +#' @keywords data NULL \ No newline at end of file Deleted: pkg/Meucci/data/SectorsSnP500.rda =================================================================== (Binary files differ) Added: pkg/Meucci/data/freaqEst.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/freaqEst.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/Meucci/data/sectorsSnP500.rda =================================================================== (Binary files differ) Property changes on: pkg/Meucci/data/sectorsSnP500.rda ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/Meucci/demo/S_SnPCaseStudy.R =================================================================== --- pkg/Meucci/demo/S_SnPCaseStudy.R 2013-09-13 11:12:03 UTC (rev 3084) +++ pkg/Meucci/demo/S_SnPCaseStudy.R 2013-09-13 11:48:13 UTC (rev 3085) @@ -4,15 +4,15 @@ # source on www.symmys.com #################################################################### -load("SectorsSnP500") +load("../data/sectorsSnP500.rda") p_m = 0.1 # aversion to estimation risk for mu p_s = 0.1 # aversion to estimation risk for sigma -Ps <- P[seq(from=1, to=nrow(P),by=5),] +Ps <- sectorsSnP500$P[seq(from=1, to=nrow(sectorsSnP500$P),by=5),] R <- data.frame((Ps[2:nrow(Ps),]/Ps[1:nrow(Ps)-1,]) - 1) -Dates_P <- DP[seq(from=1, to=nrow(DP), by=5),] -Dates_R <- DP[2:nrow(DP),] +Dates_P <- sectorsSnP500$DP[seq(from=1, to=nrow(sectorsSnP500$DP), by=5),] +Dates_R <- sectorsSnP500$DP[2:nrow(DP),] Ttot = nrow(R) N = ncol(R) Added: pkg/Meucci/man/freaqEst.Rd =================================================================== --- pkg/Meucci/man/freaqEst.Rd (rev 0) +++ pkg/Meucci/man/freaqEst.Rd 2013-09-13 11:48:13 UTC (rev 3085) @@ -0,0 +1,19 @@ +\docType{data} +\name{freaqEst} +\alias{freaqEst} 
+\title{scenarios table and prior distribution for changes in SWAP2YR SWAP10YR CDXIG S&P500 DollarIndex Crude Gold VIX 10YRInflationSwapRate} +\description{ + scenarios table and prior distribution for changes in + SWAP2YR SWAP10YR CDXIG S&P500 DollarIndex Crude Gold VIX + 10YRInflationSwapRate +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + A. Meucci, "Stress-Testing with Fully Flexible Causal + Inputs" \url{http://symmys.com/node/152} +} +\keyword{data} +\keyword{datasets} + Added: pkg/Meucci/man/sectorsSnP500.Rd =================================================================== --- pkg/Meucci/man/sectorsSnP500.Rd (rev 0) +++ pkg/Meucci/man/sectorsSnP500.Rd 2013-09-13 11:48:13 UTC (rev 3085) @@ -0,0 +1,17 @@ +\docType{data} +\name{sectorsSnP500} +\alias{sectorsSnP500} +\title{data from the sectors in the S&P 500} +\description{ + data from the sectors in the S&P 500 +} +\author{ + Xavier Valls\email{flamejat at gmail.com} +} +\references{ + Attilio Meucci, 2011, "Robust Bayesian Allocation". 
+ \url{http://symmys.com/node/102} +} +\keyword{data} +\keyword{datasets} + From noreply at r-forge.r-project.org Fri Sep 13 16:11:38 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 16:11:38 +0200 (CEST) Subject: [Returnanalytics-commits] r3086 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130913141138.D42C5185BA1@r-forge.r-project.org> Author: peter_carl Date: 2013-09-13 16:11:38 +0200 (Fri, 13 Sep 2013) New Revision: 3086 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/parse.EDHEC.R Log: - renaming the script to potentially functionalize it Copied: pkg/PortfolioAnalytics/sandbox/symposium2013/parse.EDHEC.R (from rev 3071, pkg/PortfolioAnalytics/sandbox/script.buildEDHEC.R) =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/parse.EDHEC.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/parse.EDHEC.R 2013-09-13 14:11:38 UTC (rev 3086) @@ -0,0 +1,32 @@ +### Construct an xts object of EDHEC composite hedge fund style indexes + +# Peter Carl + +# Used for updating the edhec data object in PerformanceAnalytics + +require(gdata) +require(xts) + +# Download the following file to the working directory: +# http://www.edhec-risk.com/indexes/pure_style/data/table/history.csv +### @TODO: Is there a way to download it directly? 
Maybe not, seems to require a login +x=read.csv(file="history.csv", sep=";", header=TRUE, check.names=FALSE) +x.dates = as.Date(x[,1], format="%d/%m/%Y") +x.data = apply(x[,-1], MARGIN=2, FUN=function(x){as.numeric(sub("%","", x, fixed=TRUE))/100}) # get rid of percentage signs +edhec = xts(x.data, order.by=x.dates) +colnames(edhec) + +# calculate a wealth index +edhec.idx = apply(edhec, MARGIN=2, FUN=function(x){cumprod(1 + na.omit(x))}) +# identify quarters +edhec.Q.idx=edhec.idx[endpoints(edhec.idx, on="quarters"),] +# calculate quarterly returns +edhec.Q.R=ROC(edhec.Q.idx) +# trim the last data point, if needed +# dim(edhec.Q.R) +# edhec.Q.R=edhec.Q.R[-61,] +# reclass the object +edhec.Q.R=as.xts(edhec.Q.R) +# lm requires safe names +colnames(edhec.Q.R)=make.names(colnames(edhec)) + From noreply at r-forge.r-project.org Fri Sep 13 16:37:41 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 16:37:41 +0200 (CEST) Subject: [Returnanalytics-commits] r3087 - pkg/PortfolioAnalytics/sandbox/symposium2013/R Message-ID: <20130913143741.B8856184632@r-forge.r-project.org> Author: peter_carl Date: 2013-09-13 16:37:41 +0200 (Fri, 13 Sep 2013) New Revision: 3087 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/R/table.RiskStats.R Log: - needs to be added to PerfA Added: pkg/PortfolioAnalytics/sandbox/symposium2013/R/table.RiskStats.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/R/table.RiskStats.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/R/table.RiskStats.R 2013-09-13 14:37:41 UTC (rev 3087) @@ -0,0 +1,229 @@ +# Additional and re-organized tables for WB presentations + +table.RiskStats <- +function (R, ci = 0.95, scale = NA, Rf = 0, MAR = .1/12, p= 0.95, digits = 4) +{# @author Peter Carl + # Risk Statistics: Statistics and Stylized Facts + + y = checkData(R, method = "zoo") + if(!is.null(dim(Rf))) + Rf = checkData(Rf, method = "zoo") 
+ # Set up dimensions and labels + columns = ncol(y) + rows = nrow(y) + columnnames = colnames(y) + rownames = rownames(y) + + if(is.na(scale)) { + freq = periodicity(y) + switch(freq$scale, + minute = {stop("Data periodicity too high")}, + hourly = {stop("Data periodicity too high")}, + daily = {scale = 252}, + weekly = {scale = 52}, + monthly = {scale = 12}, + quarterly = {scale = 4}, + yearly = {scale = 1} + ) + } + + # for each column, do the following: + for(column in 1:columns) { + x = na.omit(y[,column,drop=FALSE]) + # for each column, make sure that R and Rf are for the same dates + if(!is.null(dim(Rf))){ # if Rf is a column + z = merge(x,Rf) + zz = na.omit(z) + x = zz[,1,drop=FALSE] + Rf.subset = zz[,2,drop=FALSE] + } + else { # unless Rf is a single number + Rf.subset = Rf + } + + z = c( + Return.annualized(x, scale = scale), + StdDev.annualized(x, scale = scale), + SharpeRatio.annualized(x, scale = scale, Rf = Rf), + DownsideDeviation(x,MAR=0)*sqrt(scale),# Add annualization to this function + SortinoRatio(x)*sqrt(scale), # New function adds annualization + PerformanceAnalytics:::AverageDrawdown(x), + maxDrawdown(x), + SterlingRatio(x), + VaR(x, p=p,method="historical"), + ES(x, p=p,method="historical"), + skewness(x), + kurtosis(x), + VaR(x, p=p), + ES(x, p=p), + SharpeRatio(x, p=p, Rf=Rf, FUN="ES", annualize=TRUE), + length(x) + ) + znames = c( + "Annualized Return", + "Annualized Std Dev", + "Annualized Sharpe Ratio", + "Annualized Downside Deviation", + "Annualized Sortino Ratio", + "Average Drawdown", + "Maximum Drawdown", + "Sterling Ratio (10%)", + paste("Historical VaR (",base::round(p*100,1),"%)",sep=""), + paste("Historical ETL (",base::round(p*100,1),"%)",sep=""), + "Skewness", + "Excess Kurtosis", + paste("Modified VaR (",base::round(p*100,1),"%)",sep=""), + paste("Modified ETL (",base::round(p*100,1),"%)",sep=""), + paste("Annualized Modified Sharpe Ratio (ETL ", base::round(p*100,1),"%)",sep=""), + "# Obs" + ) + if(column == 1) { + 
resultingtable = data.frame(Value = z, row.names = znames) + } + else { + nextcolumn = data.frame(Value = z, row.names = znames) + resultingtable = cbind(resultingtable, nextcolumn) + } + } + colnames(resultingtable) = columnnames + ans = base::round(resultingtable, digits) + ans +} + +table.PerfStats <- +function (R, scale = NA, Rf = 0, digits = 4) +{# @author Peter Carl + # Performance Statistics: Statistics and Stylized Facts + + y = checkData(R) + if(!is.null(dim(Rf))) + Rf = checkData(Rf) + # Set up dimensions and labels + columns = ncol(y) + rows = nrow(y) + columnnames = colnames(y) + rownames = rownames(y) + + if(is.na(scale)) { + freq = periodicity(y) + switch(freq$scale, + minute = {stop("Data periodicity too high")}, + hourly = {stop("Data periodicity too high")}, + daily = {scale = 252}, + weekly = {scale = 52}, + monthly = {scale = 12}, + quarterly = {scale = 4}, + yearly = {scale = 1} + ) + } + + # for each column, do the following: + for(column in 1:columns) { + x = na.omit(y[,column,drop=FALSE]) + # for each column, make sure that R and Rf are for the same dates + if(!is.null(dim(Rf))){ # if Rf is a column + z = merge(x,Rf) + zz = na.omit(z) + x = zz[,1,drop=FALSE] + Rf.subset = zz[,2,drop=FALSE] + } + else { # unless Rf is a single number + Rf.subset = Rf + } + + z = c( + Return.cumulative(x), + Return.annualized(x, scale = scale), + StdDev.annualized(x, scale = scale), + length(subset(x, x>0)), + length(subset(x, x<=0)), + length(subset(x, x>0))/length(x), + mean(subset(x, x>0)), + mean(subset(x, x<=0)), + mean(x), + AverageDrawdown(x), + AverageRecovery(x) + ) + znames = c( + "Cumulative Return", + "Annualized Return", + "Annualized Std Dev", + "# Positive Months", + "# Negative Months", + "% Positive Months", + "Average Positive Month", + "Average Negative Month", + "Average Month", + "Average Drawdown", + "Average Months to Recovery" + ) + if(column == 1) { + resultingtable = data.frame(Value = z, row.names = znames) + } + else { + nextcolumn = 
data.frame(Value = z, row.names = znames) + resultingtable = cbind(resultingtable, nextcolumn) + } + } + colnames(resultingtable) = columnnames + ans = base::round(resultingtable, digits) + ans +} + +table.RiskContribution <- function(R, p, ..., weights=NULL, scale=NA, geometric = TRUE) { + + R = na.omit(R) + if(is.null(weights)) { + message("no weights passed in, assuming equal weighted portfolio") + weights = rep(1/dim(R)[[2]], dim(R)[[2]]) + } + if (is.na(scale)) { + freq = periodicity(R) + switch(freq$scale, minute = { + stop("Data periodicity too high") + }, hourly = { + stop("Data periodicity too high") + }, daily = { + scale = 252 + }, weekly = { + scale = 52 + }, monthly = { + scale = 12 + }, quarterly = { + scale = 4 + }, yearly = { + scale = 1 + }) + } + + # Returns + # ret.col = colMeans(R)*weights + ret.col = Return.annualized(R, geometric=geometric)*weights + percret.col = ret.col/sum(ret.col) + result = cbind(t(ret.col), t(percret.col)) + # Standard Deviation + sd.cols = StdDev(R, weights=weights, invert=TRUE, portfolio_method="component", p=(1-1/12)) + result = cbind(sd.cols$contribution*sqrt(scale), sd.cols$pct_contrib_StdDev, result) + # VaR? 
+ var.cols = VaR(R, weights=weights, method="gaussian", portfolio_method="component", p=(1-1/12)) + result = cbind(var.cols$contribution, var.cols$pct_contrib_VaR, result) + + mvar.cols = VaR(R, weights=weights, method="gaussian", portfolio_method="component", p=(1-1/12)) + result = cbind(mvar.cols$contribution, mvar.cols$pct_contrib_VaR, result) + + # ES + es.cols = ES(R, weights=weights, method="gaussian", portfolio_method="component", p=(1-1/12)) + result = cbind(es.cols$contribution, es.cols$pct_contrib_ES, result) + + mes.cols = ES(R, weights=weights, method="modified", portfolio_method="component", p=(1-1/12)) + result = cbind(weights, mes.cols$contribution, mes.cols$pct_contrib_MES, result) + total = colSums(result) + + result = rbind(result, colSums(result)) + rownames(result) = c(colnames(R),"Total") +# colnames(result) = c("Weights", "Contribution to mETL", "Percentage Contribution to mETL", "Contribution to gETL", "Percentage Contribution to gETL", "Contribution to Annualized StdDev", "Percentage Contribution to StdDev", "Contribution to Annualized E(R)", "Percentage Contribution to E(R)") + + colnames(result) = c("Weights", "Contribution to mETL", "%Contribution to mETL", "Contribution to gETL", "%Contribution to gETL", "Contribution to mVaR", "%Contribution to mVaR", "Contribution to gVaR", "%Contribution to gVaR", "Contribution to Annualized StdDev", "%Contribution to StdDev", "Contribution to Annualized E(R)", "%Contribution to E(R)") + return(result) + +} From noreply at r-forge.r-project.org Fri Sep 13 17:37:55 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 17:37:55 +0200 (CEST) Subject: [Returnanalytics-commits] r3088 - in pkg/Meucci: . 
R demo man Message-ID: <20130913153755.BEFFE18594B@r-forge.r-project.org> Author: xavierv Date: 2013-09-13 17:37:55 +0200 (Fri, 13 Sep 2013) New Revision: 3088 Added: pkg/Meucci/man/ComputeCVaR.Rd pkg/Meucci/man/LongShortMeanCVaRFrontier.Rd Modified: pkg/Meucci/NAMESPACE pkg/Meucci/R/ButterflyTradingFunctions.R pkg/Meucci/demo/ButterflyTrading.R pkg/Meucci/demo/FullFlexProbs.R pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R pkg/Meucci/demo/HermiteGrid_demo.R pkg/Meucci/demo/RankingInformation.R pkg/Meucci/demo/S_BlackLittermanBasic.R pkg/Meucci/demo/S_CallsProjectionPricing.R pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R pkg/Meucci/demo/S_CrossSectionIndustries.R pkg/Meucci/demo/S_EquitiesInvariants.R pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R pkg/Meucci/demo/S_FitSwapToStudentT.R pkg/Meucci/demo/S_FixedIncomeInvariants.R pkg/Meucci/demo/S_FxCopulaMarginal.R pkg/Meucci/demo/S_HedgeOptions.R pkg/Meucci/demo/S_HorizonEffect.R pkg/Meucci/demo/S_MaximumLikelihood.R pkg/Meucci/demo/S_MeanVarianceBenchmark.R pkg/Meucci/demo/S_MeanVarianceCalls.R pkg/Meucci/demo/S_MeanVarianceHorizon.R pkg/Meucci/demo/S_MeanVarianceOptimization.R pkg/Meucci/demo/S_MultiVarSqrRootRule.R pkg/Meucci/demo/S_ProjectNPriceMvGarch.R pkg/Meucci/demo/S_PureResidualBonds.R pkg/Meucci/demo/S_SnPCaseStudy.R pkg/Meucci/demo/S_StatArbSwaps.R pkg/Meucci/demo/S_SwapPca2Dim.R pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R pkg/Meucci/demo/S_TimeSeriesIndustries.R pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R Log: - changed the way data is loaded along the package Modified: pkg/Meucci/NAMESPACE =================================================================== --- pkg/Meucci/NAMESPACE 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/NAMESPACE 2013-09-13 15:37:55 UTC (rev 3088) @@ -6,6 +6,7 @@ export(CentralAndStandardizedStatistics) export(CMAcombination) export(CMAseparation) +export(ComputeCVaR) export(ComputeMoments) export(ComputeMVE) export(CondProbViews) @@ 
-36,6 +37,7 @@ export(LognormalCopulaPdf) export(LognormalMoments2Parameters) export(LognormalParam2Statistics) +export(LongShortMeanCVaRFrontier) export(MaxRsqCS) export(MaxRsqTS) export(MleRecursionForStudentT) Modified: pkg/Meucci/R/ButterflyTradingFunctions.R =================================================================== --- pkg/Meucci/R/ButterflyTradingFunctions.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/R/ButterflyTradingFunctions.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -275,6 +275,22 @@ return( p_ ); } +#' Computes the conditional value at risk as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", +#' The Risk Magazine, October 2008, p 100-106 +#' +#' @param Units panel of joint factors realizations +#' @param Scenarios vector of probabilities +#' @param Conf Confidence +#' +#' @return CVaR Conditional Value at Risk +#' +#' @references +#' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "ButterflyTrading/ComputeCVaR.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} +#' @export + ComputeCVaR = function( Units , Scenarios , Conf ) { PnL = Scenarios %*% Units @@ -288,6 +304,26 @@ return( CVaR ) } +#' Computes the long-short conditional value at risk frontier as it appears in A. Meucci, +#' "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, p 100-106 +#' +#' @param PnL Profit and Loss scenarios +#' @param Probs vector of probabilities +#' @param Butterflies list of securities with some analytics computed. +#' @param Options list of options +#' +#' @return Exp vector of expected returns for each asset +#' @return SDev vector of security volatilities along the efficient frontier +#' @return CVaR Conditional Value at Risk for each portfolio +#' @return Composition matrix of compositions (security weights) for each portfolio along the efficient frontier +#' +#' @references +#' A. 
Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} +#' See Meucci script for "ButterflyTrading/LongShortMeanCVaRFrontier.m" +#' +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com}, Xavier Valls \email{flamejat@@gmail.com} +#' @export + LongShortMeanCVaRFrontier = function( PnL , Probs , Butterflies , Options ) { library( matlab ) @@ -378,17 +414,3 @@ return( list( Exp = Exp , SDev = SDev , CVaR = CVaR , Composition = Composition ) ) } - - -MapVol = function( sig , y , K , T ) -{ - # in real life a and b below should be calibrated to security-specific time series - - a = -0.00000000001 - b = 0.00000000001 - - s = sig + a/sqrt(T) * ( log(K) - log(y) ) + b/T*( log(K) - log(y) )^2 - - return( s ) -} - Modified: pkg/Meucci/demo/ButterflyTrading.R =================================================================== --- pkg/Meucci/demo/ButterflyTrading.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/ButterflyTrading.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -1,3 +1,4 @@ + #' This script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci, #' as it appears in A. 
Meucci, "Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, #' p 100-106 @@ -16,29 +17,15 @@ # In real life, these are provided by the estimation process ########################################################################################################### -load( "../data/factorsDistribution.rda" ) +data("factorsDistribution" ) -emptyMatrix = matrix( nrow = 0 , ncol = 0 ) - ########################################################################################################### # Load current prices, deltas and other analytics of the securities # In real life, these are provided by data provider ########################################################################################################### -load("../data/butterfliesAnalytics.rda") +data("butterfliesAnalytics") - -# create Butterflies as a list of named arrays -Butterflies = as.matrix( Butterflies[[1]] , nrow = 8 , ncol = 9 ) -Butterflies = matrix(Butterflies, ncol = 9 , nrow = 8 ) -rownames( Butterflies ) = c( "Name" , "P_0" , "Y_0" , "K" , "T" , "sig_0" , "Delta" , "Vega" ) -colnames( Butterflies ) = c( "MSFT_vol_30" , "MSFT_vol_91" , "MSFT_vol_182" , - "YHOO_vol_30" , "YHOO_vol_91" , "YHOO_vol_182" , - "GOOG_vol_30" , "GOOG_vol_91" , "GOOG_vol_182" ) - -colnames( X ) = FactorNames -Butterflies = lapply( seq_len( ncol( Butterflies ) ), function( i ) Butterflies[ , i ] ) - ########################################################################################################### # Map factors scenarios into p&l scenarios at the investment horizon # In real life with complex products, the pricing can be VERY costly @@ -59,11 +46,10 @@ optimalPortfolios = LongShortMeanCVaRFrontier( PnL , as.matrix(factorsDistribution$p ) , butterfliesAnalytics , Options ) -View( optimalPortfolios ) # Note that composition is measured in dollars. Here we are short GOOG_vol_91 and long GOOG_vol_182 +#View( optimalPortfolios ) # Note that composition is measured in dollars. 
Here we are short GOOG_vol_91 and long GOOG_vol_182 PlotFrontier( optimalPortfolios$Exp , optimalPortfolios$CVaR , optimalPortfolios$Composition ) -#[Exp,SDev,CVaR,w] = LongShortMeanCVaRFrontier(PnL,p,butterfliesAnalytics,Options); #PlotEfficientFrontier(Exp,CVaR,w) ########################################################################################################### Modified: pkg/Meucci/demo/FullFlexProbs.R =================================================================== --- pkg/Meucci/demo/FullFlexProbs.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/FullFlexProbs.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -47,7 +47,7 @@ # risk drivers scenarios ########################################################################### -load( "../data/dbFFP.rda" ) +data("dbFFP" ) Infl = dbFFP$Data[ , length( dbFFP$Names ) ]; Vix = dbFFP$Data[ , length( dbFFP$Names ) - 1 ]; Modified: pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R =================================================================== --- pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/FullyIntegratedLiquidityAndMarketRisk.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -25,7 +25,7 @@ # load fILMR$Daily_Prices: closing prices # fILMR$Daily_Volumes_Shares: daily volumes # fILMR$Daily_Liq: Morgan Stanley liquidity index -load("../data/fILMR.rda") +data("fILMR") # Prices and returns #Daily_Prices = Daily_Prices(:,Selectstock); Modified: pkg/Meucci/demo/HermiteGrid_demo.R =================================================================== --- pkg/Meucci/demo/HermiteGrid_demo.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/HermiteGrid_demo.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -25,7 +25,7 @@ # numerical (Gauss-Hermite grid) prior ghqMesh = emptyMatrix -load( "ghq1000.rda" ) +load( "ghq1000" ) tmp = ( ghqx - min( ghqx ) ) / ( max( ghqx ) - min( ghqx ) ) # rescale GH zeros so they belong to [0,1] epsilon = 1e-10 Modified: 
pkg/Meucci/demo/RankingInformation.R =================================================================== --- pkg/Meucci/demo/RankingInformation.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/RankingInformation.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -17,7 +17,7 @@ # Load panel X of joint returns realizations and vector p of respective probabilities # In real life, these are provided by the estimation process ############################################################################# -load("../data/returnsDistribution.rda"); +data("returnsDistribution"); ########################################################################################################### # compute and plot efficient frontier based on prior market distribution Modified: pkg/Meucci/demo/S_BlackLittermanBasic.R =================================================================== --- pkg/Meucci/demo/S_BlackLittermanBasic.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_BlackLittermanBasic.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -12,7 +12,7 @@ ################################################################################################################## ### Load inputs -load("../data/covNRets.rda"); +data("covNRets"); ################################################################################################################## ### Compute efficient frontier Modified: pkg/Meucci/demo/S_CallsProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -13,7 +13,7 @@ # load 'spot' for underlying and current vol surface, given by # 'impVol' for different 'days2Maturity' and 'moneyness' (K/S) -load("../data/implVol.rda"); +data("implVol"); ################################################################################################################## ### Inputs Modified: 
pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -10,10 +10,10 @@ ################################################################################################################## ### Loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.rda"); +data("securitiesTS"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/securitiesIndustryClassification.rda"); +data("securitiesIndustryClassification"); Securities_IndustryClassification = securitiesIndustryClassification$data; ################################################################################################################## ### Linear returns for stocks Modified: pkg/Meucci/demo/S_CrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -10,10 +10,10 @@ ################################################################################################################## ### Load data # loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.rda"); +data("securitiesTS"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/securitiesIndustryClassification.rda"); +data("securitiesIndustryClassification"); Securities_IndustryClassification = securitiesIndustryClassification$data; ################################################################################################################## Modified: pkg/Meucci/demo/S_EquitiesInvariants.R =================================================================== --- 
pkg/Meucci/demo/S_EquitiesInvariants.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_EquitiesInvariants.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -11,7 +11,7 @@ ################################################################################################################## ### Load daily stock prices from the utility sector in the S&P 500 -load("../data/equities.rda"); +data("equities"); ################################################################################################################## ### Pick one stock from database Modified: pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R =================================================================== --- pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -10,7 +10,7 @@ ################################################################################################################## ### Load data -load("../data/highYieldIndices.rda"); +data("highYieldIndices"); ################################################################################################################## ### Compute invariants and set NaN for large values Modified: pkg/Meucci/demo/S_FitSwapToStudentT.R =================================================================== --- pkg/Meucci/demo/S_FitSwapToStudentT.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_FitSwapToStudentT.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -11,7 +11,7 @@ ################################################################################################################## ### Load data -load( "../data/usSwapRates.rda" ); +data("usSwapRates" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_FixedIncomeInvariants.R =================================================================== --- 
pkg/Meucci/demo/S_FixedIncomeInvariants.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_FixedIncomeInvariants.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -9,7 +9,7 @@ ################################################################################################################## ### Load government yield curve and bond yield data for different dates -load("../data/fixedIncome.rda"); +data("fixedIncome"); ################################################################################################################## ### Pick time-to-maturity for one point on the yield curve Modified: pkg/Meucci/demo/S_FxCopulaMarginal.R =================================================================== --- pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -9,7 +9,7 @@ #' @export ### Load data and select the pair to display -load( "../data/fX.rda" ) +data("fX" ) Display = c( 1, 2 ); # 1 = Spot USD/EUR; 2 = Spot USD/GBP; 3 = Spot USD/JPY; Modified: pkg/Meucci/demo/S_HedgeOptions.R =================================================================== --- pkg/Meucci/demo/S_HedgeOptions.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_HedgeOptions.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -9,7 +9,7 @@ ################################################################################################################## ### Load data -load( "../data/implVol.rda" ); +data("implVol" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_HorizonEffect.R =================================================================== --- pkg/Meucci/demo/S_HorizonEffect.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_HorizonEffect.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -17,7 +17,7 @@ 
################################################################################################################## # Load parameters of the model: D, muX, sigmaF, sigmaEps -load( "../data/linearModel.rda" ); +data("linearModel" ); # Specify range of investment horizon, weeks tauRangeWeeks = 1:52; Modified: pkg/Meucci/demo/S_MaximumLikelihood.R =================================================================== --- pkg/Meucci/demo/S_MaximumLikelihood.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_MaximumLikelihood.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -51,7 +51,7 @@ ########################################################################################################## ### Load data -load( "../data/timeSeries.rda"); +data("timeSeries"); ########################################################################################################## ### inputs Modified: pkg/Meucci/demo/S_MeanVarianceBenchmark.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceBenchmark.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_MeanVarianceBenchmark.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -13,7 +13,7 @@ ################################################################################################################## ### Load data -load("../data/stockSeries.rda"); +data("stockSeries"); ################################################################################################################### ### Inputs Modified: pkg/Meucci/demo/S_MeanVarianceCalls.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceCalls.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_MeanVarianceCalls.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -9,7 +9,7 @@ ################################################################################################################## ### Load dat -load("../data/db.rda" ); +data("db" ); 
################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_MeanVarianceHorizon.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceHorizon.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_MeanVarianceHorizon.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -12,7 +12,7 @@ ################################################################################################################## ### Load data -load("../data/stockSeries.rda"); +data("stockSeries"); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_MeanVarianceOptimization.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceOptimization.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_MeanVarianceOptimization.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -13,7 +13,7 @@ ################################################################################################################## ### Load data -load( "../data/stockSeries.rda" ); +data("stockSeries" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_MultiVarSqrRootRule.R =================================================================== --- pkg/Meucci/demo/S_MultiVarSqrRootRule.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_MultiVarSqrRootRule.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -9,7 +9,7 @@ ################################################################################################################## ### Load data -load("../data/swaps.rda"); +data("swaps"); ################################################################################################################## ### Aggregation steps in days Modified: 
pkg/Meucci/demo/S_ProjectNPriceMvGarch.R =================================================================== --- pkg/Meucci/demo/S_ProjectNPriceMvGarch.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_ProjectNPriceMvGarch.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -13,7 +13,7 @@ ################################################################################################################## ### Load data -load( "../data/equities.rda" ); +data("equities" ); ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_PureResidualBonds.R =================================================================== --- pkg/Meucci/demo/S_PureResidualBonds.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_PureResidualBonds.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -10,7 +10,7 @@ ################################################################################################################## ### Load data -load("../data/bondAttribution.rda"); +data("bondAttribution"); ################################################################################################################## Modified: pkg/Meucci/demo/S_SnPCaseStudy.R =================================================================== --- pkg/Meucci/demo/S_SnPCaseStudy.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_SnPCaseStudy.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -4,7 +4,7 @@ # source on www.symmys.com #################################################################### -load("../data/sectorsSnP500.rda") +data("sectorsSnP500") p_m = 0.1 # aversion to estimation risk for mu p_s = 0.1 # aversion to estimation risk for sigma Modified: pkg/Meucci/demo/S_StatArbSwaps.R =================================================================== --- pkg/Meucci/demo/S_StatArbSwaps.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_StatArbSwaps.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -11,7 +11,7 @@ 
################################################################################################################## ### Load data -load("../data/swapParRates.rda"); +data("swapParRates"); ################################################################################################################## ### Estimate covariance and PCA decomposition Modified: pkg/Meucci/demo/S_SwapPca2Dim.R =================================================================== --- pkg/Meucci/demo/S_SwapPca2Dim.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_SwapPca2Dim.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -12,7 +12,7 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################## ### Load data -load( "../data/swap2y4y.mat" ); +data("swap2y4y.mat" ); ################################################################################################################## ### Current curve Modified: pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -10,10 +10,10 @@ ################################################################################################################## ### Loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.rda"); +data("securitiesTS"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/sectorsTS.rda"); +data("sectorsTS"); Data_Sectors = sectorsTS$data[ , -(1:2) ]; #1st column is date, 2nd column is SPX ################################################################################################################## Modified: pkg/Meucci/demo/S_TimeSeriesIndustries.R =================================================================== --- 
pkg/Meucci/demo/S_TimeSeriesIndustries.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_TimeSeriesIndustries.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -9,10 +9,10 @@ ################################################################################################################## ### Loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.rda"); +data("securitiesTS"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/sectorsTS.rda"); +data("sectorsTS"); Data_Sectors = sectorsTS$data[ , -(1:2) ]; #1st column is for date, 2nd column is SPX index ################################################################################################################## Modified: pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 2013-09-13 14:37:41 UTC (rev 3087) +++ pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 2013-09-13 15:37:55 UTC (rev 3088) @@ -10,13 +10,13 @@ ################################################################################################################## ### Load data # loads weekly stock returns X and indices stock returns F -load("../data/securitiesTS.rda"); +data("securitiesTS"); Data_Securities = securitiesTS$data[ , -1 ]; # 1st column is date -load("../data/sectorsTS.rda"); +data("sectorsTS"); Data_Sectors = sectorsTS$data[ , -(1:2) ]; -load("../data/securitiesIndustryClassification.rda"); +data("securitiesIndustryClassification"); Securities_IndustryClassification = securitiesIndustryClassification$data; ################################################################################################################## Added: pkg/Meucci/man/ComputeCVaR.Rd =================================================================== --- pkg/Meucci/man/ComputeCVaR.Rd (rev 0) +++ pkg/Meucci/man/ComputeCVaR.Rd 2013-09-13 15:37:55 UTC (rev 3088) @@ -0,0 +1,31 
@@ +\name{ComputeCVaR} +\alias{ComputeCVaR} +\title{Computes the conditional value at risk as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", +The Risk Magazine, October 2008, p 100-106} +\usage{ + ComputeCVaR(Units, Scenarios, Conf) +} +\arguments{ + \item{Units}{panel of joint factors realizations} + + \item{Scenarios}{vector of probabilities} + + \item{Conf}{Confidence} +} +\value{ + CVaR Conditional Value at Risk +} +\description{ + Computes the conditional value at risk as it appears in + A. Meucci, "Fully Flexible Views: Theory and Practice", + The Risk Magazine, October 2008, p 100-106 +} +\author{ + Ram Ahluwalia \email{ram at wingedfootcapital.com} +} +\references{ + A. Meucci, "Fully Flexible Views: Theory and Practice" + \url{http://www.symmys.com/node/158} See Meucci script + for "ButterflyTrading/ComputeCVaR.m" +} + Added: pkg/Meucci/man/LongShortMeanCVaRFrontier.Rd =================================================================== --- pkg/Meucci/man/LongShortMeanCVaRFrontier.Rd (rev 0) +++ pkg/Meucci/man/LongShortMeanCVaRFrontier.Rd 2013-09-13 15:37:55 UTC (rev 3088) @@ -0,0 +1,45 @@ +\name{LongShortMeanCVaRFrontier} +\alias{LongShortMeanCVaRFrontier} +\title{Computes the long-short conditional value at risk frontier as it appears in A. 
Meucci, +"Fully Flexible Views: Theory and Practice", The Risk Magazine, October 2008, p 100-106} +\usage{ + LongShortMeanCVaRFrontier(PnL, Probs, Butterflies, + Options) +} +\arguments{ + \item{PnL}{Profit and Loss scenarios} + + \item{Probs}{vector of probabilities} + + \item{Butterflies}{list of securities with some analytics + computed.} + + \item{Options}{list of options} +} +\value{ + Exp vector of expected returns for each asset + + SDev vector of security volatilities along the efficient + frontier + + CVaR Conditional Value at Risk for each portfolio + + Composition matrix of compositions (security weights) for + each portfolio along the efficient frontier +} +\description{ + Computes the long-short conditional value at risk + frontier as it appears in A. Meucci, "Fully Flexible + Views: Theory and Practice", The Risk Magazine, October + 2008, p 100-106 +} +\author{ + Ram Ahluwalia \email{ram at wingedfootcapital.com}, Xavier + Valls \email{flamejat at gmail.com} +} +\references{ + A. Meucci, "Fully Flexible Views: Theory and Practice" + \url{http://www.symmys.com/node/158} See Meucci script + for "ButterflyTrading/LongShortMeanCVaRFrontier.m" +} + From noreply at r-forge.r-project.org Fri Sep 13 18:02:01 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 18:02:01 +0200 (CEST) Subject: [Returnanalytics-commits] r3089 - pkg/Meucci Message-ID: <20130913160201.3352C185D16@r-forge.r-project.org> Author: xavierv Date: 2013-09-13 18:02:00 +0200 (Fri, 13 Sep 2013) New Revision: 3089 Added: pkg/Meucci/TODO Log: - added a TODO file Added: pkg/Meucci/TODO =================================================================== --- pkg/Meucci/TODO (rev 0) +++ pkg/Meucci/TODO 2013-09-13 16:02:00 UTC (rev 3089) @@ -0,0 +1,12 @@ +Xavi's notes: + +* Matlab package doesn't seem to be necessary, find substitutes for its functions where possible +* There are some problems with charts and what can be done with base graphics. 
Maybe using ggplot2 instead of base graphics can provide more flexibility. +* All the scripts from the papers need to be revised, some don't even work. +* Maybe there are some packages that aren't needed anymore. Find out which of them. +* Documentation for papers and functions from Ram - Manan. +* Look for the TODOs from Ram or Manan. +* Confirm every datafile is different from the others. +* Change coding style to one more R alike +* Still 2 scripts left from the book: S_MeanVarianceCallsRobust from chapter 9 and S_OptionReplication from chapter 6 + From noreply at r-forge.r-project.org Fri Sep 13 18:18:30 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 18:18:30 +0200 (CEST) Subject: [Returnanalytics-commits] r3090 - pkg/PortfolioAnalytics/sandbox/symposium2013/R Message-ID: <20130913161830.F27F2185D16@r-forge.r-project.org> Author: peter_carl Date: 2013-09-13 18:18:30 +0200 (Fri, 13 Sep 2013) New Revision: 3090 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/R/page.Distributions.R Log: - needs to be added to PerfA Added: pkg/PortfolioAnalytics/sandbox/symposium2013/R/page.Distributions.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/R/page.Distributions.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/R/page.Distributions.R 2013-09-13 16:18:30 UTC (rev 3090) @@ -0,0 +1,44 @@ + +# Histogram, QQPlot and ECDF plots aligned by scale for comparison +page.Distributions <- function (R, ...) 
{ + require(PerformanceAnalytics) + op <- par(no.readonly = TRUE) + # c(bottom, left, top, right) + par(oma = c(5,0,2,1), mar=c(0,0,0,3)) + layout(matrix(1:(4*NCOL(R)), ncol=4, byrow=TRUE), widths=rep(c(.6,1,1,1),NCOL(R))) + # layout.show(n=21) + chart.mins=min(R, na.rm=TRUE) + chart.maxs=max(R, na.rm=TRUE) + means=colMeans(R, na.rm=TRUE) + row.names = sapply(colnames(R), function(x) paste(strwrap(x,10), collapse = "\n"), USE.NAMES=FALSE) + for(i in 1:NCOL(R)){ + if(i==NCOL(R)){ + plot.new() + text(x=1, y=0.5, adj=c(1,0.5), labels=row.names[i], cex=1.1) + chart.Histogram(R[,i], main="", xlim=c(chart.mins, chart.maxs), + breaks=seq(round(chart.mins, digits=2)-0.01, round(chart.maxs, digits=2)+0.01, by=0.01), note.lines=boxplot.stats(as.vector(R[,i]))$stats, note.color="#005AFF", + show.outliers=TRUE, methods=c("add.normal"), colorset = + c("black", "#00008F", "#005AFF", "#23FFDC", "#ECFF13", "#FF4A00", "#800000")) + abline(v=0, col="darkgray", lty=2) + chart.QQPlot(R[,i], main="", pch=20, envelope=0.95, col=c(1,"#005AFF"), ylim=c(chart.mins, chart.maxs)) + abline(v=0, col="darkgray", lty=2) + chart.ECDF(R[,i], main="", xlim=c(chart.mins, chart.maxs), lwd=2) + abline(v=0, col="darkgray", lty=2) + } + else{ + plot.new() + text(x=1, y=0.5, adj=c(1,0.5), labels=row.names[i], cex=1.1) + chart.Histogram(R[,i], main="", xlim=c(chart.mins, chart.maxs), + breaks=seq(round(chart.mins, digits=2)-0.01, round(chart.maxs, digits=2)+0.01, by=0.01), note.lines=boxplot.stats(as.vector(R[,i]))$stats, note.color="#005AFF", + xaxis=FALSE, yaxis=FALSE, show.outliers=TRUE, methods=c("add.normal"), colorset = + c("black", "#00008F", "#005AFF", "#23FFDC", "#ECFF13", "#FF4A00", "#800000")) + abline(v=0, col="darkgray", lty=2) + chart.QQPlot(R[,i], main="", xaxis=FALSE, yaxis=FALSE, pch=20, envelope=0.95, col=c(1,"#005AFF"), ylim=c(chart.mins, chart.maxs)) + abline(v=0, col="darkgray", lty=2) + chart.ECDF(R[,i], main="", xlim=c(chart.mins, chart.maxs), xaxis=FALSE, yaxis=FALSE, lwd=2) + 
abline(v=0, col="darkgray", lty=2) + } + } + par(op) +} + From noreply at r-forge.r-project.org Fri Sep 13 18:21:40 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 18:21:40 +0200 (CEST) Subject: [Returnanalytics-commits] r3091 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130913162140.CFDA1185D16@r-forge.r-project.org> Author: peter_carl Date: 2013-09-13 18:21:40 +0200 (Fri, 13 Sep 2013) New Revision: 3091 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/parse.EDHEC.R Log: - modified to fit the workspace - should be very easy to update now, written more generally Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/parse.EDHEC.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/parse.EDHEC.R 2013-09-13 16:18:30 UTC (rev 3090) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/parse.EDHEC.R 2013-09-13 16:21:40 UTC (rev 3091) @@ -7,26 +7,25 @@ require(gdata) require(xts) + +### Constants +filename = "EDHEC-index-history.csv" +objectname = "edhec" +datadir = "./data" +cachedir = "./cache" + # Download the following file to the working directory: # http://www.edhec-risk.com/indexes/pure_style/data/table/history.csv ### @TODO: Is there a way to download it directly? 
Maybe not, seems to require a login -x=read.csv(file="history.csv", sep=";", header=TRUE, check.names=FALSE) + +### Read data from csv file +x=read.csv(file=paste(datadir,filename,sep="/"), sep=";", header=TRUE, check.names=FALSE) x.dates = as.Date(x[,1], format="%d/%m/%Y") x.data = apply(x[,-1], MARGIN=2, FUN=function(x){as.numeric(sub("%","", x, fixed=TRUE))/100}) # get rid of percentage signs edhec = xts(x.data, order.by=x.dates) -colnames(edhec) -# calculate a wealth index -edhec.idx = apply(edhec, MARGIN=2, FUN=function(x){cumprod(1 + na.omit(x))}) -# identify quarters -edhec.Q.idx=edhec.idx[endpoints(edhec.idx, on="quarters"),] -# calculate quarterly returns -edhec.Q.R=ROC(edhec.Q.idx) -# trim the last data point, if needed -# dim(edhec.Q.R) -# edhec.Q.R=edhec.Q.R[-61,] -# reclass the object -edhec.Q.R=as.xts(edhec.Q.R) -# lm requires safe names -colnames(edhec.Q.R)=make.names(colnames(edhec)) +### Add pretty columnnames as an xts attribute? + +### Save data into cache +save(edhec, file=paste(cachedir, "/", objectname, ".RData", sep="")) \ No newline at end of file From noreply at r-forge.r-project.org Fri Sep 13 18:23:19 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 18:23:19 +0200 (CEST) Subject: [Returnanalytics-commits] r3092 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130913162319.CAF57185D16@r-forge.r-project.org> Author: peter_carl Date: 2013-09-13 18:23:19 +0200 (Fri, 13 Sep 2013) New Revision: 3092 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R Log: - breaks out data analysis portion of workflow - modified to fit the local workspace - added AC chart and table - modified cor charts Added: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R (rev 0) +++ 
pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-13 16:23:19 UTC (rev 3092) @@ -0,0 +1,180 @@ +### For Presentation at FactSet's 2013 US Investment Process Symposium +# November 10 - 12 , 2013 +# Peter Carl + +### Description +# This script will generate a series of plots and csv data in the resultsdir +# for possible inclusion in slides. + +### Make needed changes to workspace here ### +# +datadir = "./data/" +resultsdir = "./results/" +functionsdir = "./R/" + +### Load the necessary packages +require(vcd) # for color palates +require(corrplot) # for correlation charts + +# This may be useful for PCA analysis of index data +# require(FactorAnalytics) # development version > build + +### Set up color palates +pal <- function(col, border = "light gray", ...){ + n <- length(col) + plot(0, 0, type="n", xlim = c(0, 1), ylim = c(0, 1), + axes = FALSE, xlab = "", ylab = "", ...) + rect(0:(n-1)/n, 0, 1:n/n, 1, col = col, border = border) +} + +# Qualitative color scheme by Paul Tol +tol1qualitative=c("#4477AA") +tol2qualitative=c("#4477AA", "#CC6677") +tol3qualitative=c("#4477AA", "#DDCC77", "#CC6677") +tol4qualitative=c("#4477AA", "#117733", "#DDCC77", "#CC6677") +tol5qualitative=c("#332288", "#88CCEE", "#117733", "#DDCC77", "#CC6677") +tol6qualitative=c("#332288", "#88CCEE", "#117733", "#DDCC77", "#CC6677","#AA4499") +tol7qualitative=c("#332288", "#88CCEE", "#44AA99", "#117733", "#DDCC77", "#CC6677","#AA4499") +tol8qualitative=c("#332288", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#CC6677","#AA4499") +tol9qualitative=c("#332288", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#CC6677", "#882255", "#AA4499") +tol10qualitative=c("#332288", "#88CCEE", "#44AA99", "#117733", "#999933", "#DDCC77", "#661100", "#CC6677", "#882255", "#AA4499") + +# Constants +p=1-(1/12) # +Rf=.03/12 # Monthly risk free rate +colorset = rich8equal +dataname="EDHEC" + +######################################################################## +# Load 
data +######################################################################## +## Just load the data from packages +### See parse.EDHEC.R + +# Load data from cache +load("./cache/edhec.Rdata") + +# Drop some indexes and reorder +R = edhec[,c("Convertible Arbitrage", "Equity Market Neutral","Fixed Income Arbitrage", "Event Driven", "CTA Global", "Global Macro", "Long/Short Equity")] + + +######################################################################## +# Returns-based performance analysis +######################################################################## +# -------------------------------------------------------------------- +# Returns through time +# -------------------------------------------------------------------- +png(filename=paste(resultsdir, dataname, "-Cumulative-Returns.png", sep=""), units="in", height=5.5, width=9, res=96) +par(cex.lab=.8) # should set these parameters once at the top +op <- par(no.readonly = TRUE) +layout(matrix(c(1, 2)), height = c(2, 1.3), width = 1) +par(mar = c(1, 4, 1, 2)) #c(bottom, left, top, right) +chart.CumReturns(R, main = "", xaxis = FALSE, legend.loc = "topleft", ylab = "Cumulative Return", colorset= rainbow8equal, ylog=TRUE, wealth.index=TRUE, cex.legend=.7, cex.axis=.6, cex.lab=.7) +par(mar = c(4, 4, 0, 2)) +chart.Drawdown(edhec.R, main = "", ylab = "Drawdown", colorset = rainbow8equal, cex.axis=.6, cex.lab=.7) +par(op) +dev.off() + +# -------------------------------------------------------------------- +# Monthly Returns and Risk +# -------------------------------------------------------------------- +# @TODO: Too small: break this into two graphics? Directional, non-directional? 
+png(filename=paste(resultsdir, dataname, "-BarVaR.png", sep=""), units="in", height=5.5, width=9, res=96) +# Generate charts of returns with ETL and VaR through time +par(mar=c(3, 4, 0, 2) + 0.1) #c(bottom, left, top, right) +charts.BarVaR(R, p=p, gap=36, main="", show.greenredbars=TRUE, + methods=c("ModifiedES", "ModifiedVaR"), show.endvalue=TRUE, + colorset=rep("Black",7), ylim=c(-.1,.15)) +par(op) +dev.off() + +# -------------------------------------------------------------------- +# Rolling Performance +# -------------------------------------------------------------------- +png(filename=paste(resultsdir, dataname, "-RollPerf.png", sep=""), units="in", height=5.5, width=9, res=96) +# Generate charts of EDHEC index returns with ETL and VaR through time +par(mar=c(5, 4, 0, 2) + 0.1) #c(bottom, left, top, right) +charts.RollingPerformance(R, width=36, main="", colorset=rainbow8equal, legend.loc="topleft") +par(op) +dev.off() + +# -------------------------------------------------------------------- +# Returns and Risk Scatter +# -------------------------------------------------------------------- +png(filename=paste(resultsdir, dataname, "-Scatter36m.png", sep=""), units="in", height=5.5, width=4.5, res=96) +chart.RiskReturnScatter(last(edhec.R,36), main="EDHEC Index Trailing 36-Month Performance", colorset=rainbow8equal, ylim=c(0,.2), xlim=c(0,.12)) +dev.off() +png(filename=paste(resultsdir, dataname, "-ScatterSinceIncept.png", sep=""), units="in", height=5.5, width=4.5, res=96) +chart.RiskReturnScatter(edhec.R, main="EDHEC Index Since Inception Performance", colorset=rainbow8equal, ylim=c(0,.2), xlim=c(0,.12)) +dev.off() + +# -------------------------------------------------------------------- +# Table of Return and Risk Statistics +# -------------------------------------------------------------------- +# @TODO: Too small, break into two panels? 
+require(Hmisc) +source(paste(functionsdir,'table.RiskStats.R', sep="")) +incept.stats = t(table.RiskStats(R=R, p=p, Rf=Rf)) +write.csv(incept.stats, file=paste(resultsdir, dataname, "-inception-stats.csv", sep="")) +png(filename=paste(resultsdir, dataname, "-InceptionStats.png", sep=""), units="in", height=5.5, width=9, res=96) +textplot(format.df(incept.stats, na.blank=TRUE, numeric.dollar=FALSE, cdec=c(3,3,1,3,1,3,3,1,3,3,1,1,3,3,1,0), rmar = 0.8, cmar = 1, max.cex=.9, halign = "center", valign = "top", row.valign="center", wrap.rownames=20, wrap.colnames=10, mar = c(0,0,4,0)+0.1)) +dev.off() + +# -------------------------------------------------------------------- +# Compare Distributions +# -------------------------------------------------------------------- +# @TODO: too small? +png(filename=paste(resultsdir, dataname, "-Distributions.png", sep=""), units="in", height=5.5, width=9, res=96) +source(paste(functionsdir, "/page.Distributions.R", sep="")) +page.Distributions(R) +dev.off() + +# -------------------------------------------------------------------- +# Correlation Panels +# -------------------------------------------------------------------- +# col3 <- colorRampPalette(c("darkgreen", "white", "darkred")) +library(gplots) +# Generate some color choices for the scale +skewedWB20 = c(colorpanel(16, "#008566","#E1E56D"), colorpanel(5, "#E1E56D", "#742414")[-1]) +skewedGnYeRd10 = c(colorpanel(8, "darkgreen", "yellow"),colorpanel(3, "yellow", "darkred")[-1]) +skewedGnYeRd20 = c(colorpanel(16, "darkgreen", "yellow"),colorpanel(5, "yellow", "darkred")[-1]) + +M <- cor(R) +colnames(M) = rownames(M) +order.hc2 <- corrMatOrder(M, order="hclust", hclust.method="complete") +M.hc2 <- M[order.hc2,order.hc2] +png(filename=paste(resultsdir, dataname, "-cor-inception.png", sep=""), units="in", height=5.5, width=4.5, res=96) +corrplot(M.hc2, tl.col="black", tl.cex=0.8, method="shade", col=skewedWB20, cl.offset=.75, cl.cex=.7, cl.align.text="l", cl.ratio=.25, shade.lwd=0,
cl.length=11) +corrRect.hclust(M.hc2, k=3, method="complete", col="blue") +dev.off() + +M36 <- cor(last(R,36)) +colnames(M36) = rownames(M36) = row.names +order36.hc2 <- corrMatOrder(M36, order="hclust", hclust.method="complete") +M36.hc2 <- M36[order36.hc2,order36.hc2] +png(filename=paste(resultsdir, dataname, "-cor-tr36m.png", sep=""), units="in", height=5.5, width=4.5, res=96) +corrplot(M36.hc2, tl.col="black", tl.cex=0.8, method="shade", col=skewedWB20, cl.offset=.75, cl.cex=.7, cl.align.text="l", cl.ratio=.25, shade.lwd=0, cl.length=11) +corrRect.hclust(M36.hc2, k=3, method="complete", col="blue") +dev.off() + +# @TODO: Add 12M rolling correlation to S&P500 + + +# -------------------------------------------------------------------- +## Autocorrelation +# -------------------------------------------------------------------- +# @TODO: This is frosting, do it last + +# require(Hmisc) +AC.stats = t(table.Autocorrelation(R=R)) +write.csv(AC.stats, file=paste(resultsdir, dataname, "-AC-stats.csv", sep="")) +png(filename=paste(resultsdir, dataname, "-ACStats.png", sep=""), units="in", height=5.5, width=9, res=96) +# sort by p-value +AC.order = order(AC.stats[,7], decreasing=FALSE) +textplot(format.df(AC.stats[AC.order,], na.blank=TRUE, numeric.dollar=FALSE, rdec=c(rep(4,dim(AC.stats)[1])), col.just=rep("nc",dim(AC.stats)[2])), rmar = 0.7, cmar = 0.9, max.cex=1, halign = "center", valign = "center", row.valign="center", wrap.rownames=50, wrap.colnames=10) +dev.off() + +png(filename=paste(resultsdir, dataname, "-ACStackedBars.png", sep=""), units="in", height=5.5, width=9, res=96) +rownames(AC.stats)= sapply(colnames(R), function(x) paste(strwrap(x,10), collapse = "\n"), USE.NAMES=FALSE) +chart.StackedBar(as.matrix(AC.stats[,1:6]), colorset=bluemono, main="Observed Autocorrelation") +dev.off() \ No newline at end of file From noreply at r-forge.r-project.org Fri Sep 13 18:37:52 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 
Sep 2013 18:37:52 +0200 (CEST) Subject: [Returnanalytics-commits] r3093 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130913163752.40E5A185D9C@r-forge.r-project.org> Author: peter_carl Date: 2013-09-13 18:37:51 +0200 (Fri, 13 Sep 2013) New Revision: 3093 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/README.md Log: - added workspace description - modified howto slightly Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/README.md =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/README.md 2013-09-13 16:23:19 UTC (rev 3092) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/README.md 2013-09-13 16:37:51 UTC (rev 3093) @@ -0,0 +1,31 @@ +# Project space for developing code for Factset 2013 conference. + +## Constructing Portfolios of Dynamic Strategies using Downside Risk Measures +Peter Carl, Hedge Fund Strategies, William Blair & Co. + +In this session, we'll discuss portfolio construction within the context of portfolios of dynamic strategies. The speaker will use an approach that identifies several sets of objectives to establish benchmark, target, and nearby portfolios in a variety of ways and with complex constraints, including some that equalize or budget risks using downside measures of risk. We will then examine the ex-post results through time and identify the conditions under which certain objectives might be expected to do well or poorly. Any investor with complex, layered objectives and who is subject to a variety of real-world constraints may find useful elements in the methods presented. 
+ +For presentation at FactSet's 2013 US Investment Process Symposium +November 10 - 12 , 2013 + +# SETUP +The workspace is set up in the following way: +R script files reside in the project root directory +README: this description file covering the objectives of the project and any instructions +notebook.md: file to track progress +./R contains files with un-packaged function definitions (only functions - + no script code to be run) +./data contains data used in the analysis; treated as read only - do not write + data into this directory. +./data/README should cover who downloaded what data from where and when +./cache contains processed data files and intermediary results to be processed +./results contains output, figures, or other generated files. Should be able to delete the contents and regenerate them +./logs: contains logging output +./src: contains non-R source code where needed +./bin: compiled binaries or scripts + +May want to organize subdirectories in results and data chronologically at some point + +# HOWTO +To create PDF of slides: +$ pandoc symposium-slides-2013.Rmd -t beamer -o symposium-slides-2013.pdf \ No newline at end of file From noreply at r-forge.r-project.org Fri Sep 13 19:10:17 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 19:10:17 +0200 (CEST) Subject: [Returnanalytics-commits] r3094 - pkg/PortfolioAnalytics/R Message-ID: <20130913171017.D7B7A184D98@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-13 19:10:17 +0200 (Fri, 13 Sep 2013) New Revision: 3094 Modified: pkg/PortfolioAnalytics/R/random_portfolios.R Log: adding grid method for random portfolios. 
Modified: pkg/PortfolioAnalytics/R/random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 16:37:51 UTC (rev 3093) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 17:10:17 UTC (rev 3094) @@ -420,6 +420,109 @@ return(out) } +#' Generate random portfolios based on grid search method +#' +#' This function generates random portfolios based on the \code{gridSearch} +#' function from the 'NMOF' package. +#' +#' @details +#' The number of levels is calculated based on permutations and number of assets. +#' The number of levels must be an integer and may not result in the exact number +#' of permutations. We round up to the nearest integer for the levels so the +#' number of portfolios generated will be greater than or equal to permutations. +#' +#' The grid search method only satisfies the \code{min} and \code{max} box +#' constraints. The \code{min_sum} and \code{max_sum} leverage constraints will +#' likely be violated and the weights in the random portfolios should be +#' normalized. Normalization may cause the box constraints to be violated and +#' will be penalized in \code{constrained_objective}. +#' +#' @param portfolio +#' @param permutations +#' @param normalize TRUE/FALSE +#' @param \dots any passthru parameters. 
Currently ignored +#' @return matrix of random portfolios +#' @export +rp_grid <- function(portfolio, permutations=2000, normalize=TRUE, ...){ + + # get the constraints from the portfolio + constraints <- get_constraints(portfolio) + + # box constraints to generate the grid + min <- constraints$min + max <- constraints$max + + # number of parameters and length.out levels to generate + npar <- length(min) + n <- ceiling(exp(log(permutations) / npar)) + + levels <- vector("list", length = length(min)) + for (i in seq_len(npar)){ + levels[[i]] <- seq(min[[i]], max[[i]], length.out = max(n, 2L)) + } + np <- length(levels) + res <- vector("list", np) + rep.fac <- 1L + nl <- sapply(levels, length) + nlp <- prod(nl) + + # create the grid + for (i in seq_len(np)) { + x <- levels[[i]] + nx <- length(x) + nlp <- nlp/nx + res[[i]] <- x[rep.int(rep.int(seq_len(nx), rep.int(rep.fac, nx)), nlp)] + rep.fac <- rep.fac * nx + } + + # create the random portfolios from the grid + nlp <- prod(nl) + lstLevels <- vector("list", length = nlp) + for (r in seq_len(nlp)) { + lstLevels[[r]] <- sapply(res, `[[`, r) + } + # lstLevels is a list of random portfolios, rbind into a matrix + rp <- do.call(rbind, lstLevels) + + # min_sum and max_sum will likely be violated + # Normalization will likely cause min and max to be violated. This can be + # handled by the penalty in constrained_objective. + if(normalize){ + normalize_weights <- function(weights){ + # normalize results if necessary + if(!is.null(constraints$min_sum) | !is.null(constraints$max_sum)){ + # the user has passed in either min_sum or max_sum constraints for the portfolio, or both. + # we'll normalize the weights passed in to whichever boundary condition has been violated + # NOTE: this means that the weights produced by a numeric optimization algorithm like DEoptim + # might violate your constraints, so you'd need to renormalize them after optimizing + # we'll create functions for that so the user is less likely to mess it up. 
+ + ##' NOTE: need to normalize in the optimization wrapper too before we return, since we've normalized in here + ##' In Kris' original function, this was manifested as a full investment constraint + if(!is.null(constraints$max_sum) & constraints$max_sum != Inf ) { + max_sum=constraints$max_sum + if(sum(weights)>max_sum) { weights<-(max_sum/sum(weights))*weights } # normalize to max_sum + } + + if(!is.null(constraints$min_sum) & constraints$min_sum != -Inf ) { + min_sum=constraints$min_sum + if(sum(weights) < min_sum) { weights <- (min_sum/sum(weights))*weights } # normalize to min_sum [... text between '<' and '>' lost by the list archiver; remainder of rp_grid (normalization loop, return(rp), closing braces) not recoverable from this copy ...] > nrow(unique(x)) # [1] 4906 From noreply at r-forge.r-project.org Fri Sep 13 19:23:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 19:23:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3095 - pkg/FactorAnalytics/vignettes Message-ID: <20130913172309.2720B18515D@r-forge.r-project.org> Author: chenyian Date: 2013-09-13 19:23:08 +0200 (Fri, 13 Sep 2013) New Revision: 3095 Modified: pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw Log: modifying vignette. Modified: pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw =================================================================== --- pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw 2013-09-13 17:10:17 UTC (rev 3094) +++ pkg/FactorAnalytics/vignettes/fundamentalFM.Rnw 2013-09-13 17:23:08 UTC (rev 3095) @@ -40,7 +40,7 @@ \subsubsection{Loading Data} Let's look at the arguments of \verb at fitFundamentalFactorModel()@ which will deal with fundamental factor model in \verb at factorAnalytics@. <>= -require(factorAnalytics) +library(factorAnalytics) args(fitFundamentalFactorModel) @ \verb at data@ is in class of \verb at data.frame@ and is required to have \emph{assetvar},\emph{returnvar} and \emph{datevar}. One can image \emph{data} is like panel data setup and need firm variable and time variable. Data has dimension (N x T) and at least 3 consumes to specify information needed. @@ -56,7 +56,7 @@ @ We need asset returns to run our model.
We can utilize \verb at Delt()@ to calculate price percentage change which is exactly asset returns in \verb at quantmod@ package. <>= -require(quantmod) # for Delt. See Delt for detail +library(quantmod) # for Delt. See Delt for detail equity <- cbind(equity,do.call(rbind,lapply(split(equity,equity$tic), function(x) Delt(x$PRCCQ)))) names(equity)[22] <- "RET" From noreply at r-forge.r-project.org Fri Sep 13 20:34:12 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 20:34:12 +0200 (CEST) Subject: [Returnanalytics-commits] r3096 - in pkg/PortfolioAnalytics: R man sandbox Message-ID: <20130913183412.E2CF8183DE7@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-13 20:34:12 +0200 (Fri, 13 Sep 2013) New Revision: 3096 Added: pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R Modified: pkg/PortfolioAnalytics/R/random_portfolios.R pkg/PortfolioAnalytics/man/chart.Weights.Rd Log: Adding sample, simplex, and grid methods to random_portfolios. Added script in sandbox to demo 3 different methods for random portfolios. Modified: pkg/PortfolioAnalytics/R/random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 17:23:08 UTC (rev 3095) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 18:34:12 UTC (rev 3096) @@ -318,17 +318,76 @@ #' repeatedly calls \code{\link{randomize_portfolio}} to generate an #' arbitrary number of constrained random portfolios. #' +#' @details +#' Random portfolios can be generate using one of three methods. +#' \itemize{ +#' \item{sample: }{The 'sample' method to generate random portfolios is based +#' on an idea pioneerd by Pat Burns. This is the most flexible method and can +#' generate portfolios to satisfy leverage, box, group, and position limit +#' constraints.} +#' \item{simplex: }{The 'simplex' method to generate random portfolios is +#' based on a paper by W. T. Shaw. 
The simplex method is useful to generate +#' random portfolios with the full investment constraint, where the sum of the +#' weights is equal to 1, and min box constraints. All other constraints such +#' as group and position limit constraints will be handled by elimination. If +#' the constraints are very restrictive, this may result in very few feasible +#' portfolios remaining.} +#' \item{grid: }{The 'grid' method to generate random portfolios is based on +#' the \code(gridSearch} function in package 'NMOF'. The grid search method +#' only satisfies the \code{min} and \code{max} box constraints. The +#' \code{min_sum} and \code{max_sum} leverage constraints will likely be +#' violated and the weights in the random portfolios should be normalized. +#' Normalization may cause the box constraints to be violated and will be +#' penalized in \code{constrained_objective}.} +#' } +#' #' @param portfolio an object of type "portfolio" specifying the constraints for the optimization, see \code{\link{constraint}} #' @param permutations integer: number of unique constrained random portfolios to generate -#' @param \dots any other passthru parameters +#' @param \dots any other passthru parameters +#' @param rp_method method to generate random portfolios #' @return matrix of random portfolio weights #' @seealso \code{\link{portfolio.spec}}, \code{\link{objective}}, \code{\link{randomize_portfolio_v2}} #' @author Peter Carl, Brian G. Peterson, (based on an idea by Pat Burns) #' @aliases random_portfolios #' @rdname random_portfolios #' @export -random_portfolios_v2 <- function( portfolio, permutations=100, ...) -{ # +random_portfolios_v2 <- function( portfolio, permutations=100, rp_method="sample", ...){ + if(hasArg(p)) p=match.call(expand.dots=TRUE)$p else p=0:5 + if(hasArg(normalize)) normalize=match.call(expand.dots=TRUE)$normalize else normalize=TRUE + switch(rp_method, + sample = {rp <- rp_sample(portfolio, permutations, ...) 
+ }, + simplex = {rp <- rp_simplex(portfolio, permutations, p, ...) + }, + grid = {rp <- rp_grid(portfolio, permutations, normalize, ...) + } + ) + return(rp) +} + +# Alias randomize_portfolio_v2 to randomize_portfolio +#' @export +randomize_portfolio <- randomize_portfolio_v2 + +# Alias random_portfolios_v2 to random_portfolios +#' @export +random_portfolios <- random_portfolios_v2 + +#' Generate random portfolios using the sample method +#' +#' This function generates random portfolios based on an idea by Pat Burns. +#' +#' @details +#' The 'sample' method to generate random portfolios is based +#' on an idea pioneerd by Pat Burns. This is the most flexible method and can +#' generate portfolios to satisfy leverage, box, group, and position limit +#' constraints. +#' @param portfolio an object of type "portfolio" specifying the constraints for the optimization, see \code{\link{portfolio.spec}} +#' @param permutations integer: number of unique constrained random portfolios to generate +#' @param \dots any other passthru parameters +#' @return a matrix of random portfolio weights +#' @export +rp_sample <- function(portfolio, permutations, ...){ # this function generates a series of portfolios that are a "random walk" from the current portfolio seed <- portfolio$assets result <- matrix(nrow=permutations, ncol=length(seed)) @@ -350,14 +409,6 @@ return(result) } -# Alias randomize_portfolio_v2 to randomize_portfolio -#' @export -randomize_portfolio <- randomize_portfolio_v2 - -# Alias random_portfolios_v2 to random_portfolios -#' @export -random_portfolios <- random_portfolios_v2 - #' Generate random portfolios using the simplex method #' #' This function generates random portfolios based on the method outlined in the @@ -365,8 +416,8 @@ #' #' @details #' The simplex method is useful to generate random portfolios with the full -#' investment constraint where the sum of the weights is equal to 1 and min and -#' max box constraints. 
All other constraints such as group and position limit +#' investment constraint where the sum of the weights is equal to 1 and min +#' box constraints. All other constraints such as group and position limit #' constraints will be handled by elimination. If the constraints are very #' restrictive, this may result in very few feasible portfolios remaining. #' @@ -389,7 +440,7 @@ #' @param permutations integer: number of unique constrained random portfolios to generate #' @param p scalar or vector for FEV biasing #' @param \dots any other passthru parameters -#' @return a matrix of random portfolios +#' @return a matrix of random portfolio weights #' @export rp_simplex <- function(portfolio, permutations, p=0:5, ...){ # get the assets from the portfolio @@ -441,7 +492,7 @@ #' @param permutations #' @param normalize TRUE/FALSE #' @param \dots any passthru parameters. Currently ignored -#' @return matrix of random portfolios +#' @return matrix of random portfolio weights #' @export rp_grid <- function(portfolio, permutations=2000, normalize=TRUE, ...){ @@ -514,7 +565,7 @@ } stopifnot("package:foreach" %in% search() || require("foreach",quietly = TRUE)) - out <- foreach(1=1:nrow(rp)) %dopar% { + out <- foreach(i=1:nrow(rp)) %dopar% { tmp <- normalize_weights(weights=rp[i,]) tmp } Modified: pkg/PortfolioAnalytics/man/chart.Weights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-13 17:23:08 UTC (rev 3095) +++ pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-13 18:34:12 UTC (rev 3096) @@ -9,10 +9,6 @@ \alias{chart.Weights.optimize.portfolio.RP} \title{boxplot of the weights of the optimal portfolios} \usage{ - chart.Weights(object, neighbors = NULL, ..., - main = "Weights", las = 3, xlab = NULL, cex.lab = 1, - element.color = "darkgray", cex.axis = 0.8) - \method{chart.Weights}{optimize.portfolio.DEoptim} (object, neighbors = NULL, ..., main = "Weights", las = 3, xlab = NULL, cex.lab = 1, 
element.color = "darkgray", cex.axis = 0.8, @@ -46,6 +42,10 @@ legend.loc = "topright", cex.legend = 0.8, plot.type = "line") + chart.Weights(object, neighbors = NULL, ..., + main = "Weights", las = 3, xlab = NULL, cex.lab = 1, + element.color = "darkgray", cex.axis = 0.8) + \method{chart.Weights}{opt.list} (object, neighbors = NULL, ..., main = "Weights", las = 3, xlab = NULL, cex.lab = 1, element.color = "darkgray", Added: pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R 2013-09-13 18:34:12 UTC (rev 3096) @@ -0,0 +1,34 @@ +library(PortfolioAnalytics) + +data(edhec) +R <- edhec[, 1:4] + +# set up simple portfolio with leverage and box constraints +pspec <- portfolio.spec(assets=colnames(R)) +pspec <- add.constraint(portfolio=pspec, type="leverage", min_sum=0.99, max_sum=1.01) +pspec <- add.constraint(portfolio=pspec, type="box", min=0, max=1) + +# generate random portfolios using the 3 methods +rp1 <- random_portfolios(portfolio=pspec, permutations=5000, rp_method='sample') +rp2 <- random_portfolios(portfolio=pspec, permutations=5000, rp_method='simplex') +rp3 <- random_portfolios(portfolio=pspec, permutations=5000, rp_method='grid') + +# show feasible portfolios in mean-StdDev space +tmp1.mean <- apply(rp1, 1, function(x) mean(R %*% x)) +tmp1.StdDev <- apply(rp1, 1, function(x) StdDev(R=R, weights=x)) +tmp2.mean <- apply(rp2, 1, function(x) mean(R %*% x)) +tmp2.StdDev <- apply(rp2, 1, function(x) StdDev(R=R, weights=x)) +tmp3.mean <- apply(rp3, 1, function(x) mean(R %*% x)) +tmp3.StdDev <- apply(rp3, 1, function(x) StdDev(R=R, weights=x)) + +# plot feasible portfolios +plot(x=tmp1.StdDev, y=tmp1.mean, col="gray", main="Random Portfolio Methods") +points(x=tmp2.StdDev, y=tmp2.mean, col="red", pch=2) +points(x=tmp3.StdDev, y=tmp3.mean, col="lightgreen", pch=5) 
+legend("bottomright", legend=c("sample", "simplex", "grid"), col=c("gray", "red", "lightgreen"), + pch=c(1, 2, 5), bty="n") + +# sample has pretty even coverage of feasible space +# simplex is concentrated around the assets +# grid is 'pushed'/concentrated to the interior due to normalization +# This could be a really good example with Shiny for an interactive example From noreply at r-forge.r-project.org Fri Sep 13 22:55:28 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 22:55:28 +0200 (CEST) Subject: [Returnanalytics-commits] r3097 - in pkg/PortfolioAnalytics: . R man Message-ID: <20130913205528.582401806F4@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-13 22:55:28 +0200 (Fri, 13 Sep 2013) New Revision: 3097 Added: pkg/PortfolioAnalytics/man/check_constraints.Rd pkg/PortfolioAnalytics/man/rp_grid.Rd pkg/PortfolioAnalytics/man/rp_sample.Rd Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/constraints.R pkg/PortfolioAnalytics/R/random_portfolios.R pkg/PortfolioAnalytics/man/chart.RiskReward.Rd pkg/PortfolioAnalytics/man/random_portfolios.Rd pkg/PortfolioAnalytics/man/rp_simplex.Rd Log: Adding option to eliminate portfolios that do not satisfy constraints. Adding helper function to check if constraints are satisfied. Updating documentation for rp functions. 
Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-13 18:34:12 UTC (rev 3096) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-13 20:55:28 UTC (rev 3097) @@ -61,6 +61,8 @@ export(return_constraint) export(return_objective) export(risk_budget_objective) +export(rp_grid) +export(rp_sample) export(rp_simplex) export(rp_transform) export(scatterFUN) Modified: pkg/PortfolioAnalytics/R/constraints.R =================================================================== --- pkg/PortfolioAnalytics/R/constraints.R 2013-09-13 18:34:12 UTC (rev 3096) +++ pkg/PortfolioAnalytics/R/constraints.R 2013-09-13 20:55:28 UTC (rev 3097) @@ -1125,6 +1125,62 @@ return(portfolio) } +#' check if a set of weights satisfies the constraints +#' +#' This function checks if a set of weights satisfies all constraints. This is +#' used as a helper function for random portfolios created with \code{rp_simplex} +#' and \code{rp_grid} to eliminate portfolios that do not satisfy the constraints. 
+#' +#' @param weights vector of weights +#' @param portfolio object of class 'portfolio' +#' @return TRUE if all constraints are satisfied, FALSE if any constraint is violated +#' @author Ross Bennett +check_constraints <- function(weights, portfolio){ + + # get the constraints to check + # We will check leverage, box, group, and position limit constraints + constraints <- get_constraints(portfolio) + min_sum <- constraints$min_sum + max_sum <- constraints$max_sum + min <- constraints$min + max <- constraints$max + groups <- constraints$groups + cLO <- constraints$cLO + cUP <- constraints$cUP + group_pos <- constraints$group_pos + div_target <- constraints$div_target + turnover_target <- constraints$turnover_target + max_pos <- constraints$max_pos + max_pos_long <- constraints$max_pos_long + max_pos_short <- constraints$max_pos_short + tolerance <- .Machine$double.eps^0.5 + + log_vec <- c() + # check leverage constraints + if(!is.null(min_sum) & !is.null(max_sum)){ + # TRUE if constraints are satisfied + log_vec <- c(log_vec, ((sum(weights) >= min_sum) & (sum(weights) <= max_sum))) + } + + # check box constraints + if(!is.null(min) & !is.null(max)){ + # TRUE if constraints are satisfied + log_vec <- c(log_vec, (all(weights >= min) & all(weights <= max))) + } + + # check group constraints + if(!is.null(groups) & !is.null(cLO) & !is.null(cUP)){ + log_vec <- c(log_vec, all(!group_fail(weights, groups, cLO, cUP, group_pos))) + } + + # check position limit constraints + if(!is.null(max_pos) | !is.null(max_pos_long) | !is.null(max_pos_short)){ + log_vec <- c(log_vec, !pos_limit_fail(weights, max_pos, max_pos_long, max_pos_short)) + } + # return TRUE if all constraints are satisfied, FALSE if any constraint is violated + return(all(log_vec)) +} + # #' constructor for class constraint_ROI # #' # #' @param assets number of assets, or optionally a named vector of assets specifying seed weights Modified: pkg/PortfolioAnalytics/R/random_portfolios.R 
=================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 18:34:12 UTC (rev 3096) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 20:55:28 UTC (rev 3097) @@ -328,10 +328,11 @@ #' \item{simplex: }{The 'simplex' method to generate random portfolios is #' based on a paper by W. T. Shaw. The simplex method is useful to generate #' random portfolios with the full investment constraint, where the sum of the -#' weights is equal to 1, and min box constraints. All other constraints such -#' as group and position limit constraints will be handled by elimination. If -#' the constraints are very restrictive, this may result in very few feasible -#' portfolios remaining.} +#' weights is equal to 1, and min box constraints. Values for \code{min_sum} +#' and \code{max_sum} of the leverage constraint will be ignored, the sum of +#' weights will equal 1. All other constraints such as group and position +#' limit constraints will be handled by elimination. If the constraints are +#' very restrictive, this may result in very few feasible portfolios remaining.} #' \item{grid: }{The 'grid' method to generate random portfolios is based on #' the \code(gridSearch} function in package 'NMOF'. The grid search method #' only satisfies the \code{min} and \code{max} box constraints. The @@ -341,17 +342,21 @@ #' penalized in \code{constrained_objective}.} #' } #' +#' The constraint types checked are leverage, box, group, and position limit. Any +#' portfolio that does not satisfy all these constraints will be eliminated. 
+#' #' @param portfolio an object of type "portfolio" specifying the constraints for the optimization, see \code{\link{constraint}} #' @param permutations integer: number of unique constrained random portfolios to generate #' @param \dots any other passthru parameters #' @param rp_method method to generate random portfolios +#' @param eliminate TRUE/FALSE, eliminate portfolios that do not satisfy constraints #' @return matrix of random portfolio weights #' @seealso \code{\link{portfolio.spec}}, \code{\link{objective}}, \code{\link{randomize_portfolio_v2}} #' @author Peter Carl, Brian G. Peterson, (based on an idea by Pat Burns) #' @aliases random_portfolios #' @rdname random_portfolios #' @export -random_portfolios_v2 <- function( portfolio, permutations=100, rp_method="sample", ...){ +random_portfolios_v2 <- function( portfolio, permutations=100, rp_method="sample", eliminate=TRUE, ...){ if(hasArg(p)) p=match.call(expand.dots=TRUE)$p else p=0:5 if(hasArg(normalize)) normalize=match.call(expand.dots=TRUE)$normalize else normalize=TRUE switch(rp_method, @@ -362,6 +367,15 @@ grid = {rp <- rp_grid(portfolio, permutations, normalize, ...) } ) + if(eliminate){ + # eliminate portfolios that do not satisfy constraints + stopifnot("package:foreach" %in% search() || require("foreach",quietly = TRUE)) + check <- foreach(i=1:nrow(rp), .combine=c) %dopar% { + # check_constraint returns TRUE if all constraints are satisfied + check_constraints(weights=rp[i,], portfolio=portfolio) + } + rp <- rp[which(check==TRUE),] + } return(rp) } @@ -417,7 +431,8 @@ #' @details #' The simplex method is useful to generate random portfolios with the full #' investment constraint where the sum of the weights is equal to 1 and min -#' box constraints. All other constraints such as group and position limit +#' box constraints. Values for min_sum and max_sum will be ignored, the sum +#' of weights will equal 1. 
All other constraints such as group and position limit #' constraints will be handled by elimination. If the constraints are very #' restrictive, this may result in very few feasible portfolios remaining. #' @@ -449,6 +464,7 @@ # get the constraints # the simplex method for generating random portfolios requires that the sum of weights is equal to 1 + # ignore the min_sum and max_sum constraints constraints <- get_constraints(portfolio) L <- constraints$min @@ -570,6 +586,7 @@ tmp } out <- do.call(rbind, out) + out <- na.omit(out) } if(normalize) return(out) else return(rp) } Modified: pkg/PortfolioAnalytics/man/chart.RiskReward.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-13 18:34:12 UTC (rev 3096) +++ pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-13 20:55:28 UTC (rev 3097) @@ -8,8 +8,6 @@ \alias{chart.RiskReward.optimize.portfolio.ROI} \title{classic risk reward scatter} \usage{ - chart.RiskReward(object, ...) - \method{chart.RiskReward}{optimize.portfolio.DEoptim} (object, ..., neighbors = NULL, return.col = "mean", risk.col = "ES", chart.assets = FALSE, element.color = "darkgray", cex.axis = 0.8, @@ -35,6 +33,8 @@ element.color = "darkgray", cex.axis = 0.8, ylim = NULL, xlim = NULL, rp = FALSE) + chart.RiskReward(object, ...) 
+ \method{chart.RiskReward}{opt.list} (object, ..., risk.col = "ES", return.col = "mean", main = "", ylim = NULL, xlim = NULL, labels.assets = TRUE, Added: pkg/PortfolioAnalytics/man/check_constraints.Rd =================================================================== --- pkg/PortfolioAnalytics/man/check_constraints.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/check_constraints.Rd 2013-09-13 20:55:28 UTC (rev 3097) @@ -0,0 +1,26 @@ +\name{check_constraints} +\alias{check_constraints} +\title{check if a set of weights satisfies the constraints} +\usage{ + check_constraints(weights, portfolio) +} +\arguments{ + \item{weights}{vector of weights} + + \item{portfolio}{object of class 'portfolio'} +} +\value{ + TRUE if all constraints are satisfied, FALSE if any + constraint is violated +} +\description{ + This function checks if a set of weights satisfies all + constraints. This is used as a helper function for random + portfolios created with \code{rp_simplex} and + \code{rp_grid} to eliminate portfolios that do not + satisfy the constraints. +} +\author{ + Ross Bennett +} + Modified: pkg/PortfolioAnalytics/man/random_portfolios.Rd =================================================================== --- pkg/PortfolioAnalytics/man/random_portfolios.Rd 2013-09-13 18:34:12 UTC (rev 3096) +++ pkg/PortfolioAnalytics/man/random_portfolios.Rd 2013-09-13 20:55:28 UTC (rev 3097) @@ -3,7 +3,8 @@ \alias{random_portfolios_v2} \title{version 2 generate an arbitary number of constrained random portfolios} \usage{ - random_portfolios_v2(portfolio, permutations = 100, ...) + random_portfolios_v2(portfolio, permutations = 100, + rp_method = "sample", eliminate = TRUE, ...) 
} \arguments{ \item{portfolio}{an object of type "portfolio" specifying @@ -14,6 +15,11 @@ random portfolios to generate} \item{\dots}{any other passthru parameters} + + \item{rp_method}{method to generate random portfolios} + + \item{eliminate}{TRUE/FALSE, eliminate portfolios that do + not satisfy constraints} } \value{ matrix of random portfolio weights @@ -23,6 +29,38 @@ generate an arbitrary number of constrained random portfolios. } +\details{ + Random portfolios can be generate using one of three + methods. \itemize{ \item{sample: }{The 'sample' method to + generate random portfolios is based on an idea pioneerd + by Pat Burns. This is the most flexible method and can + generate portfolios to satisfy leverage, box, group, and + position limit constraints.} \item{simplex: }{The + 'simplex' method to generate random portfolios is based + on a paper by W. T. Shaw. The simplex method is useful to + generate random portfolios with the full investment + constraint, where the sum of the weights is equal to 1, + and min box constraints. Values for \code{min_sum} and + \code{max_sum} of the leverage constraint will be + ignored, the sum of weights will equal 1. All other + constraints such as group and position limit constraints + will be handled by elimination. If the constraints are + very restrictive, this may result in very few feasible + portfolios remaining.} \item{grid: }{The 'grid' method to + generate random portfolios is based on the + \code(gridSearch} function in package 'NMOF'. The grid + search method only satisfies the \code{min} and + \code{max} box constraints. The \code{min_sum} and + \code{max_sum} leverage constraints will likely be + violated and the weights in the random portfolios should + be normalized. Normalization may cause the box + constraints to be violated and will be penalized in + \code{constrained_objective}.} } + + The constraint types checked are leverage, box, group, + and position limit. 
Any portfolio that does not satisfy + all these constraints will be eliminated. +} \author{ Peter Carl, Brian G. Peterson, (based on an idea by Pat Burns) Added: pkg/PortfolioAnalytics/man/rp_grid.Rd =================================================================== --- pkg/PortfolioAnalytics/man/rp_grid.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/rp_grid.Rd 2013-09-13 20:55:28 UTC (rev 3097) @@ -0,0 +1,40 @@ +\name{rp_grid} +\alias{rp_grid} +\title{Generate random portfolios based on grid search method} +\usage{ + rp_grid(portfolio, permutations = 2000, normalize = TRUE, + ...) +} +\arguments{ + \item{portfolio}{} + + \item{permutations}{} + + \item{normalize}{TRUE/FALSE} + + \item{\dots}{any passthru parameters. Currently ignored} +} +\value{ + matrix of random portfolio weights +} +\description{ + This function generates random portfolios based on the + \code{gridSearch} function from the 'NMOF' package. +} +\details{ + The number of levels is calculated based on permutations + and number of assets. The number of levels must be an + integer and may not result in the exact number of + permutations. We round up to the nearest integer for the + levels so the number of portfolios generated will be + greater than or equal to permutations. + + The grid search method only satisfies the \code{min} and + \code{max} box constraints. The \code{min_sum} and + \code{max_sum} leverage constraints will likely be + violated and the weights in the random portfolios should + be normalized. Normalization may cause the box + constraints to be violated and will be penalized in + \code{constrained_objective}. 
+} + Added: pkg/PortfolioAnalytics/man/rp_sample.Rd =================================================================== --- pkg/PortfolioAnalytics/man/rp_sample.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/rp_sample.Rd 2013-09-13 20:55:28 UTC (rev 3097) @@ -0,0 +1,30 @@ +\name{rp_sample} +\alias{rp_sample} +\title{Generate random portfolios using the sample method} +\usage{ + rp_sample(portfolio, permutations, ...) +} +\arguments{ + \item{portfolio}{an object of type "portfolio" specifying + the constraints for the optimization, see + \code{\link{portfolio.spec}}} + + \item{permutations}{integer: number of unique constrained + random portfolios to generate} + + \item{\dots}{any other passthru parameters} +} +\value{ + a matrix of random portfolio weights +} +\description{ + This function generates random portfolios based on an + idea by Pat Burns. +} +\details{ + The 'sample' method to generate random portfolios is + based on an idea pioneerd by Pat Burns. This is the most + flexible method and can generate portfolios to satisfy + leverage, box, group, and position limit constraints. +} + Modified: pkg/PortfolioAnalytics/man/rp_simplex.Rd =================================================================== --- pkg/PortfolioAnalytics/man/rp_simplex.Rd 2013-09-13 18:34:12 UTC (rev 3096) +++ pkg/PortfolioAnalytics/man/rp_simplex.Rd 2013-09-13 20:55:28 UTC (rev 3097) @@ -17,7 +17,7 @@ \item{\dots}{any other passthru parameters} } \value{ - a matrix of random portfolios + a matrix of random portfolio weights } \description{ This function generates random portfolios based on the @@ -26,9 +26,10 @@ \details{ The simplex method is useful to generate random portfolios with the full investment constraint where the - sum of the weights is equal to 1 and min and max box - constraints. All other constraints such as group and - position limit constraints will be handled by + sum of the weights is equal to 1 and min box constraints. 
+ Values for min_sum and max_sum will be ignored, the sum + of weights will equal 1. All other constraints such as + group and position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. From noreply at r-forge.r-project.org Fri Sep 13 23:05:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 23:05:13 +0200 (CEST) Subject: [Returnanalytics-commits] r3098 - pkg/PortfolioAnalytics/R Message-ID: <20130913210513.C24511806F4@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-13 23:05:13 +0200 (Fri, 13 Sep 2013) New Revision: 3098 Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R Log: Adding revised random_portfolios arguments to optimize.portfolio Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-13 20:55:28 UTC (rev 3097) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-13 21:05:13 UTC (rev 3098) @@ -615,7 +615,9 @@ if(hasArg(rpseed) & isTRUE(rpseed)) { # initial seed population is generated with random_portfolios function # if(hasArg(eps)) eps=match.call(expand.dots=TRUE)$eps else eps = 0.01 - rp <- random_portfolios(portfolio=portfolio, permutations=NP) + if(hasArg(rp_method)) rp_method=match.call(expand.dots=TRUE)$rp_method else rp_method="sample" + if(hasArg(eliminate)) eliminate=match.call(expand.dots=TRUE)$eliminate else eliminate=TRUE + rp <- random_portfolios(portfolio=portfolio, permutations=NP, rp_method=rp_method, eliminate=eliminate, ...) 
DEcformals$initialpop <- rp } controlDE <- do.call(DEoptim.control, DEcformals) @@ -651,7 +653,9 @@ if(optimize_method=="random"){ #' call random_portfolios() with portfolio and search_size to create matrix of portfolios if(missing(rp) | is.null(rp)){ - rp <- random_portfolios(portfolio=portfolio, permutations=search_size) + if(hasArg(rp_method)) rp_method=match.call(expand.dots=TRUE)$rp_method else rp_method="sample" + if(hasArg(eliminate)) eliminate=match.call(expand.dots=TRUE)$eliminate else eliminate=TRUE + rp <- random_portfolios(portfolio=portfolio, permutations=search_size, rp_method=rp_method, eliminate=eliminate, ...) } #' store matrix in out if trace=TRUE if (isTRUE(trace)) out$random_portfolios <- rp From noreply at r-forge.r-project.org Fri Sep 13 23:15:47 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 13 Sep 2013 23:15:47 +0200 (CEST) Subject: [Returnanalytics-commits] r3099 - in pkg/PortfolioAnalytics: R man Message-ID: <20130913211548.0FC15185DBF@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-13 23:15:47 +0200 (Fri, 13 Sep 2013) New Revision: 3099 Modified: pkg/PortfolioAnalytics/R/random_portfolios.R pkg/PortfolioAnalytics/man/rp_simplex.Rd Log: Changin variable name from 'p' to 'fev' in rp_simplex so it is not confused with 'p' for ETL and other risk measures. 
Modified: pkg/PortfolioAnalytics/R/random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 21:05:13 UTC (rev 3098) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-13 21:15:47 UTC (rev 3099) @@ -357,12 +357,12 @@ #' @rdname random_portfolios #' @export random_portfolios_v2 <- function( portfolio, permutations=100, rp_method="sample", eliminate=TRUE, ...){ - if(hasArg(p)) p=match.call(expand.dots=TRUE)$p else p=0:5 + if(hasArg(fev)) fev=match.call(expand.dots=TRUE)$fev else fev=0:5 if(hasArg(normalize)) normalize=match.call(expand.dots=TRUE)$normalize else normalize=TRUE switch(rp_method, sample = {rp <- rp_sample(portfolio, permutations, ...) }, - simplex = {rp <- rp_simplex(portfolio, permutations, p, ...) + simplex = {rp <- rp_simplex(portfolio, permutations, fev, ...) }, grid = {rp <- rp_grid(portfolio, permutations, normalize, ...) } @@ -431,10 +431,11 @@ #' @details #' The simplex method is useful to generate random portfolios with the full #' investment constraint where the sum of the weights is equal to 1 and min -#' box constraints. Values for min_sum and max_sum will be ignored, the sum -#' of weights will equal 1. All other constraints such as group and position limit -#' constraints will be handled by elimination. If the constraints are very -#' restrictive, this may result in very few feasible portfolios remaining. +#' box constraints with no upper bound on max constraints. Values for min_sum +#' and max_sum will be ignored, the sum of weights will equal 1. All other +#' constraints such as group and position limit constraints will be handled by +#' elimination. If the constraints are very restrictive, this may result in +#' very few feasible portfolios remaining. #' #' The random portfolios are created by first generating a set of uniform #' random numbers. @@ -443,21 +444,21 @@ #' box constraints. 
#' \deqn{w_{i} = min_{i} + (1 - \sum_{j=1}^{N} min_{j}) \frac{log(U_{i}^{q}}{\sum_{k=1}^{N}log(U_{k}^{q}}} #' -#' \code{p} controls the Face-Edge-Vertex (FEV) biasing where \deqn{q=2^p}. As +#' \code{fev} controls the Face-Edge-Vertex (FEV) biasing where \deqn{q=2^fev}. As #' \code{q} approaches infinity, the set of weights will be concentrated in a -#' single asset. To sample the interior and exterior, \code{p} can be passed +#' single asset. To sample the interior and exterior, \code{fev} can be passed #' in as a vector. The number of portfolios, \code{permutations}, and the -#' length of \code{p} affect how the random portfolios are generated. For -#' example if \code{permutations=10000} and \code{p=0:4}, 2000 portfolios will -#' be generated for each value of \code{p}. +#' length of \code{fev} affect how the random portfolios are generated. For +#' example, if \code{permutations=10000} and \code{fev=0:4}, 2000 portfolios will +#' be generated for each value of \code{fev}. #' -#' @param portfolio an object of type "portfolio" specifying the constraints for the optimization, see \code{\link{portfolio.spec}} +#' @param portfolio an object of class 'portfolio' specifying the constraints for the optimization, see \code{\link{portfolio.spec}} #' @param permutations integer: number of unique constrained random portfolios to generate -#' @param p scalar or vector for FEV biasing +#' @param fev scalar or vector for FEV biasing #' @param \dots any other passthru parameters #' @return a matrix of random portfolio weights #' @export -rp_simplex <- function(portfolio, permutations, p=0:5, ...){ +rp_simplex <- function(portfolio, permutations, fev=0:5, ...){ # get the assets from the portfolio assets <- portfolio$assets nassets <- length(assets) @@ -468,8 +469,8 @@ constraints <- get_constraints(portfolio) L <- constraints$min - # number of portfolios for each p to generate - k <- floor(permutations / length(p)) + # number of portfolios for each fev to generate + k <- 
ceiling(permutations / length(fev)) # generate uniform[0, 1] random numbers U <- runif(n=k*permutations, 0, 1) @@ -477,8 +478,8 @@ # do the transformation to the set of weights to satisfy lower bounds stopifnot("package:foreach" %in% search() || require("foreach",quietly = TRUE)) - out <- foreach(j = 1:length(p), .combine=c) %:% foreach(i=1:nrow(Umat)) %dopar% { - q <- 2^p[j] + out <- foreach(j = 1:length(fev), .combine=c) %:% foreach(i=1:nrow(Umat)) %dopar% { + q <- 2^fev[j] tmp <- L + (1 - sum(L)) * log(Umat[i,])^q / sum(log(Umat[i,])^q) tmp } Modified: pkg/PortfolioAnalytics/man/rp_simplex.Rd =================================================================== --- pkg/PortfolioAnalytics/man/rp_simplex.Rd 2013-09-13 21:05:13 UTC (rev 3098) +++ pkg/PortfolioAnalytics/man/rp_simplex.Rd 2013-09-13 21:15:47 UTC (rev 3099) @@ -2,17 +2,17 @@ \alias{rp_simplex} \title{Generate random portfolios using the simplex method} \usage{ - rp_simplex(portfolio, permutations, p = 0:5, ...) + rp_simplex(portfolio, permutations, fev = 0:5, ...) } \arguments{ - \item{portfolio}{an object of type "portfolio" specifying - the constraints for the optimization, see + \item{portfolio}{an object of class 'portfolio' + specifying the constraints for the optimization, see \code{\link{portfolio.spec}}} \item{permutations}{integer: number of unique constrained random portfolios to generate} - \item{p}{scalar or vector for FEV biasing} + \item{fev}{scalar or vector for FEV biasing} \item{\dots}{any other passthru parameters} } @@ -26,10 +26,11 @@ \details{ The simplex method is useful to generate random portfolios with the full investment constraint where the - sum of the weights is equal to 1 and min box constraints. - Values for min_sum and max_sum will be ignored, the sum - of weights will equal 1. All other constraints such as - group and position limit constraints will be handled by + sum of the weights is equal to 1 and min box constraints + with no upper bound on max constraints. 
Values for + min_sum and max_sum will be ignored, the sum of weights + will equal 1. All other constraints such as group and + position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. @@ -41,14 +42,15 @@ \sum_{j=1}^{N} min_{j}) \frac{log(U_{i}^{q}}{\sum_{k=1}^{N}log(U_{k}^{q}}} - \code{p} controls the Face-Edge-Vertex (FEV) biasing - where \deqn{q=2^p}. As \code{q} approaches infinity, the - set of weights will be concentrated in a single asset. To - sample the interior and exterior, \code{p} can be passed - in as a vector. The number of portfolios, - \code{permutations}, and the length of \code{p} affect - how the random portfolios are generated. For example if - \code{permutations=10000} and \code{p=0:4}, 2000 - portfolios will be generated for each value of \code{p}. + \code{fev} controls the Face-Edge-Vertex (FEV) biasing + where \deqn{q=2^fev}. As \code{q} approaches infinity, + the set of weights will be concentrated in a single + asset. To sample the interior and exterior, \code{fev} + can be passed in as a vector. The number of portfolios, + \code{permutations}, and the length of \code{fev} affect + how the random portfolios are generated. For example, if + \code{permutations=10000} and \code{fev=0:4}, 2000 + portfolios will be generated for each value of + \code{fev}. } From noreply at r-forge.r-project.org Sat Sep 14 00:30:19 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 14 Sep 2013 00:30:19 +0200 (CEST) Subject: [Returnanalytics-commits] r3100 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
R man Message-ID: <20130913223019.2A4E9185450@r-forge.r-project.org> Author: shubhanm Date: 2013-09-14 00:30:18 +0200 (Sat, 14 Sep 2013) New Revision: 3100 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd Log: documentation added Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-13 21:15:47 UTC (rev 3099) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-13 22:30:18 UTC (rev 3100) @@ -4,7 +4,9 @@ export(chart.AcarSim) export(chart.Autocorrelation) export(EmaxDDGBM) +export(glmi) export(GLMSmoothIndex) +export(lmi) export(LoSharpe) export(QP.Norm) export(Return.GLM) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R 2013-09-13 21:15:47 UTC (rev 3099) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R 2013-09-13 22:30:18 UTC (rev 3100) @@ -1,3 +1,59 @@ +#'@title Fitting Generalized Linear Models with HC and HAC Covariance Matrix Estimators +#'@description +#' glm is used to fit generalized linear models, specified by giving a symbolic description of the linear predictor and a description of the error distribution. +#' @details +#' see \code{\link{glm}}. +#' @param formula +#'an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ?Details?. 
+#' +#'@param family +#' a description of the error distribution and link function to be used in the model. This can be a character string naming a family function, a family function or the result of a call to a family function. (See family for details of family functions.) +#'@param data +#'an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model. If not found in data, the variables are taken from environment(formula), typically the environment from which lm is called. +#' +#'@param vcov HC-HAC covariance estimation +#'@param weights +#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum(w*e^2)); otherwise ordinary least squares is used. See also ?Details?, +#'@param subset +#'an optional vector specifying a subset of observations to be used in the fitting process. +#' +#' +#'@param na.action +#'a function which indicates what should happen when the data contain NAs. The default is set by the na.action setting of options, and is na.fail if that is unset. The ?factory-fresh? default is na.omit. Another possible value is NULL, no action. Value na.exclude can be useful. +#' +#'@param start +#'starting values for the parameters in the linear predictor. +#' +#'@param etastart +#'starting values for the linear predictor. +#' +#'@param mustart +#'starting values for the vector of means. +#' +#'@param offset +#'this can be used to specify an a priori known component to be included in the linear predictor during fitting. This should be NULL or a numeric vector of length equal to the number of cases. One or more offset terms can be included in the formula instead or as well, and if more than one is specified their sum is used. See model.offset. +#' +#'@param control +#'a list of parameters for controlling the fitting process. 
For glm.fit this is passed to glm.control. +#' +#'@param model +#' a logical value indicating whether model frame should be included as a component of the returned value. +#'@param method +#'the method to be used; for fitting, currently only method = "qr" is supported; method = "model.frame" returns the model frame (the same as with model = TRUE, see below). +#' +#'@param x logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned. +#'@param y logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned. +#' +#'@param contrasts +#'an optional list. See the contrasts.arg of model.matrix.default. +#' +#'@param \dots +#'additional arguments to be passed to the low level regression fitting functions (see below). +#' @author The original R implementation of glm was written by Simon Davies working for Ross Ihaka at the University of Auckland, but has since been extensively re-written by members of the R Core team. +#' The design was inspired by the S function of the same name described in Hastie & Pregibon (1992). 
+#' @keywords HC HAC covariance estimation regression fitting model +#' @rdname glmi +#' @export glmi <- function (formula, family = gaussian, data,vcov = NULL, weights, subset, na.action, start = NULL, etastart, mustart, offset, control = list(...), model = TRUE, method = "glm.fit", x = FALSE, y = TRUE, contrasts = NULL, Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 2013-09-13 21:15:47 UTC (rev 3099) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 2013-09-13 22:30:18 UTC (rev 3100) @@ -1,4 +1,48 @@ - +#'@title Fitting Generalized Linear Models with HC and HAC Covariance Matrix Estimators +#'@description +#' lm is used to fit generalized linear models, specified by giving a symbolic description of the linear predictor and a description of the error distribution. +#' @details +#' see \code{\link{lm}}. +#' @param formula +#'an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ?Details?. +#' +#' +#'@param data +#'an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model. If not found in data, the variables are taken from environment(formula), typically the environment from which lm is called. +#' +#'@param vcov HC-HAC covariance estimation +#'@param weights +#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum(w*e^2)); otherwise ordinary least squares is used. See also ?Details?, +#' +#' +#'@param subset +#'an optional vector specifying a subset of observations to be used in the fitting process. 
+#'@param na.action +#'a function which indicates what should happen when the data contain NAs. The default is set by the na.action setting of options, and is na.fail if that is unset. The 'factory-fresh' default is na.omit. Another possible value is NULL, no action. Value na.exclude can be useful. +#' +#'@param method +#'the method to be used; for fitting, currently only method = "qr" is supported; method = "model.frame" returns the model frame (the same as with model = TRUE, see below). +#' +#'@param model logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned. +#'@param x logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned. +#'@param y logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned. +#'@param qr logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned. +#'@param singular.ok +#'logical. If FALSE (the default in S but not in R) a singular fit is an error. +#' +#'@param contrasts +#'an optional list. See the contrasts.arg of model.matrix.default. +#' +#'@param offset +#'this can be used to specify an a priori known component to be included in the linear predictor during fitting. This should be NULL or a numeric vector of length equal to the number of cases. One or more offset terms can be included in the formula instead or as well, and if more than one are specified their sum is used. See model.offset. +#' +#'@param \dots +#'additional arguments to be passed to the low level regression fitting functions (see below). 
+#' @author The original R implementation of glm was written by Simon Davies working for Ross Ihaka at the University of Auckland, but has since been extensively re-written by members of the R Core team. +#' The design was inspired by the S function of the same name described in Hastie & Pregibon (1992). +#' @keywords HC HAC covariance estimation regression fitting model +#' @rdname lmi +#' @export lmi <- function (formula, data,vcov = NULL, subset, weights, na.action, method = "qr", model = TRUE, x = FALSE, y = FALSE, qr = TRUE, singular.ok = TRUE, contrasts = NULL, offset, ...) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-13 21:15:47 UTC (rev 3099) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-13 22:30:18 UTC (rev 3100) @@ -1,6 +1,6 @@ \name{glmi} \alias{glmi} -\title{Support of HAC methods within lm regression model} +\title{Fitting Generalized Linear Models with HC and HAC Covariance Matrix Estimators} \usage{ glmi(formula, family = gaussian, data, vcov = NULL, weights, subset, na.action, start = NULL, etastart, @@ -8,11 +8,107 @@ method = "glm.fit", x = FALSE, y = TRUE, contrasts = NULL, ...) } +\arguments{ + \item{formula}{an object of class "formula" (or one that + can be coerced to that class): a symbolic description of + the model to be fitted. The details of model + specification are given under ?Details?.} + + \item{family}{a description of the error distribution and + link function to be used in the model. This can be a + character string naming a family function, a family + function or the result of a call to a family function. + (See family for details of family functions.)} + + \item{data}{an optional data frame, list or environment + (or object coercible by as.data.frame to a data frame) + containing the variables in the model. 
If not found in + data, the variables are taken from environment(formula), + typically the environment from which lm is called.} + + \item{vcov}{HC-HAC covariance estimation} + + \item{weights}{an optional vector of weights to be used + in the fitting process. Should be NULL or a numeric + vector. If non-NULL, weighted least squares is used with + weights weights (that is, minimizing sum(w*e^2)); + otherwise ordinary least squares is used. See also + ?Details?,} + + \item{subset}{an optional vector specifying a subset of + observations to be used in the fitting process.} + + \item{na.action}{a function which indicates what should + happen when the data contain NAs. The default is set by + the na.action setting of options, and is na.fail if that + is unset. The ?factory-fresh? default is na.omit. + Another possible value is NULL, no action. Value + na.exclude can be useful.} + + \item{start}{starting values for the parameters in the + linear predictor.} + + \item{etastart}{starting values for the linear + predictor.} + + \item{mustart}{starting values for the vector of means.} + + \item{offset}{this can be used to specify an a priori + known component to be included in the linear predictor + during fitting. This should be NULL or a numeric vector + of length equal to the number of cases. One or more + offset terms can be included in the formula instead or as + well, and if more than one is specified their sum is + used. See model.offset.} + + \item{control}{a list of parameters for controlling the + fitting process. For glm.fit this is passed to + glm.control.} + + \item{model}{a logical value indicating whether model + frame should be included as a component of the returned + value.} + + \item{method}{the method to be used; for fitting, + currently only method = "qr" is supported; method = + "model.frame" returns the model frame (the same as with + model = TRUE, see below).} + + \item{x}{logicals. 
If TRUE the corresponding components + of the fit (the model frame, the model matrix, the + response, the QR decomposition) are returned.} + + \item{y}{logicals. If TRUE the corresponding components + of the fit (the model frame, the model matrix, the + response, the QR decomposition) are returned.} + + \item{contrasts}{an optional list. See the contrasts.arg + of model.matrix.default.} + + \item{\dots}{additional arguments to be passed to the low + level regression fitting functions (see below).} +} \description{ - Support of HAC methods within lm regression model + glm is used to fit generalized linear models, specified + by giving a symbolic description of the linear predictor + and a description of the error distribution. } -\seealso{ - \code{\link{glm} +\details{ + see \code{\link{glm}}. } +\author{ + The original R implementation of glm was written by Simon + Davies working for Ross Ihaka at the University of + Auckland, but has since been extensively re-written by + members of the R Core team. The design was inspired by + the S function of the same name described in Hastie & + Pregibon (1992). } +\keyword{covariance} +\keyword{estimation} +\keyword{fitting} +\keyword{HAC} +\keyword{HC} +\keyword{model} +\keyword{regression} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-13 21:15:47 UTC (rev 3099) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-13 22:30:18 UTC (rev 3100) @@ -1,17 +1,102 @@ \name{lmi} \alias{lmi} -\title{Support of HAC methods within glm regression model} +\title{Fitting Linear Models with HC and HAC Covariance Matrix Estimators} \usage{ lmi(formula, data, vcov = NULL, subset, weights, na.action, method = "qr", model = TRUE, x = FALSE, y = FALSE, qr = TRUE, singular.ok = TRUE, contrasts = NULL, offset, ...) 
} +\arguments{ + \item{formula}{an object of class "formula" (or one that + can be coerced to that class): a symbolic description of + the model to be fitted. The details of model + specification are given under 'Details'.} + + \item{data}{an optional data frame, list or environment + (or object coercible by as.data.frame to a data frame) + containing the variables in the model. If not found in + data, the variables are taken from environment(formula), + typically the environment from which lm is called.} + + \item{vcov}{HC-HAC covariance estimation} + + \item{weights}{an optional vector of weights to be used + in the fitting process. Should be NULL or a numeric + vector. If non-NULL, weighted least squares is used with + weights weights (that is, minimizing sum(w*e^2)); + otherwise ordinary least squares is used. See also + 'Details',} + + \item{subset}{an optional vector specifying a subset of + observations to be used in the fitting process.} + + \item{na.action}{a function which indicates what should + happen when the data contain NAs. The default is set by + the na.action setting of options, and is na.fail if that + is unset. The 'factory-fresh' default is na.omit. + Another possible value is NULL, no action. Value + na.exclude can be useful.} + + \item{method}{the method to be used; for fitting, + currently only method = "qr" is supported; method = + "model.frame" returns the model frame (the same as with + model = TRUE, see below).} + + \item{model}{logicals. If TRUE the corresponding + components of the fit (the model frame, the model matrix, + the response, the QR decomposition) are returned.} + + \item{x}{logicals. If TRUE the corresponding components + of the fit (the model frame, the model matrix, the + response, the QR decomposition) are returned.} + + \item{y}{logicals. If TRUE the corresponding components + of the fit (the model frame, the model matrix, the + response, the QR decomposition) are returned.} + + \item{qr}{logicals. 
If TRUE the corresponding components + of the fit (the model frame, the model matrix, the + response, the QR decomposition) are returned.} + + \item{singular.ok}{logical. If FALSE (the default in S + but not in R) a singular fit is an error.} + + \item{contrasts}{an optional list. See the contrasts.arg + of model.matrix.default.} + + \item{offset}{this can be used to specify an a priori + known component to be included in the linear predictor + during fitting. This should be NULL or a numeric vector + of length equal to the number of cases. One or more + offset terms can be included in the formula instead or as + well, and if more than one are specified their sum is + used. See model.offset.} + + \item{\dots}{additional arguments to be passed to the low + level regression fitting functions (see below).} +} \description{ - Support of HAC methods within glm regression model + lm is used to fit linear models, specified by + giving a symbolic description of the model to be + fitted. } -\seealso{ - \code{\link{lm} +\details{ + see \code{\link{lm}}. } +\author{ + The original R implementation of glm was written by Simon + Davies working for Ross Ihaka at the University of + Auckland, but has since been extensively re-written by + members of the R Core team. The design was inspired by + the S function of the same name described in Hastie & + Pregibon (1992). 
} +\keyword{covariance} +\keyword{estimation} +\keyword{fitting} +\keyword{HAC} +\keyword{HC} +\keyword{model} +\keyword{regression} From noreply at r-forge.r-project.org Sat Sep 14 01:06:42 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 14 Sep 2013 01:06:42 +0200 (CEST) Subject: [Returnanalytics-commits] r3101 - pkg/PortfolioAnalytics/sandbox Message-ID: <20130913230642.543031856C6@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-14 01:06:41 +0200 (Sat, 14 Sep 2013) New Revision: 3101 Modified: pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R Log: Added graphs to compare different fev biasing values for simplex method. Modified: pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R 2013-09-13 22:30:18 UTC (rev 3100) +++ pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R 2013-09-13 23:06:41 UTC (rev 3101) @@ -31,4 +31,17 @@ # sample has pretty even coverage of feasible space # simplex is concentrated around the assets # grid is 'pushed'/concentrated to the interior due to normalization + +# demonstrate how different values of fev influence the random portfolios of +# the simplex method # This could be a really good example with Shiny for an interactive example +fev <- 0:5 +par(mfrow=c(2, 3)) +for(i in 1:length(fev)){ + rp <- random_portfolios(portfolio=pspec, permutations=2000, rp_method='simplex', fev=fev[i]) + tmp.mean <- apply(rp, 1, function(x) mean(R %*% x)) + tmp.StdDev <- apply(rp, 1, function(x) StdDev(R=R, weights=x)) + plot(x=tmp.StdDev, y=tmp.mean, main=paste("FEV =", fev[i]), + ylab="mean", xlab="StdDev", col=rgb(0, 0, 100, 50, maxColorValue=255)) +} +par(mfrow=c(1,1)) From noreply at r-forge.r-project.org Sat Sep 14 02:06:06 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 14 Sep 2013 02:06:06 +0200 (CEST) Subject: 
[Returnanalytics-commits] r3102 - pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox Message-ID: <20130914000606.2BDF01856DC@r-forge.r-project.org> Author: shubhanm Date: 2013-09-14 02:06:05 +0200 (Sat, 14 Sep 2013) New Revision: 3102 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.pdf Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.pdf Log: Vignettes for different data sets Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.Rnw (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.Rnw 2013-09-14 00:06:05 UTC (rev 3102) @@ -0,0 +1,268 @@ +%% no need for \DeclareGraphicsExtensions{.pdf,.eps} + +\documentclass[12pt,letterpaper,english]{article} +\usepackage{times} +\usepackage[T1]{fontenc} +\IfFileExists{url.sty}{\usepackage{url}} + {\newcommand{\url}{\texttt}} + +\usepackage{babel} +%\usepackage{noweb} +\usepackage{Rd} + +\usepackage{Sweave} +\SweaveOpts{engine=R,eps=FALSE} +%\VignetteIndexEntry{Performance Attribution from Bacon} +%\VignetteDepends{PerformanceAnalytics} +%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} +%\VignettePackage{PerformanceAnalytics} + +%\documentclass[a4paper]{article} +%\usepackage[noae]{Sweave} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} +%\usepackage{graphicx} +%\usepackage{graphicx, verbatim} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage{graphicx} + +\title{Commodity 
Index Fund Performance Analysis} +\author{Shubhankit Mohan} + +\begin{document} +\SweaveOpts{concordance=TRUE} + +\maketitle + + +\begin{abstract} +The fact that many hedge fund returns exhibit extraordinary levels of serial correlation is now well-known and generally accepted as fact. The effect of this autocorrelation on investment returns diminishes the apparent risk of such asset classes as the true returns/risk is easily \textbf{camouflaged} within a haze of liquidity, stale prices, averaged price quotes and smoothed return reporting. We highlight the effect \emph{autocorrelation} and \emph{drawdown} has on performance analysis by investigating the results of functions developed during the Google Summer of Code 2013 on \textbf{commodity based index} . +\end{abstract} + +\tableofcontents + +<>= +library(PerformanceAnalytics) +library(noniid.sm) +data(edhec) +@ + + +\section{Background} +The investigated fund index that tracks a basket of \emph{commodities} to measure their performance.The value of these indexes fluctuates based on their underlying commodities, and this value depends on the \emph{component}, \emph{methodology} and \emph{style} to cover commodity markets . + +A brief overview of the indicies invested in our report are : + \begin{itemize} + \item + \textbf{DJUBS Commodity index} : is a broadly diversified index that allows investors to track commodity futures through a single, simple measure. As the index has grown in popularity since its introduction in 1998, additional versions and a full complement of sub-indices have been introduced. Together, the family offers investors a comprehensive set of tools for measuring the commodity markets. + \item + \textbf{Morningstar CLS index} : is a simple rules-based trend following index operated in commodities + \item + \textbf{Newedge CTI} : includes funds that utilize a variety of investment strategies to profit from price moves in commodity markets. 
+Managers typically use either (i) a trading orientated approach,involving the trading of physical commodity products and/or of commodity +derivative instruments in either directional or relative value strategies; Or (ii) Long short equity strategies focused on commodity related stocks. + \end{itemize} +%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let +%$Z = \sin(X)$. $\sqrt{X}$. + +%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ +%e^{2 \mu} = 1 +%\begin{equation} +%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ +%\end{equation} + +\section{Performance Summary Chart} + +Given a series of historical returns \((R_1,R_2, . . .,R_T)\) from \textbf{January-2001} to \textbf{December-2009}, create a wealth index chart, bars for per-period performance, and underwater chart for drawdown of the 3 funds. + +<>= +data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") +dates <- data$X +values <- data[,-1] # convert percentage to return +COM <- as.xts(values, order.by=as.Date(dates)) +COM.09<-COM[,9:11] +charts.PerformanceSummary(COM.09[1:108,],colorset = rich6equal, lwd = 2, ylog = TRUE) +@ + +The above figure shows the behavior of the respective fund performance, which is \textbf{upward} trending for all the funds till the period of \textbf{"January-2008"}.For comparative purpose, one can observe the distinct \textbf{drawdown} of \textbf{Newedge CTI} since the latter period. + +\section{Statistical and Drawdown Analysis} + +A summary of Fund Return series characteristics show that \textbf{DJUBS.Commodity} performs worse relatively to it's peers.The most distinct characteristic being highest : \textbf{Variance, Stdev, SE Mean} and well as negative \textbf{Skewness} .The table shows clearly, that the returns of all the hedge fund indices are non-normal.Presence of \emph{negative} skewness is a major area of concern for the downside risk potential and expected maximum loss. 
+ +<>= +table.Stats(COM.09, ci = 0.95, digits = 4) +@ + + +The results are consistent with Drawdown Analysis in which \textbf{DJUBS.Commodity} performs worse relatively to it's peers. + +<>= +table.DownsideRisk(COM.09, ci = 0.95, digits = 4) +@ +\section{Non-i.i.d GSoC Usage} +\subsection{Auctocorrelation Adjusted Standard Deviation} +Given a sample of historical returns \((R_1,R_2, . . .,R_T)\),the method assumes the fund manager smooths returns in the following manner, when 't' is the unit time interval, with $\rho$\ as the respective term autocorrelation coefficient + +%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let +%$Z = \sin(X)$. $\sqrt{X}$. + +%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ +%e^{2 \mu} = 1 +%\begin{equation} +%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ +%\end{equation} +\begin{equation} + \sigma_{T} = \sqrt{ \sum_k^n(\sigma_{t}^2 + 2*\rho_i) } \\ +\end{equation} + + +<>= +ACFVol = ACStdDev.annualized(COM.09) +Vol = StdDev.annualized(COM.09) +barplot(rbind(ACFVol,Vol), main="ACF and Orignal Volatility", + xlab="Fund Type",ylab="Volatilty (in %)", col=rich6equal[2:3], beside=TRUE) + legend("topright", c("ACF","Orignal"), cex=0.6, + bty="2", fill=rich6equal[2:3]); +@ + +From the above figure, we can observe that all the funds, exhibit \textbf{serial auto correlation}, which results in significantly \emph{inflated} standard deviation. +\subsection{Andrew Lo Statistics of Sharpe Ratio} + +The building blocks of the \textbf{Sharpe Ratio} : expected returns and volatilities are unknown quantities that must be estimated statistically and are, +therefore, subject to \emph{estimation error} .To address this question, Andrew Lo derives explicit expressions for the statistical distribution of the Sharpe ratio using +standard asymptotic theory. + +The Sharpe ratio (SR) is simply the return per unit of risk (represented by variability). In the classic case, the unit of risk is the standard deviation of the returns. 
+ +\deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} + +The relationship between SR and SR(q) is somewhat more involved for non- +IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the co-variances. Specifically, under +the assumption that returns \(R_t\) are stationary, +\begin{equation} +Var[(R_t)] = \sum_{i=0}^{q-1} \sum_{j=1}^{q-1} Cov(R(t-i),R(t-j)) = q\hat{\sigma^2} + 2\hat{\sigma^2} \sum_{k=1}^{q-1} (q-k)\rho_k \\ +\end{equation} + +Where $\rho$\(_k\) = Cov(\(R(t)\),\(R(t-k\)))/Var[\(R_t\)] is the \(k^{th}\) order autocorrelation coefficient's of the series of returns.This yields the following relationship between SR and SR(q): + +\begin{equation} +\hat{SR}(q) = \eta(q) \\ +\end{equation} + +Where : + +\begin{equation} +\eta(q) = \frac{q}{\sqrt{(q\hat{\sigma^2} + 2\hat{\sigma^2} \sum_{k=1}^{q-1} (q-k)\rho_k)}} \\ +\end{equation} + +In given commodity funds, we find results, similar reported in paper, that the annual Sharpe ratio for a hedge fund can be overstated by as much as \textbf{65} \% because of the presence of \textbf{serial correlation}.We can observe that the fund "\textbf{DJUBS.Commodity}", which has the largest drawdown and serial autocorrelation, has it's Andrew Lo Sharpe ratio , \emph{decrease} most significantly as compared to other funds. + +<>= +Lo.Sharpe = LoSharpe(COM.09) +Theoretical.Sharpe= SharpeRatio.annualized(COM.09) +barplot(rbind(Theoretical.Sharpe,Lo.Sharpe), main="Sharpe Ratio Observed", + xlab="Fund Type",ylab="Value", col=rich6equal[2:3], beside=TRUE) + legend("topright", c("Orginal","Lo"), cex=0.6, + bty="2", fill=rich6equal[2:3]); +@ +\subsection{Conditional Drawdown} +A new one-parameter family of risk measures called Conditional Drawdown (CDD) has +been proposed. These measures of risk are functional of the portfolio drawdown (underwater) curve considered in active portfolio management. 
For some value of $\hat{\alpha}$ the tolerance parameter, in the case of a single sample path, drawdown functional is defined as the mean of the worst (1 \(-\) $\hat{\alpha}$)100\% drawdowns. The CDD measure generalizes the notion of the drawdown functional to a multi-scenario case and can be considered as a generalization of deviation measure to a dynamic case. The CDD measure includes the Maximal Drawdown and Average Drawdown as its limiting cases.Similar to other cases, \textbf{DJUBS.Commodity}, is the worst performing fund with worst case conditional drawdown greater than \textbf{50\%} and \textbf{Newedge.CTI} performing significantly well among the peer commodity indices with less than \textbf{15\%}. + +<>= +c.draw=CDrawdown(COM.09) +e.draw=ES(COM.09,.95,method="gaussian") +c.draw=100*as.matrix(c.draw) +e.draw=100*as.matrix(e.draw) +barplot(rbind(-c.draw,-e.draw), main="Expected Loss in (%) ", + xlab="Fund Type",ylab="Value", col=rich6equal[2:3], beside=TRUE) + legend("topright", c("Conditional Drawdown","Expected Shortfall"), cex=0.6, + bty="2", fill=rich6equal[2:3]); +@ +\subsection{Calmar and Sterling Ratio} +Both the Calmar and the Sterling ratio are the ratio of annualized return over the absolute value of the maximum drawdown of an investment. +{equation} +\begin{equation} + Calmar Ratio = \frac{Return [0,T]}{max Drawdown [0,T]} \\ +\end{equation} + +\begin{equation} + Sterling Ratio = \frac{Return [0,T]}{max Drawdown [0,T] - 10\%} \\ +\end{equation} +<>= +round(CalmarRatio.Norm(COM.09,1),4) +round(SterlingRatio.Norm(COM.09,1),4) +@ +For a 1 year \emph{horizon} return, we can see that Newedge.CTI is the clear performer in this metric as well.However, a \textbf{surprising} observed result, is negative \emph{Sterling} and \emph{Calmar} ratio for Morningstar.CLS . 
+\subsection{GLM Smooth Index} +GLM Smooth Index is a useful parameter to quantify the degree of autocorrelation.It is a summary statistic for measuring the concentration of autocorrelation present in the lag factors (up-to 6) , which can be defined by the below equation as : +\begin{equation} +\xi = \sum_{j=0}^{k} \theta _j^2 \\ +\end{equation} + +This measure is well known in the industrial organization literature as the Herfindahl index, a measure of the concentration of firms in a given industry where $\theta$\(_j\) represents the market share of firm j. Because $\xi_t$\ is confined to the unit interval, and is minimized when all the $\theta$\(_j\) 's are identical, which implies a value of 1/k+1 for $\xi_i$\ ; and is maximized when one coefficient is 1 and the rest are 0. In the context of smoothed returns, a lower value of implies less smoothing, and the upper bound of 1 implies pure smoothing, hence we shall refer to $\theta$\(_j\) as a \textbf{smoothing index}. + +<>= +library(noniid.sm) +source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R") +GLM.index=GLMSmoothIndex(COM.09) +barplot(as.matrix(GLM.index), main="GLM Smooth Index", + xlab="Fund Type",ylab="Value",colorset = rich6equal[1], beside=TRUE) +@ + +For the given chart, we can observe that \textbf{all the funds} have significant level of smooth returns. +\subsection{Acar Shane Maximum Loss} + +Measuring risk through extreme losses is a very appealing idea. This is indeed how financial companies perceive risks. This explains the popularity of loss statistics such as the maximum drawdown and maximum loss. An empirical application to fund managers performance show that \textbf{very few investments} exhibit \emph{abnormally high or low drawdowns}. Consequently, it is doubtful that drawdowns statistics can be used +to significantly distinguish fund managers. 
This is confirmed by the fact that predicting one-period ahead drawdown is an almost impossible task. Errors average at the very best 27\% of the true value observed in the market. + +The main concern of this paper is the study of alternative risk measures: namely maximum loss and maximum drawdown. Unfortunately, there is no analytical formula to establish the maximum drawdown properties under the random walk assumption. We should note first that due to its definition, the maximum drawdown divided by volatility is an only function of the ratio mean divided by volatility. + + +\begin{equation} +MD / \sigma = Min \frac{ \sum_{j=1}^{t} X_{j}}{\sigma} = F(\frac{\mu}{\sigma}) \\ +\end{equation} + +Such a ratio is useful in that this is a complementary statistic to the return divided by volatility ratio. To get some insight on the relationships between maximum drawdown per unit of volatility and mean return divided by volatility, we have proceeded to Monte-Carlo simulations. We have simulated cash flows over a period of 36 monthly returns and measured maximum drawdown for varied levels of annualized return divided by volatility varying from minus two to two by step of 0.1. The process has been repeated six thousand times. + +For instance, an investment exhibiting an annualized return/volatility equal to -2 +should experience on average a maximum drawdown equal to six times the annualized volatility. + +Other observations are that: +\begin{itemize} +\item maximum drawdown is a positive function of the return/volatility ratio +\item confidence interval widens as the return/volatility ratio decreases +\end{itemize} + +This means that as the return/volatility increases not only the magnitude of drawdown decreases but the confidence interval as well. In others words losses are both smaller and more predictable. 
+ +<>= +source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R") +AcarSim(COM.09) +@ + +As we can see from the \emph{simulated chart}, DJUBS.Commodity comes at the bottom , which imply a \emph{lower} \textbf{return-maximum loss} ratio. + +<>= +library(noniid.sm) +chart.Autocorrelation(COM.09) +@ + +Finally, from the autocorrelation lag plot, one can observe, significant \textbf{positive} autocorrelation for \textbf{Newedge.CTI}, which is a \emph{warning} signal in case drawdown occurs, in an otherwise excellent performing fund. +\section{Conclusion} + +Analyzing all the function results, one can clearly differentiate \textbf{Newedge.CTI}, as a far superior fund as compared to it's peer.\textbf{MorningStar.CLS}, exhibits highest autocorrelation as well as lowest Calmar/Sterling ratio, but compared on other front, it distinctly outperforms \textbf{DJUBS.Commodity}, which has performed poorly on all the tests. + +The above figure shows the characteristic of the respective fund performance, which is after the period of analysis till \textbf{"July-2013"}.At this moment, we would like the readers, to use the functions developed in the R \textbf{"PerformanceAnalytics"} package, to study ,use it for analysis as well as for forming their own opinion. 
+ +<>= +charts.PerformanceSummary(COM.09[109:151],colorset = rich6equal, lwd = 2, ylog = TRUE) +@ + + +\end{document} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.pdf =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.pdf ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.Rnw 2013-09-13 23:06:41 UTC (rev 3101) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.Rnw 2013-09-14 00:06:05 UTC (rev 3102) @@ -43,6 +43,8 @@ The fact that many hedge fund returns exhibit extraordinary levels of serial correlation is now well-known and generally accepted as fact. The effect of this autocorrelation on investment returns diminishes the apparent risk of such asset classes as the true returns/risk is easily \textbf{camouflaged} within a haze of illiquidity, stale prices, averaged price quotes and smoothed return reporting. We highlight the effect \emph{autocorrelation} and \emph{drawdown} has on performance analysis by investigating the results of functions developed during the Google Summer of Code 2013 on \textbf{commodity based index} . 
\end{abstract} +\tableofcontents + <>= library(PerformanceAnalytics) library(noniid.sm) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.pdf =================================================================== (Binary files differ) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.Rnw (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.Rnw 2013-09-14 00:06:05 UTC (rev 3102) @@ -0,0 +1,257 @@ +%% no need for \DeclareGraphicsExtensions{.pdf,.eps} + +\documentclass[12pt,letterpaper,english]{article} +\usepackage{times} +\usepackage[T1]{fontenc} +\IfFileExists{url.sty}{\usepackage{url}} + {\newcommand{\url}{\texttt}} + +\usepackage{babel} +%\usepackage{noweb} +\usepackage{Rd} + +\usepackage{Sweave} +\SweaveOpts{engine=R,eps=FALSE} +%\VignetteIndexEntry{Performance Attribution from Bacon} +%\VignetteDepends{PerformanceAnalytics} +%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} +%\VignettePackage{PerformanceAnalytics} + +%\documentclass[a4paper]{article} +%\usepackage[noae]{Sweave} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} +%\usepackage{graphicx} +%\usepackage{graphicx, verbatim} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage{graphicx} + +\title{Commodity Index Fund Performance Analysis} +\author{Shubhankit Mohan} + +\begin{document} +\SweaveOpts{concordance=TRUE} + +\maketitle + + +\begin{abstract} +The fact that many hedge fund returns exhibit extraordinary levels of serial correlation is now well-known and generally accepted as fact. 
The effect of this autocorrelation on investment returns diminishes the apparent risk of such asset classes as the true returns/risk is easily \textbf{camouflaged} within a haze of illiquidity, stale prices, averaged price quotes and smoothed return reporting.
+ \end{itemize} +%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let +%$Z = \sin(X)$. $\sqrt{X}$. + +%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ +%e^{2 \mu} = 1 +%\begin{equation} +%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ +%\end{equation} + +\section{Performance Summary Chart} + +Given a series of historical returns \((R_1,R_2, . . .,R_T)\) from \textbf{January-96} to \textbf{December-2006}, create a wealth index chart, bars for per-period performance, and underwater chart for drawdown of the 3 funds. + +<>= +data(managers) +charts.PerformanceSummary(managers[,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +@ + +The above figure shows the behavior of the respective fund performance, which is \textbf{upward} trending for all the funds till the period of \textbf{"January-2008"}.For comparative purpose, one can observe the distinct \textbf{drawdown} of \textbf{Newedge CTI} since the latter period. + +\section{Statistical and Drawdown Analysis} + +A summary of Fund Return series characteristics show that \textbf{DJUBS.Commodity} performs worse relatively to it's peers.The most distinct characteristic being highest : \textbf{Variance, Stdev, SE Mean} and well as negative \textbf{Skewness} .The table shows clearly, that the returns of all the hedge fund indices are non-normal.Presence of \emph{negative} skewness is a major area of concern for the downside risk potential and expected maximum loss. + +<>= +table.Stats(managers[,1:6], ci = 0.95, digits = 4) +@ + + +The results are consistent with Drawdown Analysis in which \textbf{DJUBS.Commodity} performs worse relatively to it's peers. + +<>= +table.DownsideRisk(managers[,1:6], ci = 0.95, digits = 4) +@ +\section{Non-i.i.d GSoC Usage} +\subsection{Auctocorrelation Adjusted Standard Deviation} +Given a sample of historical returns \((R_1,R_2, . . 
.,R_T)\),the method assumes the fund manager smooths returns in the following manner, when 't' is the unit time interval, with $\rho$\ as the respective term autocorrelation coefficient + +%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let +%$Z = \sin(X)$. $\sqrt{X}$. + +%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ +%e^{2 \mu} = 1 +%\begin{equation} +%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ +%\end{equation} +\begin{equation} + \sigma_{T} = \sqrt{ \sum_k^n(\sigma_{t}^2 + 2*\rho_i) } \\ +\end{equation} + + +<>= +ACFVol = ACStdDev.annualized(managers[,1:6]) +Vol = StdDev.annualized(managers[,1:6]) +barplot(rbind(ACFVol,Vol), main="ACF and Orignal Volatility", + xlab="Fund Type",ylab="Volatilty (in %)", col=rich6equal[2:3], beside=TRUE) + legend("topright", c("ACF","Orignal"), cex=0.6, + bty="2", fill=rich6equal[2:3]); +@ + +From the above figure, we can observe that all the funds, exhibit \textbf{serial auto correlation}, which results in significantly \emph{inflated} standard deviation. +\subsection{Andrew Lo Statistics of Sharpe Ratio} + +The building blocks of the \textbf{Sharpe Ratio} : expected returns and volatilities are unknown quantities that must be estimated statistically and are, +therefore, subject to \emph{estimation error} .To address this question, Andrew Lo derives explicit expressions for the statistical distribution of the Sharpe ratio using +standard asymptotic theory. + +The Sharpe ratio (SR) is simply the return per unit of risk (represented by variability). In the classic case, the unit of risk is the standard deviation of the returns. + +\deqn{\frac{\overline{(R_{a}-R_{f})}}{\sqrt{\sigma_{(R_{a}-R_{f})}}}} + +The relationship between SR and SR(q) is somewhat more involved for non- +IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the co-variances. 
Specifically, under +the assumption that returns \(R_t\) are stationary, +\begin{equation} +Var[(R_t)] = \sum_{i=0}^{q-1} \sum_{j=1}^{q-1} Cov(R(t-i),R(t-j)) = q\hat{\sigma^2} + 2\hat{\sigma^2} \sum_{k=1}^{q-1} (q-k)\rho_k \\ +\end{equation} + +Where $\rho$\(_k\) = Cov(\(R(t)\),\(R(t-k\)))/Var[\(R_t\)] is the \(k^{th}\) order autocorrelation coefficient's of the series of returns.This yields the following relationship between SR and SR(q): + +\begin{equation} +\hat{SR}(q) = \eta(q) \\ +\end{equation} + +Where : + +\begin{equation} +\eta(q) = \frac{q}{\sqrt{(q\hat{\sigma^2} + 2\hat{\sigma^2} \sum_{k=1}^{q-1} (q-k)\rho_k)}} \\ +\end{equation} + +In given commodity funds, we find results, similar reported in paper, that the annual Sharpe ratio for a hedge fund can be overstated by as much as \textbf{65} \% because of the presence of \textbf{serial correlation}.We can observe that the fund "\textbf{DJUBS.Commodity}", which has the largest drawdown and serial autocorrelation, has it's Andrew Lo Sharpe ratio , \emph{decrease} most significantly as compared to other funds. + +<>= +Lo.Sharpe = LoSharpe(managers[,1:6]) +Theoretical.Sharpe= SharpeRatio.annualized(managers[,1:6]) +barplot(rbind(Theoretical.Sharpe,Lo.Sharpe), main="Sharpe Ratio Observed", + xlab="Fund Type",ylab="Value", col=rich6equal[2:3], beside=TRUE) + legend("topright", c("Orginal","Lo"), cex=0.6, + bty="2", fill=rich6equal[2:3]); +@ +\subsection{Conditional Drawdown} +A new one-parameter family of risk measures called Conditional Drawdown (CDD) has +been proposed. These measures of risk are functional of the portfolio drawdown (underwater) curve considered in active portfolio management. For some value of $\hat{\alpha}$ the tolerance parameter, in the case of a single sample path, drawdown functional is defined as the mean of the worst (1 \(-\) $\hat{\alpha}$)100\% drawdowns. 
The CDD measure generalizes the notion of the drawdown functional to a multi-scenario case and can be considered as a generalization of deviation measure to a dynamic case. The CDD measure includes the Maximal Drawdown and Average Drawdown as its limiting cases.Similar to other cases, \textbf{DJUBS.Commodity}, is the worst performing fund with worst case conditional drawdown greater than \textbf{50\%} and \textbf{Newedge.CTI} performing significantly well among the peer commodity indices with less than \textbf{15\%}. + + + + +\subsection{Calmar and Sterling Ratio} +Both the Calmar and the Sterling ratio are the ratio of annualized return over the absolute value of the maximum drawdown of an investment. +{equation} +\begin{equation} + Calmar Ratio = \frac{Return [0,T]}{max Drawdown [0,T]} \\ +\end{equation} + +\begin{equation} + Sterling Ratio = \frac{Return [0,T]}{max Drawdown [0,T] - 10\%} \\ +\end{equation} +<>= +round(CalmarRatio.Norm(managers[,1:6],1),4) +round(SterlingRatio.Norm(managers[,1:6],1),4) +@ +For a 1 year \emph{horizon} return, we can see that Newedge.CTI is the clear performer in this metric as well.However, a \textbf{surprising} observed result, is negative \emph{Sterling} and \emph{Calmar} ratio for Morningstar.CLS . +\subsection{GLM Smooth Index} +GLM Smooth Index is a useful parameter to quantify the degree of autocorrelation.It is a summary statistic for measuring the concentration of autocorrelation present in the lag factors (up-to 6) , which can be defined by the below equation as : +\begin{equation} +\xi = \sum_{j=0}^{k} \theta _j^2 \\ +\end{equation} + +This measure is well known in the industrial organization literature as the Herfindahl index, a measure of the concentration of firms in a given industry where $\theta$\(_j\) represents the market share of firm j. 
Because $\xi_t$\ is confined to the unit interval, and is minimized when all the $\theta$\(_j\) 's are identical, which implies a value of 1/k+1 for $\xi_i$\ ; and is maximized when one coefficient is 1 and the rest are 0. In the context of smoothed returns, a lower value of implies less smoothing, and the upper bound of 1 implies pure smoothing, hence we shall refer to $\theta$\(_j\) as a \textbf{smoothing index}. + +<>= +library(noniid.sm) +source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R") +GLM.index=GLMSmoothIndex(managers[,1:6]) +barplot(as.matrix(GLM.index), main="GLM Smooth Index", + xlab="Fund Type",ylab="Value",colorset = rich6equal[1], beside=TRUE) +@ + +For the given chart, we can observe that \textbf{all the funds} have significant level of smooth returns. +\subsection{Acar Shane Maximum Loss} + +Measuring risk through extreme losses is a very appealing idea. This is indeed how financial companies perceive risks. This explains the popularity of loss statistics such as the maximum drawdown and maximum loss. An empirical application to fund managers performance show that \textbf{very few investments} exhibit \emph{abnormally high or low drawdowns}. Consequently, it is doubtful that drawdowns statistics can be used +to significantly distinguish fund managers. This is confirmed by the fact that predicting one-period ahead drawdown is an almost impossible task. Errors average at the very best 27\% of the true value observed in the market. + +The main concern of this paper is the study of alternative risk measures: namely maximum loss and maximum drawdown. Unfortunately, there is no analytical formula to establish the maximum drawdown properties under the random walk assumption. We should note first that due to its definition, the maximum drawdown divided by volatility is an only function of the ratio mean divided by volatility. 
+ + +\begin{equation} +MD / \sigma = Min \frac{ \sum_{j=1}^{t} X_{j}}{\sigma} = F(\frac{\mu}{\sigma}) \\ +\end{equation} + +Such a ratio is useful in that this is a complementary statistic to the return divided by volatility ratio. To get some insight on the relationships between maximum drawdown per unit of volatility and mean return divided by volatility, we have proceeded to Monte-Carlo simulations. We have simulated cash flows over a period of 36 monthly returns and measured maximum drawdown for varied levels of annualized return divided by volatility varying from minus two to two by step of 0.1. The process has been repeated six thousand times. + +For instance, an investment exhibiting an annualized return/volatility equal to -2 +should experience on average a maximum drawdown equal to six times the annualized volatility. + +Other observations are that: +\begin{itemize} +\item maximum drawdown is a positive function of the return/volatility ratio +\item confidence interval widens as the return/volatility ratio decreases +\end{itemize} + +This means that as the return/volatility increases not only the magnitude of drawdown decreases but the confidence interval as well. In others words losses are both smaller and more predictable. + +<>= +source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R") +AcarSim(managers[,1:6]) +@ + +As we can see from the \emph{simulated chart}, DJUBS.Commodity comes at the bottom , which imply a \emph{lower} \textbf{return-maximum loss} ratio. + +<>= +library(noniid.sm) +chart.Autocorrelation(managers[,1:6]) +@ + +Finally, from the autocorrelation lag plot, one can observe, significant \textbf{positive} autocorrelation for \textbf{Newedge.CTI}, which is a \emph{warning} signal in case drawdown occurs, in an otherwise excellent performing fund. 
[TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3102 From noreply at r-forge.r-project.org Sat Sep 14 02:12:18 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 14 Sep 2013 02:12:18 +0200 (CEST) Subject: [Returnanalytics-commits] r3103 - in pkg/PerformanceAnalytics/sandbox/Shubhankit: . noniid.sm noniid.sm/.Rproj.user noniid.sm/.Rproj.user/E5D7D248 noniid.sm/.Rproj.user/E5D7D248/pcs noniid.sm/.Rproj.user/E5D7D248/sdb noniid.sm/.Rproj.user/E5D7D248/sdb/per noniid.sm/.Rproj.user/E5D7D248/sdb/per/t noniid.sm/.Rproj.user/E5D7D248/sdb/prop noniid.sm/vignettes sandbox sandbox/Week6-7 sandbox/Week6-7/Code sandbox/Week6-7/Code/Covariance Matrix Integrated Regression Function sandbox/Week6-7/Code/Data sandbox/Week6-7/Code/Equivalent Matlab Code sandbox/Week6-7/Code/Tests sandbox/Week6-7/Literature sandbox/Week6-7/Vignette sandbox/vignettes Message-ID: <20130914001218.C888E1856DC@r-forge.r-project.org> Author: shubhanm Date: 2013-09-14 02:12:17 +0200 (Sat, 14 Sep 2013) New Revision: 3103 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rbuildignore pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rhistory pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/ctx/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/files-pane.pper pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/packages-pane.pper pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/source-pane.pper pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/windowlayoutstate.pper pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/workbench-pane.pper 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/persistent-state pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/32D790F7 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/445E439C pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/44B07808 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/58E583C6 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/7D095D73 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/934ACCDE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/C4A4A866 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/F08D801A pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/u/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/1CFFFCF5 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/2991F3E9 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/2AD6DF94 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/2BD04ACB pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/2EE78BD4 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/2F79FCCA pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/33CAE6A9 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/37353A3D pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/3ABA837C pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/3EF1051C pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/40B77CD3 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/41919319 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/43D7BE7F pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/4612DDC1 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/4836AC8C pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/5049B60E pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/5D302CE3 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/5D6C2593 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/5ED82E8C pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/60D2FDD5 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/649AEBE6 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/716852E0 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/717883C7 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/72E0BDFF pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/75C0309 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/7909B0E2 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/7A63D707 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/7AFAB966 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/7BD69630 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/7E8C5556 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/8047D8D6 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/81399909 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/88C0FBC4 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/892D5F16 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/915BCBE3 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/915ED3AD pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/945F7AEE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/9B1541E6 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/A036626A pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/A16037E8 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/A65013F5 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/A69BD695 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/AA22084D pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/AC219D7D pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/C8912CEB pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/C9696525 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/C9BD1399 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/CE4BAAEC pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/CF948D8A 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/D581FC31 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/DA251988 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/DA891E76 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/DEAD1743 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/E76F5680 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/EA9396 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/EF28E7CC pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/F0290DA5 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/F24BE9F7 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/F9E07115 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/FFE2C69 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/INDEX pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/noniid.sm.Rproj pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-006.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-008.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.toc pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-007.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-008.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-Graph3.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-Graph4.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-Graph5.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-Graph6.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.toc pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-002.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-009.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-010.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-011.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-012.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.toc 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-002.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-009.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-010.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-011.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-012.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.toc pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-002.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-008.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-009.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-010.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-011.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.toc pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/ 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Covariance Matrix Integrated Regression Function/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Covariance Matrix Integrated Regression Function/glmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Covariance Matrix Integrated Regression Function/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Covariance Matrix Integrated Regression Function/nlsi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Data/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Data/Investment.csv pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Data/PublicSchools.csv pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Data/RealInt.csv pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Data/inst/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Data/man/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Data/ps.csv pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Equivalent Matlab Code/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Equivalent Matlab Code/NWmissings.m pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Equivalent Matlab Code/effort.dat pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Equivalent Matlab Code/nwse.m pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Equivalent Matlab Code/regstats2.m pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Equivalent Matlab Code/sim_NWmissings.m pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Tests/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Tests/Cross Sectional Data.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Tests/HAC Data.R 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Tests/Tests.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Code/Tests/Time Series Data.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Literature/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Literature/Thumbs.db pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Literature/Zelisis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Literature/sandwich-OOP.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/HACintegrated-hac-kweights.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/HACintegrated-hac-plot.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/HACintegrated-hc-plot.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/HACintegrated.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/HACintegrated.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/HACintegrated.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/Test_Report.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/Vignette/Test_Report.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Week6-7/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.tex 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/Cheklov.CDDOpt.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/Commodity_ResearchReport.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/Commodity_ResearchReport.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ConditionalDrawdown-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ConditionalDrawdown.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ConditionalDrawdown.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMReturn-Graph1.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMReturn-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMReturn.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMSmoothIndex.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMSmoothIndex.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpe.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/MaximumLoss.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/NormCalmar-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/NormCalmar.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/NormCalmar.rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/OkunevWhite-Graph1.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/OkunevWhite-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/OkunevWhite.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/OkunevWhite.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/Rplots.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ShaneAcarMaxLoss-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ShaneAcarMaxLoss.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ShaneAcarMaxLoss.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-008.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-Graph3.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-Graph4.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-Graph5.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-Graph6.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.tex 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.toc Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rbuildignore pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rhistory pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rproj.user/ pkg/PerformanceAnalytics/sandbox/Shubhankit/DESCRIPTION pkg/PerformanceAnalytics/sandbox/Shubhankit/Gsoc-iid.Rproj pkg/PerformanceAnalytics/sandbox/Shubhankit/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/R/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Shubhankit.Rproj pkg/PerformanceAnalytics/sandbox/Shubhankit/Week1/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week2/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week3/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week4/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week5/ pkg/PerformanceAnalytics/sandbox/Shubhankit/Week6-7/ pkg/PerformanceAnalytics/sandbox/Shubhankit/data/ pkg/PerformanceAnalytics/sandbox/Shubhankit/inst/ pkg/PerformanceAnalytics/sandbox/Shubhankit/man/ pkg/PerformanceAnalytics/sandbox/Shubhankit/src/ pkg/PerformanceAnalytics/sandbox/Shubhankit/tests/ pkg/PerformanceAnalytics/sandbox/Shubhankit/vignettes/ Log: Kindly Ignore Commit : garbage file removal and useful code transfer in sandbox/ Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rbuildignore =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rbuildignore 2013-09-14 00:06:05 UTC (rev 3102) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rbuildignore 2013-09-14 00:12:17 UTC (rev 3103) @@ -1,7 +0,0 @@ -sandbox -generatechangelog\.sh -ChangeLog\.1\.0\.0 -week* -Week* -^.*\.Rproj$ -^\.Rproj\.user$ Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rhistory =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rhistory 2013-09-14 00:06:05 UTC (rev 3102) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/.Rhistory 2013-09-14 00:12:17 UTC (rev 3103) 
@@ -1,48 +0,0 @@ -devtools::load_all(".") -package.skeleton("noniid.sm") -R CMD check noniid.sm -library(noniid.sm) -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R') -library(PerformanceAnalytics) -data(edhec) -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R') -devtools::load_all("noniid.sm") -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.GLM.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/se.LoSharpe.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R') -get("edhec") -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R') 
-source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CalmarRatio.Norm.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/SterlingRatio.Norm.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.Okunev.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/chart.Autocorrelation.R') -roxygenize() -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/") -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R") -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm") -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R") -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm") -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm") -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R') 
-source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.ComparitiveReturn.GLM.R') -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.UnsmoothReturn.R') -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm") -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm") -roxygenize("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm") -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -library(noniid.sm) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/DESCRIPTION 2013-09-14 00:06:05 UTC (rev 3102) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/DESCRIPTION 2013-09-14 00:12:17 UTC (rev 3103) @@ -1,38 +0,0 @@ -Package: noniid.sm -Type: Package -Title: Non-i.i.d. GSoC 2013 Shubhankit -Version: 0.1 -Date: $Date: 2013-05-13 14:30:22 -0500 (Mon, 13 May 2013) $ -Author: Shubhankit Mohan -Contributors: Peter Carl, Brian G. Peterson -Depends: - xts, - PerformanceAnalytics -Suggests: - PortfolioAnalytics -Maintainer: Brian G. Peterson -Description: GSoC 2013 project to replicate literature on drawdowns and - non-i.i.d assumptions in finance. 
-License: GPL-3 -ByteCompile: TRUE -Collate: - 'ACStdDev.annualized.R' - 'CDDopt.R' - 'CDrawdown.R' - 'chart.Autocorrelation.R' - 'EmaxDDGBM.R' - 'GLMSmoothIndex.R' - 'maxDDGBM.R' - 'na.skip.R' - 'Return.GLM.R' - 'table.ComparitiveReturn.GLM.R' - 'table.UnsmoothReturn.R' - 'UnsmoothReturn.R' - 'AcarSim.R' - 'CDD.Opt.R' - 'CalmarRatio.Norm.R' - 'SterlingRatio.Norm.R' - 'LoSharpe.R' - 'Return.Okunev.R' - 'se.LoSharpe.R' - 'chart.AcarSim.R' Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/Gsoc-iid.Rproj =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/Gsoc-iid.Rproj 2013-09-14 00:06:05 UTC (rev 3102) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Gsoc-iid.Rproj 2013-09-14 00:12:17 UTC (rev 3103) @@ -1,17 +0,0 @@ -Version: 1.0 - -RestoreWorkspace: Yes -SaveWorkspace: Yes -AlwaysSaveHistory: Yes - -EnableCodeIndexing: Yes -UseSpacesForTab: Yes -NumSpacesForTab: 2 -Encoding: UTF-8 - -RnwWeave: Sweave -LaTeX: pdfLaTeX - -BuildType: Package -PackageInstallArgs: --no-multiarch -PackageRoxygenize: rd Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/NAMESPACE 2013-09-14 00:06:05 UTC (rev 3102) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/NAMESPACE 2013-09-14 00:12:17 UTC (rev 3103) @@ -1,18 +0,0 @@ -export(AcarSim) -export(ACStdDev.annualized) -export(CalmarRatio.Norm) -export(CDD.Opt) -export(CDDOpt) -export(CDrawdown) -export(chart.AcarSim) -export(chart.Autocorrelation) -export(EMaxDDGBM) -export(GLMSmoothIndex) -export(LoSharpe) -export(Return.GLM) -export(Return.Okunev) -export(se.LoSharpe) -export(SterlingRatio.Norm) -export(table.ComparitiveReturn.GLM) -export(table.EMaxDDGBM) -export(table.UnsmoothReturn) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/Shubhankit.Rproj =================================================================== --- 
pkg/PerformanceAnalytics/sandbox/Shubhankit/Shubhankit.Rproj 2013-09-14 00:06:05 UTC (rev 3102) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/Shubhankit.Rproj 2013-09-14 00:12:17 UTC (rev 3103) @@ -1,18 +0,0 @@ -Version: 1.0 - -RestoreWorkspace: Yes -SaveWorkspace: Yes -AlwaysSaveHistory: Yes - -EnableCodeIndexing: Yes -UseSpacesForTab: Yes -NumSpacesForTab: 2 -Encoding: UTF-8 - -RnwWeave: Sweave -LaTeX: pdfLaTeX - -BuildType: Package -PackagePath: noniid.sm -PackageInstallArgs: --no-multiarch -PackageRoxygenize: rd Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rbuildignore =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rbuildignore (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rbuildignore 2013-09-14 00:12:17 UTC (rev 3103) @@ -0,0 +1,2 @@ +^.*\.Rproj$ +^\.Rproj\.user$ Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rhistory =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rhistory (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rhistory 2013-09-14 00:12:17 UTC (rev 3103) @@ -0,0 +1,512 @@ +for(i in 1:15) {if(managers[i,8]>0){splus[i]=managers[,8]}else{splus[i]=0}} +a=edhec[,1] +a=(edhec[:,1]>0) +a=(edhec[,1]>0) +head(a) +plot(a) +a=(edhec[,1]<0) +plot(a) +head(a) +a[,6] +a[,3] +lm(edhec[,1]~managers[,2]) +lm(edhec[,1]~managers[,8]) +a=managers[,8] +lm(edhec[1:132,1]~managers[,8]) +a=(edhec[,1]>0) +a +a[1,1] +a[1,1]*2 +a=(edhec[1:10,1]>0) +a +a=(edhec[1:15,1]>0) +a +a=(edhec[10:15,1]>0) +a +a*1 +a*edhec[10:15,1] +a=(edhec[10:15,1]>0) +a +a*edhec[0:15,1] +managers[,8] +head(managers[,8]) +sp+ = (managers[,8]>0)*managers[,8] +spplus = (managers[,8]>0)*managers[,8] +spminus = (managers[,8]<0)*managers[,8] +lm(edhec[,1]~spplus[]+spminus) +lm(edhec[1:132,1]~spplus[]+spminus) +lm(edhec[1:132,1]~spplus[]+spminus+managers[,9]) 
+table.autocorrelation +table.Autocorrelation +lm(edhec[1:132,1]~spplus[]+spminus+managers[,9]) +table.Autocorrelation(edhec,spplus) +table.Autocorrelation(edhec[,9],spplus) +table.Correlation(edhec[],managers[,8]) +table.Correlation(edhec[],spminus) +table.Correlation(edhec[],spplus) +table.Correlation(Return.okunev(edhec[]),spplus) +table.Correlation(Return.Okunev(edhec[]),spplus) +a=table.Correlation(Return.okunev(edhec[]),spplus) +a=table.Correlation(Return.Okunev(edhec[]),spplus) +b=table.Correlation(edhec[],spplus) +b-a +a=table.Correlation(Return.Okunev(edhec[]),sminus) +a=table.Correlation(Return.Okunev(edhec[]),spminus) +b=table.Correlation(edhec[],spminus) +a-b +a +b +chart(a) +plot(a) +plot(a[,1]) +chart.PerformanceSummary(edhec[,12]) +charts.PerformanceSummary(edhec[,12]) +charts.PerformanceSummary(edhec[,12],managers[,8]) +a=c(edhec[,12],managers[,8]) +a=cbind(edhec[,12],managers[,8]) +charts.PerformanceSummary(a) +a=cbind(edhec[,12],managers[,6]) +charts.PerformanceSummary(a) +a=cbind(edhec[,6],managers[,8]) +charts.PerformanceSummary(a) +b=table.Correlation(edhec[],spminus) +b +a=table.Correlation(Return.Okunev(edhec),spminus) +a +bb=cbind(spminus,spplus) +charts.PerformanceSummary(bb) +a=cbind(edhec[,6],managers[,8]) +charts.PerformanceSummary(a) +a=table.Correlation(edhec,spminus) +b=table.Correlation(edhec,spplus) +b-a +a +b +b=table.Correlation(edhec,managers[,8]) +b +b=table.Correlation(edhec,managers[,9]) +b +b=table.Correlation(Return.Okunev(edhec),managers[,9]) +b +VaR(edhec) +VaR(Return.Okunev(edhec)) +table.DrawdownsRatio(edhec) +table.DrawdownsRatio(Return.Okunev(edhec)) +table.DownsideRisk(Return.Okunev(edhec)) +table.DownsideRisk(edhec) +charts.PerformanceSummary(edhec) +charts.PerformanceSummary(Return.Okunev(edhec)) +a=edhec[,1] +a=edhec[140,1] +a +edhec[135:145,1] +edhec[125:145,1] +edhec[129:145,1] +edhec[135:145,1] +edhec[133:145,1] +edhec[132:145,1] +charts.PerformanceSummary(edhec[1:132,1:4],colorset = rich6equal, lwd = 2, ylog = 
TRUE) +charts.PerformanceSummary(edhec[,1:4],colorset = rich6equal, lwd = 2, ylog = TRUE) +table.Autocorrelation(edhec) +chart.Autocorrelation(edhec) +chart.Autocorrelation(edhec[,1:4]) +chart.Autocorrelation(Return.Okunev(edhec[,1:4])) +charts.PerformanceSummary(edhec[,],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[132:152,],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[132:152,2],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[132:152,],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[132:152,1],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[132:152,2:5],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[132:152,2],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[,2],colorset = rich6equal, lwd = 2, ylog = TRUE) +table.stats(edhec) +table.Stats(edhec) +?table.Stats +data(edhec) +table.Stats(edhec[,1:3]) +t(table.Stats(edhec)) +result=t(table.Stats(edhec)) +require("Hmisc") +textplot(format.df(result, na.blank=TRUE, numeric.dollar=FALSE, cdec=c(rep(1,2),rep(3,14))), rmar = 0.8, cmar = 1.5, max.cex=.9, halign = "center", valign = "top", row.valign="center", wrap.rownames=10, wrap.colnames=10, mar = c(0,0,3,0)+0.1) +title(main="Statistics for EDHEC Indexes") +data(edhec) +table.Stats(edhec[,1:3]) +t(table.Stats(edhec)) +result=t(table.Stats(edhec[,1:3])) +require("Hmisc") +textplot(format.df(result, na.blank=TRUE, numeric.dollar=FALSE, cdec=c(rep(1,2),rep(3,14))), rmar = 0.8, cmar = 1.5, max.cex=.9, halign = "center", valign = "top", row.valign="center", wrap.rownames=10, wrap.colnames=10, mar = c(0,0,3,0)+0.1) +title(main="Statistics for EDHEC Indexes") +charts.PerformanceSummary(managers,colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(managers[,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +data <- 
read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") +dates <- data$X +values <- data[,-1] # convert percentage to return +COM <- as.xts(values, order.by=as.Date(dates)) +COM.09<-COM[,9:11] +charts.PerformanceSummary(COM.09[1:108,],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(COM[,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(COM[1:108,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +COM[,1] +a=COM[,1] +charts.PerformanceSummary(COM[108,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(COM[,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(COM[1:10,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +head(COM[1:10,1:6]) +head(COM[1:151,1:6]) +head(COM[151,1:6]) +head(COM[151,1:6]) +head(COM[36,1:6]) +head(COM[70,1:6]) +head(COM[75,1:6]) +head(COM[76,1:6]) +head(COM[65,1:6]) +head(COM[68,1:6]) +head(COM[140,1:6]) +head(COM[142,1:6]) +head(COM[145,1:6]) +charts.PerformanceSummary(COM[1:10,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(COM[68:145],1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(COM[68:145,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) +table.AnnualizedReturns(edhec) +table.AnnualizedReturns(Return.Okunev(edhec)) +charts.PerformanceSummary(edhec[,5],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[,8],colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[,5:8],colorset = rich6equal, lwd = 2, ylog = TRUE) +table.DownsideRisk(edhec) +table.DownsideRiskRatio(edhec) +chart.AcarSim(edhec) +AcarSim(edhec) +chart.AcarSim(Return.Okunev(edhec)) +data(managers) +head(managers) +a=managers[,1] +a[132] +data(edhec) +edhec +managers[,132] +managers[132,1] +edhec(132,1) +edhec[132,1] +edhec[120,1] +table.Correlation(edhec[1:120,],managers[,8]) +managers[12,8] +managers[13,8] 
+table.Correlation(edhec[1:120,],managers[13:132,8]) +table.Correlation(edhec[,],managers[,8]) +?table.DownSideRisk +?table.DownsideRisk +?table.DownsideRiskRatio +table.DownsideRisk(edhec) +table.DownsideRisk(Return.Okunev(edhec)) +q=table.DownsideRisk(Return.Okunev(edhec)) +p=table.DownsideRisk(edhec) +q-p +(q-p)/q +table.SpecificRisk(edhec) +table.SpecificRisk(edhec,managers[,8]) +table.SpecificRisk(edhec,managers[,8],0) +CAPM(edhec,managers[,8],0) +CAPM.beta(edhec,managers[,8],0) +CAPM.beta(Return.Okunev(edhec),managers[,8],0) +table.UpDownRatios +?table.UpDownRatios +table.UpDownRatios(edhec,managers[,8]) +Return.Annualized(edhec) +Return.annualized(edhec) +a=Return.annualized(edhec) +plot(a) +plot(a) +a +a=Return.annualized(Return.Okunev(edhec)) +a +table.Autocorrelation(edhec) +?SharpeRatio +?VaR +data(edhec) +VaR(edhec[,1:3,drop=FALSE],method="normal") +VaR(Return.Okunev(edhec[,1:3,drop=FALSE]),method="normal") +# now use Gaussian +VaR(edhec, p=.95, method="gaussian") +CAPM.jensenAlpha(edhec,managers[,8],Rf = managers[, "US 3m TR", drop=FALSE]) +CAPM.jensenAlpha(edhec,managers[,8],Rf = managers[12:132, "US 3m TR", drop=FALSE]) +CAPM.jensenAlpha(edhec,managers[12:132,8],Rf = managers[12:132, "US 3m TR", drop=FALSE]) +CAPM.jensenAlpha(edhec[1:121],managers[12:132,8],Rf = managers[12:132, "US 3m TR", drop=FALSE]) +SystematicRisk(edhec) +SystematicRisk(edhec,managers[,8]) +SystematicRisk(Return.Okunev(edhec),managers[,8]) +chart.RiskReturnScatter +chart.RiskReturnScatter(edhec) +chart.RiskReturnScatter(edhec[trailing36.rows,1:8], Rf=.03/12, main = "Trailing 36-Month Performance", colorset=c("red", rep("black",5), "orange", "green")) +trailing36.rows +table.Autocorrelation(edhec) +chart.QQPlot +?chart.QQPlot +x = checkData(managers[,2, drop = FALSE], na.rm = TRUE, method = "vector") +#layout(rbind(c(1,2),c(3,4))) +# Panel 1, Normal distribution +chart.QQPlot(x, main = "Normal Distribution", distribution = 'norm', envelope=0.95) +# Panel 2, Log-Normal 
distribution +fit = fitdistr(1+x, 'lognormal') +chart.QQPlot(1+x, main = "Log-Normal Distribution", envelope=0.95, distribution='lnorm') +#other options could include +#, meanlog = fit$estimate[[1]], sdlog = fit$estimate[[2]]) +## Not run: +# Panel 3, Skew-T distribution +library(sn) +fit = st.mle(y=x) +chart.QQPlot(x, main = "Skew T Distribution", envelope=0.95, +distribution = 'st', location = fit$dp[[1]], +scale = fit$dp[[2]], shape = fit$dp[[3]], df=fit$dp[[4]]) +#Panel 4: Stable Parietian +library(fBasics) +fit.stable = stableFit(x,doplot=FALSE) +chart.QQPlot(x, main = "Stable Paretian Distribution", envelope=0.95, +distribution = 'stable', alpha = fit(stable.fit)$estimate[[1]], +beta = fit(stable.fit)$estimate[[2]], gamma = fit(stable.fit)$estimate[[3]], +delta = fit(stable.fit)$estimate[[4]], pm = 0) +## End(Not run) +#end examples +?chart.Events +charts.Bar(edhec) +charts.Bar(edhec[,1]) +chart.VaRSensitivity(edhec[,1]) +managers[,132] +managers[132,1] +head(edhec) +table.Autocorrelation(edhec) +data(edhec) +library("noniid.sm", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") +data(edhec) +table.Autocorrelation(edhec) +a=table.Autocorrelation(edhec) +t(a) +t(a) +xtable(a) +install.packages("xtable") +library("xtable", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") +xtable(a) +install.packages("stargazer") +library(stargazer) +data(edhec) +stargazer(edhec[1:10,1]) +stargazer(edhec[1:10,1],summary=FALSE) +edhec +library("SweaveListingUtils", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") +stargazer(attitude) +stargazer(attitude) +stargazer(edhec) +stargazer(Return.Annualized(edhec)) +stargazer(Return.Annualized(edhec)) +CalmarRatio(edhec[,1:4]) +a=CalmarRatio(edhec[,1:4]) +summary(a) +fm2 <- lm(tlimth ~ sex * ethnicty, data = tli) +data(tli) +fm2 <- lm(tlimth ~ sex * ethnicty, data = tli) +print(xtable(anova(fm2)), type="html") +library(xtable) +library(xtable) +source('~/R/win-library/3.0/xtable/doc/xtableGallery.R') 
+getwd() +roxygenize(getwd()) +library("roxygen2", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") +roxygenize(getwd()) +?glm +glm +?lm +viewsource(lm) +view(lm) +detach("package:stats", unload=TRUE) +library("stats", lib.loc="C:/Program Files/R/R-3.0.1/library") +lm +?lm +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') +?glmi +?glmi +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') +roxygenize(getwd()) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') +??noniid.sm +library(PerformanceAnalytics) +data(edhec) +table.EMaxDDGBM(edhec) +table.DrawDown(edhec) +table.DownSideRisk(edhec) +table.DownsideRisk(edhec) +a=table.DownsideRisk(edhec[,1:4]) +t(a) +a=table.DownsideRisk(edhec[,]) +t(a) +library(PerformanceAnalytics) +data(edhec) +table.EMaxDDGBM(edhec) +library(PerformanceAnalytics) +data(edhec) +b=table.EMaxDDGBM(edhec) +t(b) +Rank(edhec) +b[order()] +b[order(Expected Drawdown in %)] +rank(b) +rank(t(b)) +rank(t(b[:,3])) +b(,3) +b +b[1,2] +b[:,3] +b[,3] +b[3,] +rank(b[3,]) +rank(a[3,]) +rank(a[11,]) +rank(b[3,]) +rank(-a[11,]) +rank(b[3,]) +bb=rank(b[3,]) +cc=rank(-a[11,]) +bb-cc +table.stats +?table.Stats +table.Stats(edhec) +round(4.4) +round(4.444444) +round(4.444444,5) +round(4.444444,3) +round(CalmarRatio.Norm(COM.09,1),4) 
+round(SterlingRatio.Norm(COM.09,1),4) +round(CalmarRatio.Norm(edhec,1),4) +round(SterlingRatio.Norm(edhec,1),4) +round(CalmarRatio.Norm(edhec[,1:4],1),4) +round(CalmarRatio(edhec[,1:4],1),4) +ES(edhec,.95,method="gaussian") +chart.Autocorrelation(edhec) +chart.Autocorrelation(COM.09) +chart.Autocorrelation(COM.09) +table.normDD(edhec) +table.EMaxDDGBM(edhec) +EmaxDDGBM(edhec) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +EmaxDDGBM(edhec) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +EmaxDDGBM(edhec) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +EmaxDDGBM(edhec) +EmaxDDGBM(edhec[,2]) +EmaxDDGBM(edhec[,13]) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +EmaxDDGBM(edhec) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.EMaxDDGBM.R') +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +EmaxDDGBM(edhec) +table.EMaxDDGBM(edhec) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +EmaxDDGBM(edhec) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +EmaxDDGBM(managers) +data(managers) +EmaxDDGBM(managers) +roxygenize(getwd()) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +roxygenize(getwd()) +data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") +dates <- data$X +values <- data[,-1] # convert percentage to return +COM <- 
as.xts(values, order.by=as.Date(dates)) +COM.09<-COM[,9:11] +charts.PerformanceSummary(COM.09[1:108,],colorset = rich6equal, lwd = 2, ylog = TRUE) +table.DrawdownsRatio(COM.09) +table.Drawdowns(COM.09) +table.DownsideRisk(COM.09) +EmaxDDGBM(COM.09) +EmaxDDGBM(edhec) +table.DownsideRisk(COM.09) +table.DownsideRisk(edhec)[11,] +EmaxDDGBM(edhec) +a=EmaxDDGBM(edhec) +b=table.DownsideRisk(edhec)[11,] +a-(b*100) +a+(b*100) +charts.PerformanceSummary(edhec,colorset = rich6equal, lwd = 2, ylog = TRUE) +charts.PerformanceSummary(edhec[,10:13],colorset = rich6equal, lwd = 2, ylog = TRUE) +EmaxDDGBM(edhec) +?EmaxDDGBM +ES(edhec[1:4],.05,method="gaussian") +ES(edhec[1:4],.95,method="gaussian") +ES(edhec[2:4],.95,method="gaussian") +ES(edhec[,2:4],.95,method="gaussian") +EmaxDDGBM(edhec[,1:4]) +data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") +dates <- data$X +values <- data[,-1] # convert percentage to return +COM <- as.xts(values, order.by=as.Date(dates)) +COM.09<-COM[,9:11] +Vol1 = EMaxDDGBM(COM.09) +Vol1 +Vol1 +Vol2 = -ES(COM.09,.95,method="gaussian") +Vol2 +data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") +dates <- data$X +values <- data[,-1] # convert percentage to return +COM <- as.xts(values, order.by=as.Date(dates)) +COM.09<-COM[,9:11] +Vol1 = EMaxDDGBM(managers) +Vol1 +data(edhec) +EmaxDDGBM(edhec) +data(edhec) +EmaxDDGBM(managers) +data(edhec) +EmaxDDGBM(COM.09) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/R/EmaxDDGBM.R') +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/R/maxDDGBM.R') +head(managers) +head(COM.09) +head(COM) +charts.PerformanceSummary(COM) +charts.PerformanceSummary(COM[,1:7]) +charts.PerformanceSummary(COM[,8:11]) +?glm +?CalmarRatio +??CalmarRatio +source('C:/Users/shubhankit/Desktop/1 
week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') +?lmi +??lmi +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') +getwd() +roxygenize(getwd()) +library("roxygen2", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") +roxygenize(getwd()) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') +?lmi +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') +roxygenize(getwd()) +source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') +roxygenize(getwd()) +?lm +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') +roxygenize(getwd()) +?glm +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') +?lm +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') +roxygenize(getwd()) +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') +roxygenize(getwd()) +?glm +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R', encoding='UTF-8') +roxygenize(getwd()) +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R', encoding='UTF-8') +source.with.encoding('C:/Users/shubhankit/Desktop/1 
week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R', encoding='UTF-8') +roxygenize(getwd()) +source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R', encoding='UTF-8') +roxygenize(getwd()) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/files-pane.pper =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/files-pane.pper (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/files-pane.pper 2013-09-14 00:12:17 UTC (rev 3103) @@ -0,0 +1,9 @@ +{ + "path" : "C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm", + "sortOrder" : [ + { + "ascending" : true, + "columnIndex" : 2 + } + ] +} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/packages-pane.pper =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/packages-pane.pper (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/packages-pane.pper 2013-09-14 00:12:17 UTC (rev 3103) @@ -0,0 +1,7 @@ +{ + "installOptions" : { + "installDependencies" : true, + "installFromRepository" : true, + "libraryPath" : "C:/Users/shubhankit/Documents/R/win-library/3.0" + } +} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/source-pane.pper =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/source-pane.pper (rev 0) +++ 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/source-pane.pper 2013-09-14 00:12:17 UTC (rev 3103) @@ -0,0 +1,3 @@ +{ + "activeTab" : 0 +} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/windowlayoutstate.pper =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/windowlayoutstate.pper (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/windowlayoutstate.pper 2013-09-14 00:12:17 UTC (rev 3103) @@ -0,0 +1,14 @@ +{ + "left" : { + "panelheight" : 646, + "splitterpos" : 410, + "topwindowstate" : "NORMAL", + "windowheight" : 684 + }, + "right" : { + "panelheight" : 646, + "splitterpos" : 411, + "topwindowstate" : "NORMAL", + "windowheight" : 684 + } +} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/workbench-pane.pper =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/workbench-pane.pper (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/pcs/workbench-pane.pper 2013-09-14 00:12:17 UTC (rev 3103) @@ -0,0 +1,4 @@ +{ + "TabSet1" : 2, + "TabSet2" : 3 +} \ No newline at end of file Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/persistent-state =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/persistent-state (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/persistent-state 2013-09-14 00:12:17 UTC (rev 3103) @@ -0,0 +1,9 @@ +build-last-errors="[]" +build-last-errors-base-dir="C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/" 
+build-last-outputs="[{\"output\":\"==> Rcmd.exe INSTALL --no-multiarch noniid.sm\\n\\n\",\"type\":0},{\"output\":\"* installing to library 'C:/Users/shubhankit/Documents/R/win-library/3.0'\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"* installing *source* package 'noniid.sm' ...\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"** R\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"** byte-compile and prepare package for lazy loading\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"** help\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"*** installing help indices\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"** building package indices\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"** installing vignettes\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"** testing if installed package can be loaded\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1},{\"output\":\"* DONE (noniid.sm)\\r\\n\",\"type\":1},{\"output\":\"\",\"type\":1}]" +compile_pdf_state="{\"errors\":[],\"output\":\"\",\"running\":false,\"tab_visible\":false,\"target_file\":\"\"}" +console_procs="[]" +files.monitored-path="" [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3103 From noreply at r-forge.r-project.org Sat Sep 14 03:16:50 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 14 Sep 2013 03:16:50 +0200 (CEST) Subject: [Returnanalytics-commits] r3104 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
R man vignettes Message-ID: <20130914011650.DAB87185326@r-forge.r-project.org> Author: shubhanm Date: 2013-09-14 03:16:46 +0200 (Sat, 14 Sep 2013) New Revision: 3104 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd Log: Documentation Checks + Modification of EmaxDDGBM.R code Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-14 00:12:17 UTC (rev 3103) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-14 01:16:46 UTC (rev 3104) @@ -1,20 +1,20 @@ export(ACStdDev.annualized) +export(CDrawdown) export(CalmarRatio.Norm) -export(CDrawdown) -export(chart.AcarSim) -export(chart.Autocorrelation) export(EmaxDDGBM) -export(glmi) export(GLMSmoothIndex) -export(lmi) export(LoSharpe) export(QP.Norm) export(Return.GLM) export(Return.Okunev) +export(SterlingRatio.Norm) +export(UnsmoothReturn) +export(chart.AcarSim) +export(chart.Autocorrelation) +export(glmi) +export(lmi) export(se.LoSharpe) -export(SterlingRatio.Norm) export(table.ComparitiveReturn.GLM) export(table.EMaxDDGBM) export(table.Sharpe) export(table.UnsmoothReturn) -export(UnsmoothReturn) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R 
=================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R 2013-09-14 00:12:17 UTC (rev 3103) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R 2013-09-14 01:16:46 UTC (rev 3104) @@ -22,7 +22,7 @@ #'library(PerformanceAnalytics) #' data(edhec) #' EmaxDDGBM(edhec) -#' @rdname EMaxDDGBM +#' @rdname EmaxDDGBM #' @export #' @export EmaxDDGBM <- Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd 2013-09-14 00:12:17 UTC (rev 3103) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd 2013-09-14 01:16:46 UTC (rev 3104) @@ -1,8 +1,8 @@ \name{ACStdDev.annualized} \alias{ACStdDev.annualized} +\alias{StdDev.annualized} \alias{sd.annualized} \alias{sd.multiperiod} -\alias{StdDev.annualized} \title{Autocorrleation adjusted Standard Deviation} \usage{ ACStdDev.annualized(R, lag = 6, scale = NA, ...) 
Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd 2013-09-14 00:12:17 UTC (rev 3103) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd 2013-09-14 01:16:46 UTC (rev 3104) @@ -51,7 +51,7 @@ \keyword{Brownian} \keyword{Drawdown} \keyword{Expected} -\keyword{models} \keyword{Motion} \keyword{Using} +\keyword{models} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-14 00:12:17 UTC (rev 3103) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-14 01:16:46 UTC (rev 3104) @@ -104,11 +104,11 @@ the S function of the same name described in Hastie & Pregibon (1992). } +\keyword{HAC} +\keyword{HC} \keyword{covariance} \keyword{estimation} \keyword{fitting} -\keyword{HAC} -\keyword{HC} \keyword{model} \keyword{regression} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-14 00:12:17 UTC (rev 3103) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-14 01:16:46 UTC (rev 3104) @@ -92,11 +92,11 @@ the S function of the same name described in Hastie & Pregibon (1992). 
} +\keyword{HAC} +\keyword{HC} \keyword{covariance} \keyword{estimation} \keyword{fitting} -\keyword{HAC} -\keyword{HC} \keyword{model} \keyword{regression} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-14 00:12:17 UTC (rev 3103) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-14 01:16:46 UTC (rev 3104) @@ -51,7 +51,7 @@ \keyword{Brownian} \keyword{Drawdown} \keyword{Expected} -\keyword{models} \keyword{Motion} \keyword{Using} +\keyword{models} Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw 2013-09-14 01:16:46 UTC (rev 3104) @@ -0,0 +1,83 @@ +\documentclass[12pt,letterpaper,english]{article} +\usepackage{times} +\usepackage[T1]{fontenc} +\IfFileExists{url.sty}{\usepackage{url}} + {\newcommand{\url}{\texttt}} + +\usepackage{babel} +\usepackage{Rd} + +\usepackage{Sweave} +\SweaveOpts{engine=R,eps = FALSE} +\begin{document} +\SweaveOpts{concordance=TRUE} + +\title{ On the Maximum Drawdown of a Brownian Motion } +\author{Shubhankit} +% \keywords{Lo Sharpe Ratio,GLM Smooth Index,GLM Return Table} + +\makeatletter +\makeatother +\maketitle + +\begin{abstract} +The maximum drawdown possible of an asset whose return series follows a Geometric Brownian Motion Process. 
+ +\end{abstract} + + +<>= +require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R') +@ + +<>= +library(PerformanceAnalytics) +data(edhec) +data(managers) +@ +\section{Background} + If X(t) is a random process on [0, T ], the maximum + drawdown at time T , D(T), is defined by where \deqn{D(T) + = sup [X(s) - X(t)]} where s belongs to [0,t] and s + belongs to [0,T] Informally, this is the largest drop + from a peak to a bottom. In this paper, we investigate + the behavior of this statistic for a Brownian motion with + drift. In particular, we give an infinite series + representation of its distribution, and consider its + expected value. When the drift is zero, we give an + analytic expression for the expected value, and for + non-zero drift, we give an infinite series + representation. For all cases, we compute the limiting + \bold{(\eqn{T "tends to" \infty})} behavior, which can be + logarithmic (\eqn{\mu > 0} ), square root (\eqn{\mu = 0}), + or linear (\eqn{\mu < 0} ). + + + +<>= +source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') +data(edhec) +Lo.Sharpe = -100*ES(edhec,.99) +Theoretical.Sharpe= EmaxDDGBM(edhec) +barplot(as.matrix(rbind(Theoretical.Sharpe,Lo.Sharpe)), main="Expected Shortfall(.99) and Drawdown of a Brwonian Motion Asset Process", + xlab="Fund Type",ylab="Value", col=rich6equal[1:2], beside=TRUE) + legend("topright", c("ES","EGBMDD"), cex=0.6, + bty="2", fill=rich6equal[1:2]); +@ + +We can observe that the fund "\textbf{Emerging Markets}", which has the largest drawdown and serial autocorrelation, has highest Drawdown , \emph{decrease} most significantly as comapared to other funds. 
+ +<>= + +data(managers) +Lo.Sharpe = -100*ES(managers[,1:6],.99) +Theoretical.Sharpe= EmaxDDGBM(managers[,1:6]) +barplot(as.matrix(rbind(Theoretical.Sharpe,Lo.Sharpe)), main="Expected Shortfall(.99) and Drawdown of a Brwonian Motion Asset Process", + xlab="Fund Type",ylab="Value", col=rich6equal[1:2], beside=TRUE) + legend("topright", c("ES","EGBMDD"), cex=0.6, + bty="2", fill=rich6equal[1:2]); +@ + +We can see that the model, correctly ranks the highest drawdown fund managers, i.e. \textbf{HAM2}, which has the largest drawdown among all the funds. + +\end{document} Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf ___________________________________________________________________ Added: svn:mime-type + application/octet-stream From noreply at r-forge.r-project.org Sat Sep 14 13:10:01 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 14 Sep 2013 13:10:01 +0200 (CEST) Subject: [Returnanalytics-commits] r3105 - in pkg/Meucci: . demo Message-ID: <20130914111001.DDD6D185D1D@r-forge.r-project.org> Author: xavierv Date: 2013-09-14 13:10:00 +0200 (Sat, 14 Sep 2013) New Revision: 3105 Modified: pkg/Meucci/TODO pkg/Meucci/demo/S_SelectionHeuristics.R Log: - fixed error in S_SelectionHeuristics demo script Modified: pkg/Meucci/TODO =================================================================== --- pkg/Meucci/TODO 2013-09-14 01:16:46 UTC (rev 3104) +++ pkg/Meucci/TODO 2013-09-14 11:10:00 UTC (rev 3105) @@ -9,4 +9,7 @@ * Confirm every datafile is different from the others. 
* Change coding style to one more R alike * Still 2 scripts left from the book: S_MeanVarianceCallsRobust from chapter 9 and S_OptionReplication from chapter 6 +* Improve documentation for every script from the book: + - find the exercises and sections they come from + - write down the formulas Modified: pkg/Meucci/demo/S_SelectionHeuristics.R =================================================================== --- pkg/Meucci/demo/S_SelectionHeuristics.R 2013-09-14 01:16:46 UTC (rev 3104) +++ pkg/Meucci/demo/S_SelectionHeuristics.R 2013-09-14 11:10:00 UTC (rev 3105) @@ -191,7 +191,7 @@ SelectExactNChooseK = function( OutOfWho, K, M ) { - Combos = t(combn( OutOfWho[ i ], K ) ); + Combos = t(combn( OutOfWho, K ) ); L = dim(Combos)[1]; a = matrix( 0, 1, L ); From noreply at r-forge.r-project.org Sat Sep 14 19:26:06 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 14 Sep 2013 19:26:06 +0200 (CEST) Subject: [Returnanalytics-commits] r3106 - pkg/PortfolioAnalytics/sandbox Message-ID: <20130914172606.CD95118590A@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-14 19:26:06 +0200 (Sat, 14 Sep 2013) New Revision: 3106 Modified: pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R Log: Adding comparison of sample vs simplex method for random portfolios. 
Modified: pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R 2013-09-14 11:10:00 UTC (rev 3105) +++ pkg/PortfolioAnalytics/sandbox/rp_method_comparison.R 2013-09-14 17:26:06 UTC (rev 3106) @@ -45,3 +45,19 @@ ylab="mean", xlab="StdDev", col=rgb(0, 0, 100, 50, maxColorValue=255)) } par(mfrow=c(1,1)) + +# charts to compare simplex and sample random portfolio generation +par(mfrow=c(1, 2)) +# simplex +rp_simplex <- random_portfolios(portfolio=pspec, permutations=2000, rp_method='simplex', fev=0:5) +tmp.mean <- apply(rp_simplex, 1, function(x) mean(R %*% x)) +tmp.StdDev <- apply(rp_simplex, 1, function(x) StdDev(R=R, weights=x)) +plot(x=tmp.StdDev, y=tmp.mean, main="rp_method=simplex fev=0:5", + ylab="mean", xlab="StdDev", col=rgb(0, 0, 100, 50, maxColorValue=255)) +#sample +rp_sample <- random_portfolios(portfolio=pspec, permutations=2000, rp_method='sample') +tmp.mean <- apply(rp_sample, 1, function(x) mean(R %*% x)) +tmp.StdDev <- apply(rp_sample, 1, function(x) StdDev(R=R, weights=x)) +plot(x=tmp.StdDev, y=tmp.mean, main="rp_method=sample", + ylab="mean", xlab="StdDev", col=rgb(0, 0, 100, 50, maxColorValue=255)) +par(mfrow=c(1,1)) From noreply at r-forge.r-project.org Sun Sep 15 02:04:44 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 15 Sep 2013 02:04:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3107 - in pkg/PortfolioAnalytics: R man Message-ID: <20130915000444.F3F47185CF3@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-15 02:04:44 +0200 (Sun, 15 Sep 2013) New Revision: 3107 Modified: pkg/PortfolioAnalytics/R/extract.efficient.frontier.R pkg/PortfolioAnalytics/R/generics.R pkg/PortfolioAnalytics/R/objectiveFUN.R pkg/PortfolioAnalytics/R/random_portfolios.R pkg/PortfolioAnalytics/man/random_portfolios.Rd Log: Added group names to Groups_HHI output. 
Added category labels to print method for portfolio objects. Fixed bug in meanvar.efficient.frontier to allow more then 2 objectives. Fixed documentation for random_portfolios. Modified: pkg/PortfolioAnalytics/R/extract.efficient.frontier.R =================================================================== --- pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 2013-09-14 17:26:06 UTC (rev 3106) +++ pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 2013-09-15 00:04:44 UTC (rev 3107) @@ -131,7 +131,7 @@ } # for a mean-var efficient frontier, there must be two objectives 1) "mean" and 2) "var" - if(!((length(objnames) == 2) & ("var" %in% objnames) & ("mean" %in% objnames))){ + if(!((length(objnames) >= 2) & ("var" %in% objnames) & ("mean" %in% objnames))){ stop("The portfolio object must have both 'mean' and 'var' specified as objectives") } Modified: pkg/PortfolioAnalytics/R/generics.R =================================================================== --- pkg/PortfolioAnalytics/R/generics.R 2013-09-14 17:26:06 UTC (rev 3106) +++ pkg/PortfolioAnalytics/R/generics.R 2013-09-15 00:04:44 UTC (rev 3107) @@ -70,6 +70,21 @@ cat("More than 10 assets, only printing the first 10\n") } + # Category labels + if(!is.null(x$category_labels)){ + cat("\nCategory Labels\n") + cat_labels <- x$category_labels + for(i in 1:min(10, length(cat_labels))){ + cat(names(cat_labels)[i],": ") + tmp <- names(x$assets[cat_labels[[i]]]) + cat(tmp, "\n") + } + if(length(cat_labels) > 10){ + cat("More than 10 categories, only printing the first 10\n") + } + cat("\n") + } + # Constraints cat("\nConstraints\n") nconstraints <- length(x$constraints) @@ -263,16 +278,16 @@ names(tmp_obj) <- names(objective_measures) cat("Objective Measure:\n") for(i in 1:length(objective_measures)){ - print(tmp_obj[i], digits=4) + print(tmp_obj[i], digits=digits) cat("\n") if(length(objective_measures[[i]]) > 1){ # This will be the case for any objective measures with HHI for QP problems for(j in 
2:length(objective_measures[[i]])){ tmpl <- objective_measures[[i]][j] - cat(names(tmpl), ":\n") + cat(names(tmpl), "\n") tmpv <- unlist(tmpl) - # names(tmpv) <- names(x$weights) - print(tmpv) + names(tmpv) <- gsub(paste(names(tmpl), ".", sep=""), "", names(tmpv)) + print.default(round(tmpv, digits=digits), digits=digits) cat("\n") } } Modified: pkg/PortfolioAnalytics/R/objectiveFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/objectiveFUN.R 2013-09-14 17:26:06 UTC (rev 3106) +++ pkg/PortfolioAnalytics/R/objectiveFUN.R 2013-09-15 00:04:44 UTC (rev 3107) @@ -54,10 +54,11 @@ if(!is.null(groups)){ ngroups <- length(groups) group_hhi <- rep(0, ngroups) + if(!is.null((names(groups)))) names(group_hhi) <- names(groups) for(i in 1:ngroups){ group_hhi[i] <- sum(weights[groups[[i]]]^2) } - return(list(hhi=hhi, group_hhi=group_hhi)) + return(list(HHI=hhi, Groups_HHI=group_hhi)) } else { return(hhi) } Modified: pkg/PortfolioAnalytics/R/random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-14 17:26:06 UTC (rev 3106) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-15 00:04:44 UTC (rev 3107) @@ -334,7 +334,7 @@ #' limit constraints will be handled by elimination. If the constraints are #' very restrictive, this may result in very few feasible portfolios remaining.} #' \item{grid: }{The 'grid' method to generate random portfolios is based on -#' the \code(gridSearch} function in package 'NMOF'. The grid search method +#' the \code{gridSearch} function in package 'NMOF'. The grid search method #' only satisfies the \code{min} and \code{max} box constraints. The #' \code{min_sum} and \code{max_sum} leverage constraints will likely be #' violated and the weights in the random portfolios should be normalized. 
Modified: pkg/PortfolioAnalytics/man/random_portfolios.Rd =================================================================== --- pkg/PortfolioAnalytics/man/random_portfolios.Rd 2013-09-14 17:26:06 UTC (rev 3106) +++ pkg/PortfolioAnalytics/man/random_portfolios.Rd 2013-09-15 00:04:44 UTC (rev 3107) @@ -48,7 +48,7 @@ very restrictive, this may result in very few feasible portfolios remaining.} \item{grid: }{The 'grid' method to generate random portfolios is based on the - \code(gridSearch} function in package 'NMOF'. The grid + \code{gridSearch} function in package 'NMOF'. The grid search method only satisfies the \code{min} and \code{max} box constraints. The \code{min_sum} and \code{max_sum} leverage constraints will likely be From noreply at r-forge.r-project.org Sun Sep 15 10:39:12 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 15 Sep 2013 10:39:12 +0200 (CEST) Subject: [Returnanalytics-commits] r3108 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
R man vignettes Message-ID: <20130915083912.97513184675@r-forge.r-project.org> Author: shubhanm Date: 2013-09-15 10:39:12 +0200 (Sun, 15 Sep 2013) New Revision: 3108 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf Log: Doc modification , addition of normDD Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION 2013-09-15 08:39:12 UTC (rev 3108) @@ -9,7 +9,8 @@ xts, PerformanceAnalytics, tseries, - stats + stats, + gld Maintainer: Brian G. Peterson Description: GSoC 2013 project to replicate literature on drawdowns and non-i.i.d assumptions in finance. 
@@ -38,3 +39,4 @@ 'table.Sharpe.R' 'glmi.R' 'lmi.R' + 'table.normDD.R' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-15 08:39:12 UTC (rev 3108) @@ -1,20 +1,21 @@ export(ACStdDev.annualized) +export(CalmarRatio.Norm) export(CDrawdown) -export(CalmarRatio.Norm) +export(chart.AcarSim) +export(chart.Autocorrelation) export(EmaxDDGBM) +export(glmi) export(GLMSmoothIndex) +export(lmi) export(LoSharpe) export(QP.Norm) export(Return.GLM) export(Return.Okunev) +export(se.LoSharpe) export(SterlingRatio.Norm) -export(UnsmoothReturn) -export(chart.AcarSim) -export(chart.Autocorrelation) -export(glmi) -export(lmi) -export(se.LoSharpe) export(table.ComparitiveReturn.GLM) export(table.EMaxDDGBM) +export(table.NormDD) export(table.Sharpe) export(table.UnsmoothReturn) +export(UnsmoothReturn) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R 2013-09-15 08:39:12 UTC (rev 3108) @@ -13,7 +13,7 @@ #' #'@param vcov HC-HAC covariance estimation #'@param weights -#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum(w*e^2)); otherwise ordinary least squares is used. See also ?Details?, +#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum; otherwise ordinary least squares is used. 
See also ?Details?, #'@param subset #'an optional vector specifying a subset of observations to be used in the fitting process. #' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 2013-09-15 08:39:12 UTC (rev 3108) @@ -12,7 +12,7 @@ #' #'@param vcov HC-HAC covariance estimation #'@param weights -#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum(w*e^2)); otherwise ordinary least squares is used. See also ?Details?, +#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum; otherwise ordinary least squares is used. See also ?Details?, #' #' #'@param subset Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R 2013-09-15 08:39:12 UTC (rev 3108) @@ -0,0 +1,110 @@ +#'@title Generalised Lambda Distribution Simulated Drawdown +#'@description When selecting a hedge fund manager, one risk measure investors often +#' consider is drawdown. How should drawdown distributions look? Carr Futures' +#' Galen Burghardt, Ryan Duncan and Lianyan Liu share some insights from their +#'research to show investors how to begin to answer this tricky question +#'@details To simulate net asset value (NAV) series where skewness and kurtosis are zero, +#' we draw sample returns from a lognormal return distribution. 
To capture skewness +#' and kurtosis, we sample returns from a \bold{generalised \eqn{\lambda} distribution}.The values of +#' skewness and excess kurtosis used were roughly consistent with the range of values the paper +#' observed for commodity trading advisers in our database. The NAV series is constructed +#' from the return series. The simulated drawdowns are then derived and used to produce +#' the theoretical drawdown distributions. A typical run usually requires \bold{10,000} +#' iterations to produce a smooth distribution. +#' +#' +#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of +#' asset returns +#' @param digits number of rounding off digits. +#' @references Burghardt, G., and L. Liu, \emph{ It's the Autocorrelation, Stupid (November 2012) Newedge +#' working paper.} +#' \code{\link[stats]{}} \cr +#' \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} +#' Burghardt, G., Duncan, R. and L. Liu, \emph{Deciphering drawdown}. Risk magazine, Risk management for investors, September, S16-S20, 2003. 
\url{http://www.risk.net/data/risk/pdf/investor/0903_risk.pdf} +#' @author Peter Carl, Brian Peterson, Shubhankit Mohan +#' @keywords Simulated Drawdown Using Brownian Motion Assumptions +#' @seealso Drawdowns.R +#' @rdname table.normDD +#' @export +table.NormDD <- + function (R,digits =4) + {# @author + + # DESCRIPTION: + # Downside Risk Summary: Statistics and Stylized Facts + + # Inputs: + # R: a regular timeseries of returns (rather than prices) + # Output: Table of Estimated Drawdowns +# library(gld) + + y = checkData(R, method = "xts") + columns = ncol(y) + rows = nrow(y) + columnnames = colnames(y) + rownames = rownames(y) + T= nyears(y); + n <- 1000 + dt <- 1/T; + r0 <- 0; + s0 <- 1; + # for each column, do the following: + for(column in 1:columns) { + x = y[,column] + mu = Return.annualized(x, scale = NA, geometric = TRUE) + sig=StdDev.annualized(x) + skew = skewness(x) + kurt = kurtosis(x) + r <- matrix(0,T+1,n) # matrix to hold short rate paths + s <- matrix(0,T+1,n) + r[1,] <- r0 + s[1,] <- s0 + drawdown <- matrix(0,n) + # return(Ed) + + for(j in 1:n){ + r[2:(T+1),j]= rgl(T,mu,sig,skew,kurt) + for(i in 2:(T+1)){ + + dr <- r[i,j]*dt + s[i,j] <- s[i-1,j] + (dr/100) + } + + + drawdown[j] = as.numeric(maxdrawdown(s[,j])[1]) + } + z = c((mu*100), + (sig*100), + ((mean(drawdown)))) + znames = c( + "Annual Returns in %", + "Std Devetions in %", + "Normalized Drawdown Drawdown in %" + ) + if(column == 1) { + resultingtable = data.frame(Value = z, row.names = znames) + } + else { + nextcolumn = data.frame(Value = z, row.names = znames) + resultingtable = cbind(resultingtable, nextcolumn) + } + } + colnames(resultingtable) = columnnames + ans = base::round(resultingtable, digits) + ans + # t <- seq(0, T, dt) + # matplot(t, r[1,1:T], type="l", lty=1, main="Short Rate Paths", ylab="rt") + + } + +############################################################################### +# R (http://r-project.org/) +# +# Copyright (c) 2004-2013 +# +# This R package is 
distributed under the terms of the GNU Public License (GPL) +# for full details see the file COPYING +# +# $Id: table.normDD +# +############################################################################### Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/ACStdDev.annualized.Rd 2013-09-15 08:39:12 UTC (rev 3108) @@ -1,8 +1,8 @@ \name{ACStdDev.annualized} \alias{ACStdDev.annualized} -\alias{StdDev.annualized} \alias{sd.annualized} \alias{sd.multiperiod} +\alias{StdDev.annualized} \title{Autocorrleation adjusted Standard Deviation} \usage{ ACStdDev.annualized(R, lag = 6, scale = NA, ...) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/EMaxDDGBM.Rd 2013-09-15 08:39:12 UTC (rev 3108) @@ -51,7 +51,7 @@ \keyword{Brownian} \keyword{Drawdown} \keyword{Expected} +\keyword{models} \keyword{Motion} \keyword{Using} -\keyword{models} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-15 08:39:12 UTC (rev 3108) @@ -31,9 +31,8 @@ \item{weights}{an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. 
If non-NULL, weighted least squares is used with - weights weights (that is, minimizing sum(w*e^2)); - otherwise ordinary least squares is used. See also - ?Details?,} + weights weights (that is, minimizing sum; otherwise + ordinary least squares is used. See also ?Details?,} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} @@ -104,11 +103,11 @@ the S function of the same name described in Hastie & Pregibon (1992). } -\keyword{HAC} -\keyword{HC} \keyword{covariance} \keyword{estimation} \keyword{fitting} +\keyword{HAC} +\keyword{HC} \keyword{model} \keyword{regression} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-15 08:39:12 UTC (rev 3108) @@ -24,9 +24,8 @@ \item{weights}{an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with - weights weights (that is, minimizing sum(w*e^2)); - otherwise ordinary least squares is used. See also - ?Details?,} + weights weights (that is, minimizing sum; otherwise + ordinary least squares is used. See also ?Details?,} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} @@ -92,11 +91,11 @@ the S function of the same name described in Hastie & Pregibon (1992). 
} -\keyword{HAC} -\keyword{HC} \keyword{covariance} \keyword{estimation} \keyword{fitting} +\keyword{HAC} +\keyword{HC} \keyword{model} \keyword{regression} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.EMaxDDGBM.Rd 2013-09-15 08:39:12 UTC (rev 3108) @@ -51,7 +51,7 @@ \keyword{Brownian} \keyword{Drawdown} \keyword{Expected} +\keyword{models} \keyword{Motion} \keyword{Using} -\keyword{models} Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw 2013-09-15 00:04:44 UTC (rev 3107) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw 2013-09-15 08:39:12 UTC (rev 3108) @@ -1,3 +1,5 @@ +%% no need for \DeclareGraphicsExtensions{.pdf,.eps} + \documentclass[12pt,letterpaper,english]{article} \usepackage{times} \usepackage[T1]{fontenc} @@ -5,21 +7,38 @@ {\newcommand{\url}{\texttt}} \usepackage{babel} +%\usepackage{noweb} \usepackage{Rd} \usepackage{Sweave} -\SweaveOpts{engine=R,eps = FALSE} +\SweaveOpts{engine=R,eps=FALSE} +%\VignetteIndexEntry{Performance Attribution from Bacon} +%\VignetteDepends{PerformanceAnalytics} +%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} +%\VignettePackage{PerformanceAnalytics} + +%\documentclass[a4paper]{article} +%\usepackage[noae]{Sweave} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} +%\usepackage{graphicx} +%\usepackage{graphicx, verbatim} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, 
latexsym} +%\usepackage{graphicx} + +\title{On the Maximum Drawdown of a Brownian Motion} +\author{Shubhankit Mohan} + \begin{document} \SweaveOpts{concordance=TRUE} -\title{ On the Maximum Drawdown of a Brownian Motion } -\author{Shubhankit} -% \keywords{Lo Sharpe Ratio,GLM Smooth Index,GLM Return Table} - -\makeatletter -\makeatother \maketitle + \begin{abstract} The maximum drawdown possible of an asset whose return series follows a Geometric Brownian Motion Process. Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Sun Sep 15 10:42:24 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 15 Sep 2013 10:42:24 +0200 (CEST) Subject: [Returnanalytics-commits] r3109 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man Message-ID: <20130915084224.EFDCA184675@r-forge.r-project.org> Author: shubhanm Date: 2013-09-15 10:42:24 +0200 (Sun, 15 Sep 2013) New Revision: 3109 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd Log: addition of normDD Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd 2013-09-15 08:42:24 UTC (rev 3109) @@ -0,0 +1,58 @@ +\name{table.NormDD} +\alias{table.NormDD} +\title{Generalised Lambda Distribution Simulated Drawdown} +\usage{ + table.NormDD(R, digits = 4) +} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{digits}{number of rounding off digits.} +} +\description{ + When selecting a hedge fund manager, one risk measure + investors often consider is drawdown. 
How should drawdown + distributions look? Carr Futures' Galen Burghardt, Ryan + Duncan and Lianyan Liu share some insights from their + research to show investors how to begin to answer this + tricky question +} +\details{ + To simulate net asset value (NAV) series where skewness + and kurtosis are zero, we draw sample returns from a + lognormal return distribution. To capture skewness and + kurtosis, we sample returns from a \bold{generalised + \eqn{\lambda} distribution}.The values of skewness and + excess kurtosis used were roughly consistent with the + range of values the paper observed for commodity trading + advisers in our database. The NAV series is constructed + from the return series. The simulated drawdowns are then + derived and used to produce the theoretical drawdown + distributions. A typical run usually requires + \bold{10,000} iterations to produce a smooth + distribution. +} +\author{ + Peter Carl, Brian Peterson, Shubhankit Mohan +} +\references{ + Burghardt, G., and L. Liu, \emph{ It's the + Autocorrelation, Stupid (November 2012) Newedge working + paper.} \code{\link[stats]{}} \cr + \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} + Burghardt, G., Duncan, R. and L. Liu, \emph{Deciphering + drawdown}. Risk magazine, Risk management for investors, + September, S16-S20, 2003. 
+ \url{http://www.risk.net/data/risk/pdf/investor/0903_risk.pdf} +} +\seealso{ + Drawdowns.R +} +\keyword{Assumptions} +\keyword{Brownian} +\keyword{Drawdown} +\keyword{Motion} +\keyword{Simulated} +\keyword{Using} + From noreply at r-forge.r-project.org Sun Sep 15 11:06:56 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 15 Sep 2013 11:06:56 +0200 (CEST) Subject: [Returnanalytics-commits] r3110 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes Message-ID: <20130915090656.1A00E1844F1@r-forge.r-project.org> Author: shubhanm Date: 2013-09-15 11:06:55 +0200 (Sun, 15 Sep 2013) New Revision: 3110 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Commodity.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Managers.pdf Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-008.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-Graph10.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.toc pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-008.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-Graph3.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-Graph4.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-Graph5.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-Graph6.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.log pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.toc Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.pdf Log: Vignettes : All vignettes done addition (13) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.pdf =================================================================== (Binary files 
differ) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Commodity.pdf =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Commodity.pdf ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.pdf =================================================================== (Binary files differ) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.pdf =================================================================== (Binary files differ) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-003.pdf =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-003.pdf 2013-09-15 08:42:24 UTC (rev 3109) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-003.pdf 2013-09-15 09:06:55 UTC (rev 3110) @@ -1,157 +0,0 @@ -%PDF-1.4 -%????????\r -1 0 obj -<< -/CreationDate (D:20130907225119) -/ModDate (D:20130907225119) -/Title (R Graphics Output) -/Producer (R 3.0.1) -/Creator (R) ->> -endobj -2 0 obj -<< /Type /Catalog /Pages 3 0 R >> -endobj -7 0 obj -<< /Type /Page /Parent 3 0 R /Contents 
8 0 R /Resources 4 0 R >> -endobj -8 0 obj -<< -/Length 9028 /Filter /FlateDecode ->> -stream -x???M??q???W???b??d7?????$??, /????#i`i#?>U?????:G?j?A#????l~l?z,~???v4???????]G?C&?8?[?C&??_=???e?%9Pg?*o?'????k???c??k????^???:{?~]?t??????t??K???A?=????:{?r?[????v]?t?????&??1??g??????=??G[?km?A?=?z]?t??C?t???k????=???f????k-?:{????t?8???=?O??F?-??????q?N??+?t8h,=???T???@???:9??59@'m?????=??]?t0?T?fR??A:$tC?v?~-???a[?U??~???a????r?7?R9??Z??\???z?*?W??k3?K*?fr?K?]????\??]??H??*?u?i??W?????T??U???N?????????u_?????? -?????]??@???????????b')r?????&r[?pDJ???_?^?H+*??u?M????T?bz?o????g{huT}???}???b????Ji??E????N:??L?zc?v?~???-??ZTw?o???????u??V?I???lv???????{w)????Z?!5????????.?_?O:?j?_?=?\?*W?????OR???a??????n??aW??}?c)??z?f+z\?? -h??l?????i?,??n???6g}??~?U?c????)??????=????1??+/?KR>??c?x>??"???y?]O???????O??6???HF ?????jG??Y?!/?Z>?]_ -^G(???Z?8???v=??j?}]??v????4?D??|x\????J?????.???W???????7?j3??>X???}??ie?].$??y????.??u?????" ?AL????,??3Y??Rihp?j????A??~H????|???@??????=????c???(0????M????o[???3Z???????Z?qz??7??=?T?C???K????9?A??????~RPG1??7?^????mc????u??Hx?o??6mfv}??T$}?b???v@??$??_????|???j+X????=??v??#??>?|?j?'W?6?i?JCi2?Z??#+?}o?=?LQ?????_???j?D0?j?? ???FL??;??$??`???`????XGB????&?????`+*?le3Pr??r????Y?E?+??`>J???6????? -?W(@( ?`??6|?%???.?L^???o ???{??`R?ZF0m?0D0E??`R?:4lg??? -=2?IM?????,D?????`??????`???@0???D?? w?O?Q?`??d@?c??`m%???? ?@jG0E??`?;? -?f&z??I?h?V???`?4v"?<??vLT??1?X@??C?#?"???&?????D7 lcw??5^??9????v??`?ND#?m? G0??s"??!???{9??hCL?#C G? -??D????s????3???!??@{?vp?%? -?G0)f<????`???'?M??0?`?cG????L4?V?LD0?v??L+D,G0G>G???u+??`c?;?????l "/L?[F?q???`?p;???D?A"?I?. at 4 ?\?B?`}??#?m???D??`?@. ??WG0i?6?M??O?`;???v ? ???z;!?j???`o??A??aG? -?????v?`r?%!????L??5?`?&?`?DF??N8?y? ?/?L[???`=????x??R;?`"????`;???i?l?!???G0?E??IM?"?u??&?y??l??L????:????rk?TD? 
-U|F??< -60?q???@?}"?`?Q??y???`???Q?Jd!?iOo??Q?J??(XE??Q??Q1"Xc???1????sc?=?1??(??b??(??????i?*G?N$< -&?GT?l??0 -vx?{??z?0??(?G??`??{??Q]??gHlA[ -???`??l???o"g??` ??`:?????#???D0???2?I?G???`?gl?( -??(??( ?t|G????V??Q???5?`??#Xe{?Q?B? ?G$"???C?????}??`??xG??)?#? V?w????G??`???#?R?z2???q?;um??? -j??(????????W?Q?UM?+? +?????????8 #ln??7???,2;?|???L????PQ??L?o???=?????qE???(???4?????_?2??W?p?????????py?-??????; cs??/??????>????????????7B?_}???????z?k????V?Z?oo^????c????????V(??????._???C?n?.?|?#}???}-?&?????~x?????^? -}eY-???R????,?/?????5??J??s? -;B:??O???v?i????t?XK?@t$<-I?o?????`?"y ??9$D ??Q???y~xp???O_??<0??@I?-??t&?w??k:?f???5??s???????YcxpMgxp?7=|Mg???-8?O??NNr?????3?????t?t,9`R38?I?)LN????N??L;????8????5c?H3H8??0? -???t?X?0?M?? -?pu?b??A?????i? ???%?f]??Nu? ?.}?? ?&???d?N????????? ????/??E?{.?_?S???g??5????u?sa?3?xS???Y????L???9??a?Sn????T&?????? ??????m ?e??L??e??#FC??!??Fv??z??mA?????-wFk+????????KO??i+???????~:F.???H)????? -????1?????6???A-??`-???A?t ??-XL????Y? ??F?4????w??y)??6 k??i?K?1?T????pB?(8??A???L???P9?V???P???H??????%??A??YO??;4???|?P?? ,?FX94?????| O??o??}????swC???;?? ?q??H???AZ?m????S3??????J???D ???W?nAPT?#???/?n??cxsj3C#t)??? ?dGX??cOh?C#???????Pa???&??s?????????(?~??? -???| ??tM??\h?B#?z??}?~ Mf ?pY??H, ;BaS3?%??I????~O?+?? -OXt????.y?`''??x?$???f??x??$/b:??????J^lN^?'>??W?C?A^ ??q?Z?u?R??y-~ ?k?h???x?'??A^???x?W9??g?L^???Kg???=??8?????Ay??k~?W~? ??'?N??(??{?I^???e?????6/;d?1O??S?Q?a?????0?{T~7Wf%;??N????.?2?mv?d?yI?!???)a? ???c?n???C&??k???6? ;d?1??ui??n???C&??um?????2??7????Q?a????0? -????0? -;d??Zv????M;e??y???d? Y?!???ma??v??b?d??Z???a?Lv??C???XT????????s?q???\~?-f????e?1??v?d?yf?!????a?Lv???2?7?Tf??S???6o -;d??<6??a??6?lB&??s?????|f?8?e???? ;???b?<*?????>*???????????y??S&{;?G?Lv???2?m^v??c? b!?????l?y|?!????a?Lv???2????z????????n?????T~?aG?`?)???a?Lv?+?2?-?v?i???8?-v?d??D?!???a?Lv?[?2??c?jg?d??:?{.??????sv?A??2????(?};?e??Z?????L;e???Rm?d?8K?!?}????/?-v???\K??? 
;d?[?&???nq??C?q?i?L?v*??d???=????(?????S&??????7 -;d?[)???~k%~?#?4???nq??C&??????? -;d???v?d??Q ??S?Q&?8?e???? ;?^?N????)d{???~????i?x?T$.????????O?>??t ??`?S???&?OMf??,>5{b???d?)X -?????`?)XzF???L??? ??l?????,?h???G??A???v?,?l[??y???]?q>?W???r?*???r? -??W9?}[H?n_fY?? -???????_f_p?????g_F??zR?????_??!o?/?>?2?o_f{?e;???e??/?|+]???> -endobj -4 0 obj -<< -/ProcSet [/PDF /Text] -/Font <> -/ExtGState << >> -/ColorSpace << /sRGB 5 0 R >> ->> -endobj -5 0 obj -[/ICCBased 6 0 R] -endobj -6 0 obj -<< /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> -stream -x???wTS????7?P????khRH -?H?.*1 J??"6DTpDQ??2(???C??"??Q??D?qp?Id???y?????~k????g?}??????LX ? ?X??????g` ?l?p??B?F?|??l???? ??*????????Y"1P??????\?8=W?%?O???4M?0J?"Y?2V?s?,[|??e9?2?<?s??e???'??9???`???2?&c?tI?@?o??|N6(??.?sSdl-c?(2?-?y?H?_??/X??????Z.$??&\S???????M????07?#?1??Y?rf??Yym?";?8980m-m?(?]????v?^??D???W~? -??e????mi]?P????`/???u}q?|^R??,g+???\K?k)/????C_|?R????ax??8?t1C^7nfz?D????p? ?????u?$??/?ED??L L??[???B?@???????????????X?!@~(* {d+??} ?G???????????}W?L??$?cGD2?Q????Z4 E@?@??????A(?q`1???D ??????`'?u?4?6pt?c?48.??`?R0??)? -?@???R?t C???X??CP?%CBH@??R?????f?[?(t? -C??Qh?z#0 ??Z?l?`O8?????28.????p|??O???X -????:??0?FB?x$ !???i@?????H???[EE1PL? ??????V?6??QP??>?U?(j -?MFk?????t,:??.FW???????8???c?1?L&?????9???a??X?:??? -?r?bl1? -{{{;?}?#?tp?8_\?8??"?Ey?.,?X?????%?%G??1?-??9????????K??l?.??oo???/?O$?&?'=JvM??x??????{????=Vs\?x? ????N???>?u?????c?Kz???=s?/?o?l????|??????y???? ??^d]???p?s?~???:;???/;]??7|?????W????p???????Q?o?H?!?????V????sn??Ys}?????????~4??]? 
=>?=:?`??;c??'?e??~??!?a???D?#?G?&}'/?^?x?I??????+?\????w?x?20;5?\?????_??????e?t???W?f^??Qs?-?m???w3????+??~???????O?~????endstream -endobj -9 0 obj -<< -/Type /Encoding /BaseEncoding /WinAnsiEncoding -/Differences [ 45/minus ] ->> -endobj -10 0 obj -<< /Type /Font /Subtype /Type1 /Name /F2 /BaseFont /Helvetica -/Encoding 9 0 R >> -endobj -11 0 obj -<< /Type /Font /Subtype /Type1 /Name /F3 /BaseFont /Helvetica-Bold -/Encoding 9 0 R >> -endobj -xref -0 12 -0000000000 65535 f -0000000021 00000 n -0000000163 00000 n -0000009392 00000 n -0000009475 00000 n -0000009598 00000 n -0000009631 00000 n -0000000212 00000 n -0000000292 00000 n -0000012326 00000 n -0000012420 00000 n -0000012517 00000 n -trailer -<< /Size 12 /Info 1 0 R /Root 2 0 R >> -startxref -12619 -%%EOF Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-004.pdf =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-004.pdf 2013-09-15 08:42:24 UTC (rev 3109) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-004.pdf 2013-09-15 09:06:55 UTC (rev 3110) @@ -1,96 +0,0 @@ -%PDF-1.4 -%????????\r -1 0 obj -<< -/CreationDate (D:20130907225119) -/ModDate (D:20130907225119) -/Title (R Graphics Output) -/Producer (R 3.0.1) -/Creator (R) ->> -endobj -2 0 obj -<< /Type /Catalog /Pages 3 0 R >> -endobj -7 0 obj -<< /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> -endobj -8 0 obj -<< -/Length 663 /Filter /FlateDecode ->> -stream -x??UMS?0??W?19 ?Z???@???????$*?????L?}W?????????~???wr??ny~??2?BJ ???????F8?G???5??\^??I?p??]??8M, -K`S????Z)?`?a????!" --???Xs^??:R.?ls?Hv?VmsD??8????v]GJ g#'?`?? k??#?5?*Q?? "h+???.??! ?=8?-???? -???0?|mCK??m?/?r??e?'0????v??`?????0??]Z???~y3???i??5??_5?M??u?ye?(???[??????~??????A1???|???6? 
?.`t[,??U> -endobj -4 0 obj -<< -/ProcSet [/PDF /Text] -/Font <> -/ExtGState << >> -/ColorSpace << /sRGB 5 0 R >> ->> -endobj -5 0 obj -[/ICCBased 6 0 R] -endobj -6 0 obj -<< /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> -stream -x???wTS????7?P????khRH -?H?.*1 J??"6DTpDQ??2(???C??"??Q??D?qp?Id???y?????~k????g?}??????LX ? ?X??????g` ?l?p??B?F?|??l???? ??*????????Y"1P??????\?8=W?%?O???4M?0J?"Y?2V?s?,[|??e9?2?<?s??e???'??9???`???2?&c?tI?@?o??|N6(??.?sSdl-c?(2?-?y?H?_??/X??????Z.$??&\S???????M????07?#?1??Y?rf??Yym?";?8980m-m?(?]????v?^??D???W~? -??e????mi]?P????`/???u}q?|^R??,g+???\K?k)/????C_|?R????ax??8?t1C^7nfz?D????p? ?????u?$??/?ED??L L??[???B?@???????????????X?!@~(* {d+??} ?G???????????}W?L??$?cGD2?Q????Z4 E@?@??????A(?q`1???D ??????`'?u?4?6pt?c?48.??`?R0??)? -?@???R?t C???X??CP?%CBH@??R?????f?[?(t? -C??Qh?z#0 ??Z?l?`O8?????28.????p|??O???X -????:??0?FB?x$ !???i@?????H???[EE1PL? ??????V?6??QP??>?U?(j -?MFk?????t,:??.FW???????8???c?1?L&?????9???a??X?:??? -?r?bl1? -{{{;?}?#?tp?8_\?8??"?Ey?.,?X?????%?%G??1?-??9????????K??l?.??oo???/?O$?&?'=JvM??x??????{????=Vs\?x? ????N???>?u?????c?Kz???=s?/?o?l????|??????y???? ??^d]???p?s?~???:;???/;]??7|?????W????p???????Q?o?H?!?????V????sn??Ys}?????????~4??]? 
=>?=:?`??;c??'?e??~??!?a???D?#?G?&}'/?^?x?I??????+?\????w?x?20;5?\?????_??????e?t???W?f^??Qs?-?m???w3????+??~???????O?~????endstream -endobj -9 0 obj -<< -/Type /Encoding /BaseEncoding /WinAnsiEncoding -/Differences [ 45/minus ] ->> -endobj -10 0 obj -<< /Type /Font /Subtype /Type1 /Name /F2 /BaseFont /Helvetica -/Encoding 9 0 R >> -endobj -11 0 obj -<< /Type /Font /Subtype /Type1 /Name /F3 /BaseFont /Helvetica-Bold -/Encoding 9 0 R >> -endobj -xref -0 12 -0000000000 65535 f -0000000021 00000 n -0000000163 00000 n -0000001026 00000 n -0000001109 00000 n -0000001232 00000 n -0000001265 00000 n -0000000212 00000 n -0000000292 00000 n -0000003960 00000 n -0000004054 00000 n -0000004151 00000 n -trailer -<< /Size 12 /Info 1 0 R /Root 2 0 R >> -startxref -4253 -%%EOF Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-concordance.tex =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-concordance.tex 2013-09-15 08:42:24 UTC (rev 3109) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe-concordance.tex 2013-09-15 09:06:55 UTC (rev 3110) @@ -1,3 +0,0 @@ -\Sconcordance{concordance:LoSharpe.tex:LoSharpe.Rnw:% -1 27 1 1 4 1 5 21 1 1 2 1 0 1 2 5 0 1 2 1 1 1 2 1 0 1 1 1 2 1 0 1 2 5 0 % -1 2 2 1} Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.log =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.log 2013-09-15 08:42:24 UTC (rev 3109) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpe.log 2013-09-15 09:06:55 UTC (rev 3110) @@ -1,420 +0,0 @@ -This is pdfTeX, Version 3.1415926-2.5-1.40.14 (TeX Live 2013/W32TeX) (format=pdflatex 2013.8.31) 7 SEP 2013 22:51 -entering extended mode - restricted \write18 enabled. - %&-line parsing enabled. 
-**LoSharpe.tex -(./LoSharpe.tex -LaTeX2e <2011/06/27> -Babel <3.9f> and hyphenation patterns for 78 languages loaded. -(c:/texlive/2013/texmf-dist/tex/latex/base/article.cls -Document Class: article 2007/10/19 v1.4h Standard LaTeX document class -(c:/texlive/2013/texmf-dist/tex/latex/base/size12.clo -File: size12.clo 2007/10/19 v1.4h Standard LaTeX file (size option) -) -\c at part=\count79 -\c at section=\count80 -\c at subsection=\count81 -\c at subsubsection=\count82 -\c at paragraph=\count83 -\c at subparagraph=\count84 -\c at figure=\count85 -\c at table=\count86 -\abovecaptionskip=\skip41 -\belowcaptionskip=\skip42 -\bibindent=\dimen102 -) -(c:/texlive/2013/texmf-dist/tex/latex/psnfss/times.sty -Package: times 2005/04/12 PSNFSS-v9.2a (SPQR) -) -(c:/texlive/2013/texmf-dist/tex/latex/base/fontenc.sty -Package: fontenc 2005/09/27 v1.99g Standard LaTeX package - -(c:/texlive/2013/texmf-dist/tex/latex/base/t1enc.def -File: t1enc.def 2005/09/27 v1.99g Standard LaTeX file -LaTeX Font Info: Redeclaring font encoding T1 on input line 43. -)) -(c:/texlive/2013/texmf-dist/tex/latex/url/url.sty -\Urlmuskip=\muskip10 -Package: url 2006/04/12 ver 3.3 Verb mode for urls, etc. 
-) -(c:/texlive/2013/texmf-dist/tex/generic/babel/babel.sty -Package: babel 2013/05/16 v3.9f The Babel package - -(c:/texlive/2013/texmf-dist/tex/generic/babel-english/english.ldf -Language: english 2012/08/20 v3.3p English support from the babel system - -(c:/texlive/2013/texmf-dist/tex/generic/babel/babel.def -File: babel.def 2013/05/16 v3.9f Babel common definitions -\babel at savecnt=\count87 -\U at D=\dimen103 -) -\l at canadian = a dialect from \language\l at american -\l at australian = a dialect from \language\l at british -\l at newzealand = a dialect from \language\l at british -)) -(c:/PROGRA~1/R/R-30~1.1/share/texmf/tex/latex/Rd.sty -Package: Rd - -(c:/texlive/2013/texmf-dist/tex/latex/base/ifthen.sty -Package: ifthen 2001/05/26 v1.1c Standard LaTeX ifthen package (DPC) -) -(c:/texlive/2013/texmf-dist/tex/latex/tools/longtable.sty -Package: longtable 2004/02/01 v4.11 Multi-page Table package (DPC) -\LTleft=\skip43 -\LTright=\skip44 -\LTpre=\skip45 -\LTpost=\skip46 -\LTchunksize=\count88 -\LTcapwidth=\dimen104 -\LT at head=\box26 -\LT at firsthead=\box27 -\LT at foot=\box28 -\LT at lastfoot=\box29 -\LT at cols=\count89 -\LT at rows=\count90 -\c at LT@tables=\count91 -\c at LT@chunks=\count92 -\LT at p@ftn=\toks14 -) -(c:/texlive/2013/texmf-dist/tex/latex/tools/bm.sty -Package: bm 2004/02/26 v1.1c Bold Symbol Support (DPC/FMi) -\symboldoperators=\mathgroup4 -\symboldletters=\mathgroup5 -\symboldsymbols=\mathgroup6 -LaTeX Font Info: Redeclaring math alphabet \mathbf on input line 138. -LaTeX Info: Redefining \bm on input line 204. 
-) -(c:/texlive/2013/texmf-dist/tex/latex/base/alltt.sty -Package: alltt 1997/06/16 v2.0g defines alltt environment -) -(c:/texlive/2013/texmf-dist/tex/latex/tools/verbatim.sty -Package: verbatim 2003/08/22 v1.5q LaTeX2e package for verbatim enhancements -\every at verbatim=\toks15 -\verbatim at line=\toks16 -\verbatim at in@stream=\read1 -) -(c:/PROGRA~1/R/R-30~1.1/share/texmf/tex/latex/upquote.sty -Package: upquote 2003/08/11 v1.1 Covington's upright-quote modification to verb -atim and verb - -(c:/texlive/2013/texmf-dist/tex/latex/base/textcomp.sty -Package: textcomp 2005/09/27 v1.99g Standard LaTeX package -Package textcomp Info: Sub-encoding information: -(textcomp) 5 = only ISO-Adobe without \textcurrency -(textcomp) 4 = 5 + \texteuro -(textcomp) 3 = 4 + \textohm -(textcomp) 2 = 3 + \textestimated + \textcurrency -(textcomp) 1 = TS1 - \textcircled - \t -(textcomp) 0 = TS1 (full) -(textcomp) Font families with sub-encoding setting implement -(textcomp) only a restricted character set as indicated. -(textcomp) Family '?' is the default used for unknown fonts. -(textcomp) See the documentation for details. -Package textcomp Info: Setting ? sub-encoding to TS1/1 on input line 71. - -(c:/texlive/2013/texmf-dist/tex/latex/base/ts1enc.def -File: ts1enc.def 2001/06/05 v3.0e (jk/car/fm) Standard LaTeX file -) -LaTeX Info: Redefining \oldstylenums on input line 266. -Package textcomp Info: Setting cmr sub-encoding to TS1/0 on input line 281. -Package textcomp Info: Setting cmss sub-encoding to TS1/0 on input line 282. -Package textcomp Info: Setting cmtt sub-encoding to TS1/0 on input line 283. -Package textcomp Info: Setting cmvtt sub-encoding to TS1/0 on input line 284. -Package textcomp Info: Setting cmbr sub-encoding to TS1/0 on input line 285. -Package textcomp Info: Setting cmtl sub-encoding to TS1/0 on input line 286. -Package textcomp Info: Setting ccr sub-encoding to TS1/0 on input line 287. 
-Package textcomp Info: Setting ptm sub-encoding to TS1/4 on input line 288. -Package textcomp Info: Setting pcr sub-encoding to TS1/4 on input line 289. -Package textcomp Info: Setting phv sub-encoding to TS1/4 on input line 290. -Package textcomp Info: Setting ppl sub-encoding to TS1/3 on input line 291. -Package textcomp Info: Setting pag sub-encoding to TS1/4 on input line 292. -Package textcomp Info: Setting pbk sub-encoding to TS1/4 on input line 293. -Package textcomp Info: Setting pnc sub-encoding to TS1/4 on input line 294. -Package textcomp Info: Setting pzc sub-encoding to TS1/4 on input line 295. -Package textcomp Info: Setting bch sub-encoding to TS1/4 on input line 296. -Package textcomp Info: Setting put sub-encoding to TS1/5 on input line 297. -Package textcomp Info: Setting uag sub-encoding to TS1/5 on input line 298. -Package textcomp Info: Setting ugq sub-encoding to TS1/5 on input line 299. -Package textcomp Info: Setting ul8 sub-encoding to TS1/4 on input line 300. -Package textcomp Info: Setting ul9 sub-encoding to TS1/4 on input line 301. -Package textcomp Info: Setting augie sub-encoding to TS1/5 on input line 302. -Package textcomp Info: Setting dayrom sub-encoding to TS1/3 on input line 303. -Package textcomp Info: Setting dayroms sub-encoding to TS1/3 on input line 304. - -Package textcomp Info: Setting pxr sub-encoding to TS1/0 on input line 305. -Package textcomp Info: Setting pxss sub-encoding to TS1/0 on input line 306. -Package textcomp Info: Setting pxtt sub-encoding to TS1/0 on input line 307. -Package textcomp Info: Setting txr sub-encoding to TS1/0 on input line 308. -Package textcomp Info: Setting txss sub-encoding to TS1/0 on input line 309. -Package textcomp Info: Setting txtt sub-encoding to TS1/0 on input line 310. -Package textcomp Info: Setting lmr sub-encoding to TS1/0 on input line 311. -Package textcomp Info: Setting lmdh sub-encoding to TS1/0 on input line 312. 
-Package textcomp Info: Setting lmss sub-encoding to TS1/0 on input line 313. -Package textcomp Info: Setting lmssq sub-encoding to TS1/0 on input line 314. -Package textcomp Info: Setting lmvtt sub-encoding to TS1/0 on input line 315. -Package textcomp Info: Setting qhv sub-encoding to TS1/0 on input line 316. -Package textcomp Info: Setting qag sub-encoding to TS1/0 on input line 317. -Package textcomp Info: Setting qbk sub-encoding to TS1/0 on input line 318. -Package textcomp Info: Setting qcr sub-encoding to TS1/0 on input line 319. -Package textcomp Info: Setting qcs sub-encoding to TS1/0 on input line 320. -Package textcomp Info: Setting qpl sub-encoding to TS1/0 on input line 321. -Package textcomp Info: Setting qtm sub-encoding to TS1/0 on input line 322. -Package textcomp Info: Setting qzc sub-encoding to TS1/0 on input line 323. -Package textcomp Info: Setting qhvc sub-encoding to TS1/0 on input line 324. -Package textcomp Info: Setting futs sub-encoding to TS1/4 on input line 325. -Package textcomp Info: Setting futx sub-encoding to TS1/4 on input line 326. -Package textcomp Info: Setting futj sub-encoding to TS1/4 on input line 327. -Package textcomp Info: Setting hlh sub-encoding to TS1/3 on input line 328. -Package textcomp Info: Setting hls sub-encoding to TS1/3 on input line 329. -Package textcomp Info: Setting hlst sub-encoding to TS1/3 on input line 330. -Package textcomp Info: Setting hlct sub-encoding to TS1/5 on input line 331. -Package textcomp Info: Setting hlx sub-encoding to TS1/5 on input line 332. -Package textcomp Info: Setting hlce sub-encoding to TS1/5 on input line 333. -Package textcomp Info: Setting hlcn sub-encoding to TS1/5 on input line 334. -Package textcomp Info: Setting hlcw sub-encoding to TS1/5 on input line 335. -Package textcomp Info: Setting hlcf sub-encoding to TS1/5 on input line 336. -Package textcomp Info: Setting pplx sub-encoding to TS1/3 on input line 337. 
-Package textcomp Info: Setting pplj sub-encoding to TS1/3 on input line 338. -Package textcomp Info: Setting ptmx sub-encoding to TS1/4 on input line 339. -Package textcomp Info: Setting ptmj sub-encoding to TS1/4 on input line 340. -)) -\ldescriptionwidth=\skip47 - NOT loading ae -NOT loading times NOT loading lmodern) -(c:/PROGRA~1/R/R-30~1.1/share/texmf/tex/latex/Sweave.sty -Package: Sweave - -(c:/texlive/2013/texmf-dist/tex/latex/graphics/graphicx.sty -Package: graphicx 1999/02/16 v1.0f Enhanced LaTeX Graphics (DPC,SPQR) - -(c:/texlive/2013/texmf-dist/tex/latex/graphics/keyval.sty -Package: keyval 1999/03/16 v1.13 key=value parser (DPC) -\KV at toks@=\toks17 -) -(c:/texlive/2013/texmf-dist/tex/latex/graphics/graphics.sty -Package: graphics 2009/02/05 v1.0o Standard LaTeX Graphics (DPC,SPQR) - -(c:/texlive/2013/texmf-dist/tex/latex/graphics/trig.sty -Package: trig 1999/03/16 v1.09 sin cos tan (DPC) -) -(c:/texlive/2013/texmf-dist/tex/latex/latexconfig/graphics.cfg -File: graphics.cfg 2010/04/23 v1.9 graphics configuration of TeX Live -) -Package graphics Info: Driver file: pdftex.def on input line 91. 
- -(c:/texlive/2013/texmf-dist/tex/latex/pdftex-def/pdftex.def -File: pdftex.def 2011/05/27 v0.06d Graphics/color for pdfTeX - -(c:/texlive/2013/texmf-dist/tex/generic/oberdiek/infwarerr.sty -Package: infwarerr 2010/04/08 v1.3 Providing info/warning/error messages (HO) -) -(c:/texlive/2013/texmf-dist/tex/generic/oberdiek/ltxcmds.sty -Package: ltxcmds 2011/11/09 v1.22 LaTeX kernel commands for general use (HO) -) -\Gread at gobject=\count93 -)) -\Gin at req@height=\dimen105 -\Gin at req@width=\dimen106 -) -(c:/texlive/2013/texmf-dist/tex/latex/fancyvrb/fancyvrb.sty -Package: fancyvrb 2008/02/07 - -Style option: `fancyvrb' v2.7a, with DG/SPQR fixes, and firstline=lastline fix -<2008/02/07> (tvz) -\FV at CodeLineNo=\count94 -\FV at InFile=\read2 -\FV at TabBox=\box30 -\c at FancyVerbLine=\count95 -\FV at StepNumber=\count96 -\FV at OutFile=\write3 -) (c:/texlive/2013/texmf-dist/tex/latex/base/fontenc.sty -Package: fontenc 2005/09/27 v1.99g Standard LaTeX package - -(c:/texlive/2013/texmf-dist/tex/latex/base/t1enc.def -File: t1enc.def 2005/09/27 v1.99g Standard LaTeX file -LaTeX Font Info: Redeclaring font encoding T1 on input line 43. -)) -(c:/texlive/2013/texmf-dist/tex/latex/ae/ae.sty -Package: ae 2001/02/12 1.3 Almost European Computer Modern - -(c:/texlive/2013/texmf-dist/tex/latex/base/fontenc.sty -Package: fontenc 2005/09/27 v1.99g Standard LaTeX package - -(c:/texlive/2013/texmf-dist/tex/latex/base/t1enc.def -File: t1enc.def 2005/09/27 v1.99g Standard LaTeX file -LaTeX Font Info: Redeclaring font encoding T1 on input line 43. -) -LaTeX Font Info: Try loading font information for T1+aer on input line 100. - -(c:/texlive/2013/texmf-dist/tex/latex/ae/t1aer.fd -File: t1aer.fd 1997/11/16 Font definitions for T1/aer. -)))) (./LoSharpe.aux) -\openout1 = `LoSharpe.aux'. - -LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 12. -LaTeX Font Info: ... okay on input line 12. -LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 12. 
-LaTeX Font Info: ... okay on input line 12. -LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 12. -LaTeX Font Info: ... okay on input line 12. -LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 12. -LaTeX Font Info: ... okay on input line 12. [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3110 From noreply at r-forge.r-project.org Sun Sep 15 20:50:31 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 15 Sep 2013 20:50:31 +0200 (CEST) Subject: [Returnanalytics-commits] r3111 - pkg/PortfolioAnalytics/R Message-ID: <20130915185031.2A7B6184ECD@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-15 20:50:30 +0200 (Sun, 15 Sep 2013) New Revision: 3111 Modified: pkg/PortfolioAnalytics/R/extract.efficient.frontier.R Log: Adding optional argument to pass in risk_aversion parameters to construct a quadratic utility based efficient frontier Modified: pkg/PortfolioAnalytics/R/extract.efficient.frontier.R =================================================================== --- pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 2013-09-15 09:06:55 UTC (rev 3110) +++ pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 2013-09-15 18:50:30 UTC (rev 3111) @@ -105,10 +105,13 @@ #' @param portfolio a portfolio object with constraints and objectives created via \code{\link{portfolio.spec}} #' @param R an xts or matrix of asset returns #' @param n.portfolios number of portfolios to plot along the efficient frontier +#' @param risk_aversion vector of risk_aversion values to construct the efficient frontier. +#' \code{n.portfolios} is ignored if \code{risk_aversion} is specified and the number +#' of points along the efficient frontier is equal to the length of \code{risk_aversion}. 
#' @return a matrix of objective measure values and weights along the efficient frontier #' @author Ross Bennett #' @export -meanvar.efficient.frontier <- function(portfolio, R, n.portfolios=25){ +meanvar.efficient.frontier <- function(portfolio, R, n.portfolios=25, risk_aversion=NULL){ if(!is.portfolio(portfolio)) stop("portfolio object must be of class 'portfolio'") # step 1: find the minimum return given the constraints # step 2: find the maximum return given the constraints @@ -163,18 +166,26 @@ # length.out is the number of portfolios to create ret_seq <- seq(from=minret, to=maxret, length.out=n.portfolios) - out <- matrix(0, nrow=length(ret_seq), ncol=length(extractStats(tmp))) - +# out <- matrix(0, nrow=length(ret_seq), ncol=length(extractStats(tmp))) # for(i in 1:length(ret_seq)){ # portfolio$objectives[[mean_idx]]$target <- ret_seq[i] # out[i, ] <- extractStats(optimize.portfolio(R=R, portfolio=portfolio, optimize_method="ROI")) # } stopifnot("package:foreach" %in% search() || require("foreach",quietly = TRUE)) - out <- foreach(i=1:length(ret_seq), .inorder=TRUE, .combine=rbind, .errorhandling='remove') %dopar% { - portfolio$objectives[[mean_idx]]$target <- ret_seq[i] - extractStats(optimize.portfolio(R=R, portfolio=portfolio, optimize_method="ROI")) + if(!is.null(risk_aversion)){ + out <- foreach(i=1:length(risk_aversion), .inorder=TRUE, .combine=rbind, .errorhandling='remove') %dopar% { + portfolio$objectives[[var_idx]]$risk_aversion <- risk_aversion[i] + extractStats(optimize.portfolio(R=R, portfolio=portfolio, optimize_method="ROI")) + } + out <- cbind(out, risk_aversion) + colnames(out) <- c(names(stats), "lambda") + } else { + out <- foreach(i=1:length(ret_seq), .inorder=TRUE, .combine=rbind, .errorhandling='remove') %dopar% { + portfolio$objectives[[mean_idx]]$target <- ret_seq[i] + extractStats(optimize.portfolio(R=R, portfolio=portfolio, optimize_method="ROI")) + } + colnames(out) <- names(stats) } - colnames(out) <- names(stats) 
return(structure(out, class="frontier")) } @@ -292,6 +303,9 @@ #' @param portfolio object of class 'portfolio' specifying the constraints and objectives, see \code{\link{portfolio.spec}} #' @param type type of efficient frontier, see details #' @param n.portfolios number of portfolios to calculate along the efficient frontier +#' @param risk_aversion vector of risk_aversion values to construct the efficient frontier. +#' \code{n.portfolios} is ignored if \code{risk_aversion} is specified and the number +#' of points along the efficient frontier is equal to the length of \code{risk_aversion}. #' @param match.col column to match when extracting the efficient frontier from an objected created by optimize.portfolio #' @param search_size passed to \code{\link{optimize.portfolio}} for type="DEoptim" or type="random" #' @param ... passthrough parameters to \code{\link{optimize.portfolio}} @@ -304,7 +318,7 @@ #' \code{\link{meanetl.efficient.frontier}}, #' \code{\link{extract.efficient.frontier}} #' @export -create.EfficientFrontier <- function(R, portfolio, type, n.portfolios=25, match.col="ES", search_size=2000, ...){ +create.EfficientFrontier <- function(R, portfolio, type, n.portfolios=25, risk_aversion=NULL, match.col="ES", search_size=2000, ...){ # This is just a wrapper around a few functions to easily create efficient frontiers # given a portfolio object and other parameters call <- match.call() @@ -315,7 +329,8 @@ "mean-StdDev"=, "mean-var" = {frontier <- meanvar.efficient.frontier(portfolio=portfolio, R=R, - n.portfolios=n.portfolios) + n.portfolios=n.portfolios, + risk_aversion=risk_aversion) }, "mean-ETL"=, "mean-CVaR"=, @@ -376,10 +391,13 @@ #' \code{objective_measures} or \code{opt_values} slot in the object created #' by \code{\link{optimize.portfolio}}. #' @param n.portfolios number of portfolios to use to plot the efficient frontier +#' @param risk_aversion vector of risk_aversion values to construct the efficient frontier. 
+#' \code{n.portfolios} is ignored if \code{risk_aversion} is specified and the number +#' of points along the efficient frontier is equal to the length of \code{risk_aversion}. #' @return an \code{efficient.frontier} object with weights and other metrics along the efficient frontier #' @author Ross Bennett #' @export -extractEfficientFrontier <- function(object, match.col="ES", n.portfolios=25){ +extractEfficientFrontier <- function(object, match.col="ES", n.portfolios=25, risk_aversion=NULL){ # extract the efficient frontier from an optimize.portfolio output object call <- match.call() if(!inherits(object, "optimize.portfolio")) stop("object must be of class 'optimize.portfolio'") @@ -405,7 +423,7 @@ frontier <- meanetl.efficient.frontier(portfolio=portf, R=R, n.portfolios=n.portfolios) } if(match.col == "StdDev"){ - frontier <- meanvar.efficient.frontier(portfolio=portf, R=R, n.portfolios=n.portfolios) + frontier <- meanvar.efficient.frontier(portfolio=portf, R=R, n.portfolios=n.portfolios, risk_aversion=risk_aversion) } } # end optimize.portfolio.ROI From noreply at r-forge.r-project.org Sun Sep 15 23:18:08 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 15 Sep 2013 23:18:08 +0200 (CEST) Subject: [Returnanalytics-commits] r3112 - pkg/PortfolioAnalytics/demo Message-ID: <20130915211808.55B21186099@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-15 23:18:08 +0200 (Sun, 15 Sep 2013) New Revision: 3112 Modified: pkg/PortfolioAnalytics/demo/demo_efficient_frontier.R pkg/PortfolioAnalytics/demo/demo_group_ROI.R pkg/PortfolioAnalytics/demo/demo_random_portfolios.R Log: Minor cleanup of some demo scripts. 
Modified: pkg/PortfolioAnalytics/demo/demo_efficient_frontier.R =================================================================== --- pkg/PortfolioAnalytics/demo/demo_efficient_frontier.R 2013-09-15 18:50:30 UTC (rev 3111) +++ pkg/PortfolioAnalytics/demo/demo_efficient_frontier.R 2013-09-15 21:18:08 UTC (rev 3112) @@ -10,8 +10,6 @@ require(ROI.plugin.quadprog) require(ROI.plugin.glpk) -rm(list=ls()) - data(edhec) R <- edhec[, 1:5] # change the column names for better legends in plotting Modified: pkg/PortfolioAnalytics/demo/demo_group_ROI.R =================================================================== --- pkg/PortfolioAnalytics/demo/demo_group_ROI.R 2013-09-15 18:50:30 UTC (rev 3111) +++ pkg/PortfolioAnalytics/demo/demo_group_ROI.R 2013-09-15 21:18:08 UTC (rev 3112) @@ -2,7 +2,7 @@ library(PortfolioAnalytics) library(ROI) library(ROI.plugin.quadprog) -library(ROI.plugin.quadprog) +library(ROI.plugin.glpk) data(edhec) Modified: pkg/PortfolioAnalytics/demo/demo_random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/demo/demo_random_portfolios.R 2013-09-15 18:50:30 UTC (rev 3111) +++ pkg/PortfolioAnalytics/demo/demo_random_portfolios.R 2013-09-15 21:18:08 UTC (rev 3112) @@ -24,8 +24,8 @@ # The multicore package, and therefore registerDoMC, should not be used in a # GUI environment, because multiple processes then share the same GUI. Only use # when running from the command line -require(doMC) -registerDoMC(3) +# require(doMC) +# registerDoMC(3) data(edhec) From noreply at r-forge.r-project.org Mon Sep 16 07:19:14 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 07:19:14 +0200 (CEST) Subject: [Returnanalytics-commits] r3113 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . 
R man Message-ID: <20130916051914.90A9E185A7C@r-forge.r-project.org> Author: shubhanm Date: 2013-09-16 07:19:14 +0200 (Mon, 16 Sep 2013) New Revision: 3113 Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rhistory pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/inst/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/man/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/Read-and-delete-me Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd Log: documentation change Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rhistory =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rhistory 2013-09-15 21:18:08 UTC (rev 3112) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rhistory 2013-09-16 05:19:14 UTC (rev 3113) @@ -1,512 +0,0 @@ -for(i in 1:15) {if(managers[i,8]>0){splus[i]=managers[,8]}else{splus[i]=0}} -a=edhec[,1] -a=(edhec[:,1]>0) -a=(edhec[,1]>0) -head(a) -plot(a) -a=(edhec[,1]<0) -plot(a) -head(a) -a[,6] -a[,3] -lm(edhec[,1]~managers[,2]) -lm(edhec[,1]~managers[,8]) -a=managers[,8] -lm(edhec[1:132,1]~managers[,8]) -a=(edhec[,1]>0) -a -a[1,1] -a[1,1]*2 -a=(edhec[1:10,1]>0) -a -a=(edhec[1:15,1]>0) -a -a=(edhec[10:15,1]>0) -a -a*1 -a*edhec[10:15,1] -a=(edhec[10:15,1]>0) -a -a*edhec[0:15,1] -managers[,8] -head(managers[,8]) -sp+ = (managers[,8]>0)*managers[,8] -spplus = (managers[,8]>0)*managers[,8] -spminus = (managers[,8]<0)*managers[,8] -lm(edhec[,1]~spplus[]+spminus) -lm(edhec[1:132,1]~spplus[]+spminus) -lm(edhec[1:132,1]~spplus[]+spminus+managers[,9]) -table.autocorrelation -table.Autocorrelation -lm(edhec[1:132,1]~spplus[]+spminus+managers[,9]) -table.Autocorrelation(edhec,spplus) -table.Autocorrelation(edhec[,9],spplus) -table.Correlation(edhec[],managers[,8]) 
-table.Correlation(edhec[],spminus) -table.Correlation(edhec[],spplus) -table.Correlation(Return.okunev(edhec[]),spplus) -table.Correlation(Return.Okunev(edhec[]),spplus) -a=table.Correlation(Return.okunev(edhec[]),spplus) -a=table.Correlation(Return.Okunev(edhec[]),spplus) -b=table.Correlation(edhec[],spplus) -b-a -a=table.Correlation(Return.Okunev(edhec[]),sminus) -a=table.Correlation(Return.Okunev(edhec[]),spminus) -b=table.Correlation(edhec[],spminus) -a-b -a -b -chart(a) -plot(a) -plot(a[,1]) -chart.PerformanceSummary(edhec[,12]) -charts.PerformanceSummary(edhec[,12]) -charts.PerformanceSummary(edhec[,12],managers[,8]) -a=c(edhec[,12],managers[,8]) -a=cbind(edhec[,12],managers[,8]) -charts.PerformanceSummary(a) -a=cbind(edhec[,12],managers[,6]) -charts.PerformanceSummary(a) -a=cbind(edhec[,6],managers[,8]) -charts.PerformanceSummary(a) -b=table.Correlation(edhec[],spminus) -b -a=table.Correlation(Return.Okunev(edhec),spminus) -a -bb=cbind(spminus,spplus) -charts.PerformanceSummary(bb) -a=cbind(edhec[,6],managers[,8]) -charts.PerformanceSummary(a) -a=table.Correlation(edhec,spminus) -b=table.Correlation(edhec,spplus) -b-a -a -b -b=table.Correlation(edhec,managers[,8]) -b -b=table.Correlation(edhec,managers[,9]) -b -b=table.Correlation(Return.Okunev(edhec),managers[,9]) -b -VaR(edhec) -VaR(Return.Okunev(edhec)) -table.DrawdownsRatio(edhec) -table.DrawdownsRatio(Return.Okunev(edhec)) -table.DownsideRisk(Return.Okunev(edhec)) -table.DownsideRisk(edhec) -charts.PerformanceSummary(edhec) -charts.PerformanceSummary(Return.Okunev(edhec)) -a=edhec[,1] -a=edhec[140,1] -a -edhec[135:145,1] -edhec[125:145,1] -edhec[129:145,1] -edhec[135:145,1] -edhec[133:145,1] -edhec[132:145,1] -charts.PerformanceSummary(edhec[1:132,1:4],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[,1:4],colorset = rich6equal, lwd = 2, ylog = TRUE) -table.Autocorrelation(edhec) -chart.Autocorrelation(edhec) -chart.Autocorrelation(edhec[,1:4]) 
-chart.Autocorrelation(Return.Okunev(edhec[,1:4])) -charts.PerformanceSummary(edhec[,],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[132:152,],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[132:152,2],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[132:152,],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[132:152,1],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[132:152,2:5],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[132:152,2],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[,2],colorset = rich6equal, lwd = 2, ylog = TRUE) -table.stats(edhec) -table.Stats(edhec) -?table.Stats -data(edhec) -table.Stats(edhec[,1:3]) -t(table.Stats(edhec)) -result=t(table.Stats(edhec)) -require("Hmisc") -textplot(format.df(result, na.blank=TRUE, numeric.dollar=FALSE, cdec=c(rep(1,2),rep(3,14))), rmar = 0.8, cmar = 1.5, max.cex=.9, halign = "center", valign = "top", row.valign="center", wrap.rownames=10, wrap.colnames=10, mar = c(0,0,3,0)+0.1) -title(main="Statistics for EDHEC Indexes") -data(edhec) -table.Stats(edhec[,1:3]) -t(table.Stats(edhec)) -result=t(table.Stats(edhec[,1:3])) -require("Hmisc") -textplot(format.df(result, na.blank=TRUE, numeric.dollar=FALSE, cdec=c(rep(1,2),rep(3,14))), rmar = 0.8, cmar = 1.5, max.cex=.9, halign = "center", valign = "top", row.valign="center", wrap.rownames=10, wrap.colnames=10, mar = c(0,0,3,0)+0.1) -title(main="Statistics for EDHEC Indexes") -charts.PerformanceSummary(managers,colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(managers[,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") -dates <- data$X -values <- data[,-1] # convert percentage to return -COM <- as.xts(values, 
order.by=as.Date(dates)) -COM.09<-COM[,9:11] -charts.PerformanceSummary(COM.09[1:108,],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(COM[,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(COM[1:108,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -COM[,1] -a=COM[,1] -charts.PerformanceSummary(COM[108,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(COM[,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(COM[1:10,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -head(COM[1:10,1:6]) -head(COM[1:151,1:6]) -head(COM[151,1:6]) -head(COM[151,1:6]) -head(COM[36,1:6]) -head(COM[70,1:6]) -head(COM[75,1:6]) -head(COM[76,1:6]) -head(COM[65,1:6]) -head(COM[68,1:6]) -head(COM[140,1:6]) -head(COM[142,1:6]) -head(COM[145,1:6]) -charts.PerformanceSummary(COM[1:10,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(COM[68:145],1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(COM[68:145,1:6],colorset = rich6equal, lwd = 2, ylog = TRUE) -table.AnnualizedReturns(edhec) -table.AnnualizedReturns(Return.Okunev(edhec)) -charts.PerformanceSummary(edhec[,5],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[,8],colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[,5:8],colorset = rich6equal, lwd = 2, ylog = TRUE) -table.DownsideRisk(edhec) -table.DownsideRiskRatio(edhec) -chart.AcarSim(edhec) -AcarSim(edhec) -chart.AcarSim(Return.Okunev(edhec)) -data(managers) -head(managers) -a=managers[,1] -a[132] -data(edhec) -edhec -managers[,132] -managers[132,1] -edhec(132,1) -edhec[132,1] -edhec[120,1] -table.Correlation(edhec[1:120,],managers[,8]) -managers[12,8] -managers[13,8] -table.Correlation(edhec[1:120,],managers[13:132,8]) -table.Correlation(edhec[,],managers[,8]) -?table.DownSideRisk -?table.DownsideRisk -?table.DownsideRiskRatio -table.DownsideRisk(edhec) 
-table.DownsideRisk(Return.Okunev(edhec)) -q=table.DownsideRisk(Return.Okunev(edhec)) -p=table.DownsideRisk(edhec) -q-p -(q-p)/q -table.SpecificRisk(edhec) -table.SpecificRisk(edhec,managers[,8]) -table.SpecificRisk(edhec,managers[,8],0) -CAPM(edhec,managers[,8],0) -CAPM.beta(edhec,managers[,8],0) -CAPM.beta(Return.Okunev(edhec),managers[,8],0) -table.UpDownRatios -?table.UpDownRatios -table.UpDownRatios(edhec,managers[,8]) -Return.Annualized(edhec) -Return.annualized(edhec) -a=Return.annualized(edhec) -plot(a) -plot(a) -a -a=Return.annualized(Return.Okunev(edhec)) -a -table.Autocorrelation(edhec) -?SharpeRatio -?VaR -data(edhec) -VaR(edhec[,1:3,drop=FALSE],method="normal") -VaR(Return.Okunev(edhec[,1:3,drop=FALSE]),method="normal") -# now use Gaussian -VaR(edhec, p=.95, method="gaussian") -CAPM.jensenAlpha(edhec,managers[,8],Rf = managers[, "US 3m TR", drop=FALSE]) -CAPM.jensenAlpha(edhec,managers[,8],Rf = managers[12:132, "US 3m TR", drop=FALSE]) -CAPM.jensenAlpha(edhec,managers[12:132,8],Rf = managers[12:132, "US 3m TR", drop=FALSE]) -CAPM.jensenAlpha(edhec[1:121],managers[12:132,8],Rf = managers[12:132, "US 3m TR", drop=FALSE]) -SystematicRisk(edhec) -SystematicRisk(edhec,managers[,8]) -SystematicRisk(Return.Okunev(edhec),managers[,8]) -chart.RiskReturnScatter -chart.RiskReturnScatter(edhec) -chart.RiskReturnScatter(edhec[trailing36.rows,1:8], Rf=.03/12, main = "Trailing 36-Month Performance", colorset=c("red", rep("black",5), "orange", "green")) -trailing36.rows -table.Autocorrelation(edhec) -chart.QQPlot -?chart.QQPlot -x = checkData(managers[,2, drop = FALSE], na.rm = TRUE, method = "vector") -#layout(rbind(c(1,2),c(3,4))) -# Panel 1, Normal distribution -chart.QQPlot(x, main = "Normal Distribution", distribution = 'norm', envelope=0.95) -# Panel 2, Log-Normal distribution -fit = fitdistr(1+x, 'lognormal') -chart.QQPlot(1+x, main = "Log-Normal Distribution", envelope=0.95, distribution='lnorm') -#other options could include -#, meanlog = fit$estimate[[1]], 
sdlog = fit$estimate[[2]]) -## Not run: -# Panel 3, Skew-T distribution -library(sn) -fit = st.mle(y=x) -chart.QQPlot(x, main = "Skew T Distribution", envelope=0.95, -distribution = 'st', location = fit$dp[[1]], -scale = fit$dp[[2]], shape = fit$dp[[3]], df=fit$dp[[4]]) -#Panel 4: Stable Parietian -library(fBasics) -fit.stable = stableFit(x,doplot=FALSE) -chart.QQPlot(x, main = "Stable Paretian Distribution", envelope=0.95, -distribution = 'stable', alpha = fit(stable.fit)$estimate[[1]], -beta = fit(stable.fit)$estimate[[2]], gamma = fit(stable.fit)$estimate[[3]], -delta = fit(stable.fit)$estimate[[4]], pm = 0) -## End(Not run) -#end examples -?chart.Events -charts.Bar(edhec) -charts.Bar(edhec[,1]) -chart.VaRSensitivity(edhec[,1]) -managers[,132] -managers[132,1] -head(edhec) -table.Autocorrelation(edhec) -data(edhec) -library("noniid.sm", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") -data(edhec) -table.Autocorrelation(edhec) -a=table.Autocorrelation(edhec) -t(a) -t(a) -xtable(a) -install.packages("xtable") -library("xtable", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") -xtable(a) -install.packages("stargazer") -library(stargazer) -data(edhec) -stargazer(edhec[1:10,1]) -stargazer(edhec[1:10,1],summary=FALSE) -edhec -library("SweaveListingUtils", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") -stargazer(attitude) -stargazer(attitude) -stargazer(edhec) -stargazer(Return.Annualized(edhec)) -stargazer(Return.Annualized(edhec)) -CalmarRatio(edhec[,1:4]) -a=CalmarRatio(edhec[,1:4]) -summary(a) -fm2 <- lm(tlimth ~ sex * ethnicty, data = tli) -data(tli) -fm2 <- lm(tlimth ~ sex * ethnicty, data = tli) -print(xtable(anova(fm2)), type="html") -library(xtable) -library(xtable) -source('~/R/win-library/3.0/xtable/doc/xtableGallery.R') -getwd() -roxygenize(getwd()) -library("roxygen2", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") -roxygenize(getwd()) -?glm -glm -?lm -viewsource(lm) -view(lm) -detach("package:stats", 
unload=TRUE) -library("stats", lib.loc="C:/Program Files/R/R-3.0.1/library") -lm -?lm -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') -?glmi -?glmi -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') -roxygenize(getwd()) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R') -??noniid.sm -library(PerformanceAnalytics) -data(edhec) -table.EMaxDDGBM(edhec) -table.DrawDown(edhec) -table.DownSideRisk(edhec) -table.DownsideRisk(edhec) -a=table.DownsideRisk(edhec[,1:4]) -t(a) -a=table.DownsideRisk(edhec[,]) -t(a) -library(PerformanceAnalytics) -data(edhec) -table.EMaxDDGBM(edhec) -library(PerformanceAnalytics) -data(edhec) -b=table.EMaxDDGBM(edhec) -t(b) -Rank(edhec) -b[order()] -b[order(Expected Drawdown in %)] -rank(b) -rank(t(b)) -rank(t(b[:,3])) -b(,3) -b -b[1,2] -b[:,3] -b[,3] -b[3,] -rank(b[3,]) -rank(a[3,]) -rank(a[11,]) -rank(b[3,]) -rank(-a[11,]) -rank(b[3,]) -bb=rank(b[3,]) -cc=rank(-a[11,]) -bb-cc -table.stats -?table.Stats -table.Stats(edhec) -round(4.4) -round(4.444444) -round(4.444444,5) -round(4.444444,3) -round(CalmarRatio.Norm(COM.09,1),4) -round(SterlingRatio.Norm(COM.09,1),4) -round(CalmarRatio.Norm(edhec,1),4) -round(SterlingRatio.Norm(edhec,1),4) -round(CalmarRatio.Norm(edhec[,1:4],1),4) -round(CalmarRatio(edhec[,1:4],1),4) 
-ES(edhec,.95,method="gaussian") -chart.Autocorrelation(edhec) -chart.Autocorrelation(COM.09) -chart.Autocorrelation(COM.09) -table.normDD(edhec) -table.EMaxDDGBM(edhec) -EmaxDDGBM(edhec) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -EmaxDDGBM(edhec) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -EmaxDDGBM(edhec) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -EmaxDDGBM(edhec) -EmaxDDGBM(edhec[,2]) -EmaxDDGBM(edhec[,13]) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -EmaxDDGBM(edhec) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.EMaxDDGBM.R') -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -EmaxDDGBM(edhec) -table.EMaxDDGBM(edhec) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -EmaxDDGBM(edhec) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -EmaxDDGBM(managers) -data(managers) -EmaxDDGBM(managers) -roxygenize(getwd()) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -roxygenize(getwd()) -data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") -dates <- data$X -values <- data[,-1] # convert percentage to return -COM <- as.xts(values, order.by=as.Date(dates)) -COM.09<-COM[,9:11] -charts.PerformanceSummary(COM.09[1:108,],colorset = rich6equal, lwd = 2, ylog = TRUE) -table.DrawdownsRatio(COM.09) 
-table.Drawdowns(COM.09) -table.DownsideRisk(COM.09) -EmaxDDGBM(COM.09) -EmaxDDGBM(edhec) -table.DownsideRisk(COM.09) -table.DownsideRisk(edhec)[11,] -EmaxDDGBM(edhec) -a=EmaxDDGBM(edhec) -b=table.DownsideRisk(edhec)[11,] -a-(b*100) -a+(b*100) -charts.PerformanceSummary(edhec,colorset = rich6equal, lwd = 2, ylog = TRUE) -charts.PerformanceSummary(edhec[,10:13],colorset = rich6equal, lwd = 2, ylog = TRUE) -EmaxDDGBM(edhec) -?EmaxDDGBM -ES(edhec[1:4],.05,method="gaussian") -ES(edhec[1:4],.95,method="gaussian") -ES(edhec[2:4],.95,method="gaussian") -ES(edhec[,2:4],.95,method="gaussian") -EmaxDDGBM(edhec[,1:4]) -data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") -dates <- data$X -values <- data[,-1] # convert percentage to return -COM <- as.xts(values, order.by=as.Date(dates)) -COM.09<-COM[,9:11] -Vol1 = EMaxDDGBM(COM.09) -Vol1 -Vol1 -Vol2 = -ES(COM.09,.95,method="gaussian") -Vol2 -data <- read.csv("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv") -dates <- data$X -values <- data[,-1] # convert percentage to return -COM <- as.xts(values, order.by=as.Date(dates)) -COM.09<-COM[,9:11] -Vol1 = EMaxDDGBM(managers) -Vol1 -data(edhec) -EmaxDDGBM(edhec) -data(edhec) -EmaxDDGBM(managers) -data(edhec) -EmaxDDGBM(COM.09) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/R/EmaxDDGBM.R') -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/R/maxDDGBM.R') -head(managers) -head(COM.09) -head(COM) -charts.PerformanceSummary(COM) -charts.PerformanceSummary(COM[,1:7]) -charts.PerformanceSummary(COM[,8:11]) -?glm -?CalmarRatio -??CalmarRatio -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') -?lmi -??lmi -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') -getwd() -roxygenize(getwd()) 
-library("roxygen2", lib.loc="C:/Users/shubhankit/Documents/R/win-library/3.0") -roxygenize(getwd()) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') -?lmi -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') -roxygenize(getwd()) -source('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R') -roxygenize(getwd()) -?lm -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') -roxygenize(getwd()) -?glm -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') -?lm -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') -roxygenize(getwd()) -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') -roxygenize(getwd()) -?glm -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R', encoding='UTF-8') -roxygenize(getwd()) -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R', encoding='UTF-8') -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R', encoding='UTF-8') -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R', encoding='UTF-8') 
-roxygenize(getwd()) -source.with.encoding('C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R', encoding='UTF-8') -roxygenize(getwd()) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-15 21:18:08 UTC (rev 3112) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/NAMESPACE 2013-09-16 05:19:14 UTC (rev 3113) @@ -15,7 +15,7 @@ export(SterlingRatio.Norm) export(table.ComparitiveReturn.GLM) export(table.EMaxDDGBM) -export(table.NormDD) +export(table.normDD) export(table.Sharpe) export(table.UnsmoothReturn) export(UnsmoothReturn) Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R 2013-09-15 21:18:08 UTC (rev 3112) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R 2013-09-16 05:19:14 UTC (rev 3113) @@ -26,7 +26,7 @@ #' @seealso Drawdowns.R #' @rdname table.normDD #' @export -table.NormDD <- +table.normDD <- function (R,digits =4) {# @author Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/Read-and-delete-me =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/Read-and-delete-me 2013-09-15 21:18:08 UTC (rev 3112) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/Read-and-delete-me 2013-09-16 05:19:14 UTC (rev 3113) @@ -1,9 +0,0 @@ -* Edit the help file skeletons in 'man', possibly combining help files for multiple - functions. -* Edit the exports in 'NAMESPACE', and add necessary imports. -* Put any C/C++/Fortran code in 'src'. -* If you have compiled code, add a useDynLib() directive to 'NAMESPACE'. -* Run R CMD build to build the package tarball. 
-* Run R CMD check to check the package tarball. - -Read "Writing R Extensions" for more information. Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd 2013-09-15 21:18:08 UTC (rev 3112) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd 2013-09-16 05:19:14 UTC (rev 3113) @@ -1,8 +1,8 @@ -\name{table.NormDD} -\alias{table.NormDD} +\name{table.normDD} +\alias{table.normDD} \title{Generalised Lambda Distribution Simulated Drawdown} \usage{ - table.NormDD(R, digits = 4) + table.normDD(R, digits = 4) } \arguments{ \item{R}{an xts, vector, matrix, data frame, timeSeries From noreply at r-forge.r-project.org Mon Sep 16 10:06:30 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 10:06:30 +0200 (CEST) Subject: [Returnanalytics-commits] r3114 - in pkg/Meucci: . 
R demo Message-ID: <20130916080631.0542C185EBE@r-forge.r-project.org> Author: xavierv Date: 2013-09-16 10:06:30 +0200 (Mon, 16 Sep 2013) New Revision: 3114 Modified: pkg/Meucci/R/BlackScholesCallPrice.R pkg/Meucci/R/ButterflyTradingFunctions.R pkg/Meucci/R/CentralAndStandardizedStatistics.R pkg/Meucci/R/CovertCompoundedReturns2Price.R pkg/Meucci/R/EfficientFrontierPrices.R pkg/Meucci/R/EfficientFrontierReturns.R pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R pkg/Meucci/R/FitExpectationMaximization.R pkg/Meucci/R/FitMultivariateGarch.R pkg/Meucci/R/FitOrnsteinUhlenbeck.R pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R pkg/Meucci/R/InterExtrapolate.R pkg/Meucci/R/InvariantProjection.R pkg/Meucci/R/Log2Lin.R pkg/Meucci/R/MaxRsqCS.R pkg/Meucci/R/MaxRsqTS.R pkg/Meucci/R/MvnRnd.R pkg/Meucci/R/PerformIidAnalysis.R pkg/Meucci/R/PlotCompositionEfficientFrontier.R pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R pkg/Meucci/R/QuantileMixture.R pkg/Meucci/R/RandNormalInverseWishart.R pkg/Meucci/R/SimulateJumpDiffusionMerton.R pkg/Meucci/TODO pkg/Meucci/demo/ButterflyTrading.R pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R pkg/Meucci/demo/S_BlackLittermanBasic.R pkg/Meucci/demo/S_BondProjectionPricingNormal.R pkg/Meucci/demo/S_BuyNHold.R pkg/Meucci/demo/S_CPPI.R pkg/Meucci/demo/S_CallsProjectionPricing.R pkg/Meucci/demo/S_CornishFisher.R pkg/Meucci/demo/S_CorrelationPriorUniform.R pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R pkg/Meucci/demo/S_CrossSectionIndustries.R pkg/Meucci/demo/S_ESContributionFactors.R pkg/Meucci/demo/S_ESContributionsStudentT.R pkg/Meucci/demo/S_EigenvalueDispersion.R pkg/Meucci/demo/S_EquityProjectionPricing.R pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R pkg/Meucci/demo/S_EstimateQuantileEvaluation.R pkg/Meucci/demo/S_Estimator.R pkg/Meucci/demo/S_EvaluationGeneric.R pkg/Meucci/demo/S_ExactMeanAndCovariance.R 
pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R pkg/Meucci/demo/S_ExtremeValueTheory.R pkg/Meucci/demo/S_FactorAnalysisNotOk.R pkg/Meucci/demo/S_FactorResidualCorrelation.R pkg/Meucci/demo/S_FitSwapToStudentT.R pkg/Meucci/demo/S_FixedIncomeInvariants.R pkg/Meucci/demo/S_GenerateMixtureSample.R pkg/Meucci/demo/S_HedgeOptions.R pkg/Meucci/demo/S_HorizonEffect.R pkg/Meucci/demo/S_InvestorsObjective.R pkg/Meucci/demo/S_JumpDiffusionMerton.R pkg/Meucci/demo/S_LinVsLogReturn.R pkg/Meucci/demo/S_MarkovChainMonteCarlo.R pkg/Meucci/demo/S_MaxMinVariance.R pkg/Meucci/demo/S_MaximumLikelihood.R pkg/Meucci/demo/S_MeanVarianceBenchmark.R pkg/Meucci/demo/S_MeanVarianceCalls.R pkg/Meucci/demo/S_MeanVarianceHorizon.R pkg/Meucci/demo/S_MeanVarianceOptimization.R pkg/Meucci/demo/S_MultiVarSqrRootRule.R pkg/Meucci/demo/S_PasturMarchenko.R pkg/Meucci/demo/S_ProjectNPriceMvGarch.R pkg/Meucci/demo/S_ProjectSummaryStatistics.R pkg/Meucci/demo/S_PureResidualBonds.R pkg/Meucci/demo/S_ResidualAnalysisTheory.R pkg/Meucci/demo/S_SelectionHeuristics.R pkg/Meucci/demo/S_SemiCircular.R pkg/Meucci/demo/S_ShrinkageEstimators.R pkg/Meucci/demo/S_StatArbSwaps.R pkg/Meucci/demo/S_TStatApprox.R pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R pkg/Meucci/demo/S_TimeSeriesIndustries.R pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R pkg/Meucci/demo/S_Toeplitz.R pkg/Meucci/demo/S_UtilityMax.R pkg/Meucci/demo/S_VaRContributionsUniform.R pkg/Meucci/demo/S_VolatilityClustering.R pkg/Meucci/demo/S_Wishart.R Log: -changed how book is referenced Modified: pkg/Meucci/R/BlackScholesCallPrice.R =================================================================== --- pkg/Meucci/R/BlackScholesCallPrice.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/BlackScholesCallPrice.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -16,7 +16,7 @@ #' Code is vectorized, so the inputs can be vectors or matrices (but sizes must match) #' #' @references -#' \url{http://symmys.com/node/170} +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "BlackScholesCallPrice.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/ButterflyTradingFunctions.R =================================================================== --- pkg/Meucci/R/ButterflyTradingFunctions.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/ButterflyTradingFunctions.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -15,10 +15,10 @@ #' Compute the pricing in the horizon, as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", #' The Risk Magazine, October 2008, p 100-106. #' -#' @param Butterflies : List of securities with some analytics computed. -#' @param X : Panel of joint factors realizations +#' @param Butterflies List of securities with some analytics computed. +#' @param X Panel of joint factors realizations #' -#' @return PnL : Matrix of profit and loss scenarios +#' @return PnL Matrix of profit and loss scenarios #' #' @references #' A. Meucci, "Fully Flexible Views: Theory and Practice" \url{http://www.symmys.com/node/158} Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R =================================================================== --- pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -8,7 +8,7 @@ #' @return mu : [vector] (1 x N) central moments up to order N #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "CentralAndStandardizedStatistics.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/CovertCompoundedReturns2Price.R =================================================================== --- pkg/Meucci/R/CovertCompoundedReturns2Price.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/CovertCompoundedReturns2Price.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -9,7 +9,7 @@ #' @return Cov_Prices : [matrix] (N x N) covariance matrix of prices #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See (6.77)-(6.79) in "Risk and Asset Allocation"-Springer (2005), by A. Meucci #' See Meucci's script for "ConvertCompoundedReturns2Price.m" #' Modified: pkg/Meucci/R/EfficientFrontierPrices.R =================================================================== --- pkg/Meucci/R/EfficientFrontierPrices.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/EfficientFrontierPrices.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -12,7 +12,7 @@ #' @return Composition : [matrix] (NumPortf x N) optimal portfolios #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "EfficientFrontierReturns.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/EfficientFrontierReturns.R =================================================================== --- pkg/Meucci/R/EfficientFrontierReturns.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/EfficientFrontierReturns.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -11,7 +11,7 @@ #' @return Composition : [matrix] (NumPortf x N) optimal portfolios #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "EfficientFrontierReturns.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R =================================================================== --- pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -12,7 +12,7 @@ #' @return Composition : [matrix] (NumPortf x N) optimal portfolios #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "EfficientFrontierReturnsBenchmark.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/FitExpectationMaximization.R =================================================================== --- pkg/Meucci/R/FitExpectationMaximization.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/FitExpectationMaximization.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -9,7 +9,7 @@ #' @return CountLoop : [scalar] number of iterations of the algorithm #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "FitExpectationMaximization.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/FitMultivariateGarch.R =================================================================== --- pkg/Meucci/R/FitMultivariateGarch.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/FitMultivariateGarch.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -14,7 +14,7 @@ #' @note Initially written by Olivier Ledoit and Michael Wolf #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "FitMultivariateGarch.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -127,7 +127,7 @@ #' Difference with garch1f: errors come from the score alone #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "FitMultivariateGarch.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -390,7 +390,7 @@ #' Steepest Ascent on boundary, Hessian off boundary, no grid search #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "FitMultivariateGarch.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/FitOrnsteinUhlenbeck.R =================================================================== --- pkg/Meucci/R/FitOrnsteinUhlenbeck.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/FitOrnsteinUhlenbeck.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -13,7 +13,7 @@ #' o dB_t: vector of Brownian motions #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "FitOrnsteinUhlenbeck.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R =================================================================== --- pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -12,7 +12,7 @@ # R is a distribution on (0,1) proportional to r^(Dims-1), i.e. the area of surface of radius r #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "GenerateUniformDrawsOnUnitSphere.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/InterExtrapolate.R =================================================================== --- pkg/Meucci/R/InterExtrapolate.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/InterExtrapolate.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -22,7 +22,7 @@ #' Extrapolating long distances outside the support of V is rarely advisable. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "InterExtrapolate.R" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/InvariantProjection.R =================================================================== --- pkg/Meucci/R/InvariantProjection.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/InvariantProjection.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -1,6 +1,6 @@ #' Transforms the first n raw moments into the first n central moments #' -#' step 6 of projection process: +#' Step 6 of projection process: #' #' compute multi-period central moments. #' @@ -16,10 +16,8 @@ #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management". See page 9 -#' Symmys site containing original MATLAB source code \url{http://www.symmys.com} -#' -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management". +#' Symmys site containing original MATLAB source code \url{http://symmys.com/node/170}. #' See Meucci's script for "Raw2Central.m" #' @export Raw2Central = function( mu_ ) @@ -44,7 +42,7 @@ #' Map cumulative moments into raw moments. 
#' -#' step 5 of the projection process: +#' Step 5 of the projection process: #' #' From the cumulants of Y we compute the raw non-central moments of Y #' @@ -61,11 +59,10 @@ #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "Cumul2Raw.m". #' -#' A. Meucci - "Annualization and General Projection of Skewness, Kurtosis and All Summary Statistics" - formula (24) -#' Symmys site containing original MATLAB source code \url{http://www.symmys.com/node/136} +#' A. Meucci - "Annualization and General Projection of Skewness, Kurtosis and All Summary Statistics" - formula (24) \url{http://www.symmys.com/node/136} #' @export Cumul2Raw = function( ka ) @@ -106,7 +103,7 @@ #' A. Meucci - "Annualization and General Projection of Skewness, Kurtosis and All Summary Statistics" - formula (21) #' Symmys site containing original MATLAB source code \url{http://www.symmys.com/node/136} #' -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "Raw2Cumul.m" #' @export @@ -130,7 +127,7 @@ #' Transforms first n central moments into first n raw moments (first central moment defined as expectation) #' -#' step 2 of projection process: From the central moments of step 1, we compute the non-central moments. To do so we start +#' Step 2 of projection process: From the central moments of step 1, we compute the non-central moments. To do so we start #' with the first non-central moment and apply recursively an identity (formula 20) #' #' \deqn{ \tilde{ \mu }^{ \big(1\big) }_{X} \equiv \mu ^{\big(1\big)}_{X} @@ -146,7 +143,7 @@ #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management". See page 10. 
#' Symmys site containing original MATLAB source code \url{http://www.symmys.com} #' -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "Central2Raw.m" #' @export Central2Raw = function( mu ) @@ -170,7 +167,7 @@ #' Compute summary stats #' -#' step 0 in projection process: Compute summary stats (mean, skew, kurtosis, etc.) of the invariant X-t +#' Step 0 in projection process: Compute summary stats (mean, skew, kurtosis, etc.) of the invariant X-t #' step 1 in the project process We collect the first 'n' central moments of the invariant X-t. #' #' @param X an invariant Modified: pkg/Meucci/R/Log2Lin.R =================================================================== --- pkg/Meucci/R/Log2Lin.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/Log2Lin.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -8,7 +8,7 @@ #' @return S : [matrix] (N x N) #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "Log2Lin.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/MaxRsqCS.R =================================================================== --- pkg/Meucci/R/MaxRsqCS.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/MaxRsqCS.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -18,7 +18,7 @@ #' Initial code by Tai-Ho Wang #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "MaxRsqCS.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/MaxRsqTS.R =================================================================== --- pkg/Meucci/R/MaxRsqTS.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/MaxRsqTS.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -18,7 +18,7 @@ #' Initial code by Tai-Ho Wang #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "MaxRsqTS.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/MvnRnd.R =================================================================== --- pkg/Meucci/R/MvnRnd.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/MvnRnd.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -8,7 +8,7 @@ #' @return X : [matrix] (J x N) of drawsF_U : [vector] (J x 1) PDF values #' #' @references -#' \url{http://symmys.com/node/170}, \url{http://www.symmys.com/node/162}{A. Meucci - "Simulations with Exact Means and Covariances", Risk, July 2009} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}., \url{http://www.symmys.com/node/162}{A. Meucci - "Simulations with Exact Means and Covariances", Risk, July 2009} #' See Meucci's script for "MvnRnd.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} and Ram Ahluwalia \email{rahluwalia@@gmail.com} Modified: pkg/Meucci/R/PerformIidAnalysis.R =================================================================== --- pkg/Meucci/R/PerformIidAnalysis.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/PerformIidAnalysis.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -11,7 +11,7 @@ # under i.i.d. the location-dispersion ellipsoid should be a circle #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "PerformIidAnalysis.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/PlotCompositionEfficientFrontier.R =================================================================== --- pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -4,7 +4,7 @@ #' @param Portfolios : [matrix] (M x N) M portfolios of size N (weights) #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "PlotCompositionEfficientFrontier.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R =================================================================== --- pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -16,7 +16,7 @@ #' inv(Sigma) ~ W(Nu_0,inv(Sigma_0)/Nu_0) #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "QuantileMixture.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R =================================================================== --- pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/PlotVolVsCompositionEfficientFrontier.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -5,7 +5,7 @@ #' @param vol : [vector] (M x 1) of volatilities #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "PlotVolVsCompositionEfficientFrontier.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/QuantileMixture.R =================================================================== --- pkg/Meucci/R/QuantileMixture.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/QuantileMixture.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -13,7 +13,7 @@ #' @return Q : [scalar] quantile #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "QuantileMixture.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/RandNormalInverseWishart.R =================================================================== --- pkg/Meucci/R/RandNormalInverseWishart.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/RandNormalInverseWishart.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -17,7 +17,7 @@ #' inv(Sigma) ~ W(Nu_0,inv(Sigma_0)/Nu_0) #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "RandNormalInverseWishart.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/SimulateJumpDiffusionMerton.R =================================================================== --- pkg/Meucci/R/SimulateJumpDiffusionMerton.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/R/SimulateJumpDiffusionMerton.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -13,7 +13,7 @@ #' @return X : [matrix] (J x length(ts)) of simulations #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "SimulateJumpDiffusionMerton.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/TODO =================================================================== --- pkg/Meucci/TODO 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/TODO 2013-09-16 08:06:30 UTC (rev 3114) @@ -11,5 +11,5 @@ * Still 2 scripts left from the book: S_MeanVarianceCallsRobust from chapter 9 and S_OptionReplication from chapter 6 * Improve documentation for every script from the book: - find the exercises and sections they come from - - write down the formulas - + - write down the equations +* Not Sure if EntropyProg returns what it should with empty matrices as arguments for the constraints Modified: pkg/Meucci/demo/ButterflyTrading.R =================================================================== --- pkg/Meucci/demo/ButterflyTrading.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/ButterflyTrading.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -77,7 +77,7 @@ # .2 is the confidence on View 1; .25 is the confidence on View 2; .2 is the confidence on View 3 c = cbind( 0.35 , 0.2 , 0.25 , 0.2 ) -p_= cbind( p , p_1 , p_2 , p_3 ) %*% t(c) # compute the uncertainty weighted posterior probabilities +p_= cbind( factorsDistribution$p , p_1 , p_2 , p_3 ) %*% t(c) # compute the uncertainty weighted posterior probabilities ########################################################################################################### Modified: pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R =================================================================== --- pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -4,7 +4,7 @@ #' Described in A. Meucci,"Risk and Asset Allocation",Springer, 2005, Chapter 7. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_AnalyzeNormalInverseWishart.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_BlackLittermanBasic.R =================================================================== --- pkg/Meucci/demo/S_BlackLittermanBasic.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_BlackLittermanBasic.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -5,7 +5,7 @@ #' Springer, 2005, Chapter 9. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_BlackLittermanBasic.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_BondProjectionPricingNormal.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -5,7 +5,7 @@ #'"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_BondProjectionPricingNormal.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_BuyNHold.R =================================================================== --- pkg/Meucci/demo/S_BuyNHold.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_BuyNHold.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -2,7 +2,7 @@ #' Springer, 2005, Chapter 6. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "S_BuyNHold.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CPPI.R =================================================================== --- pkg/Meucci/demo/S_CPPI.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_CPPI.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -2,7 +2,7 @@ #' A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 6. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_CPPI.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CallsProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -3,7 +3,7 @@ #'"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_CallsProjectionPricing.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CornishFisher.R =================================================================== --- pkg/Meucci/demo/S_CornishFisher.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_CornishFisher.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -2,7 +2,7 @@ #'assumptions as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 5. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "S_CornishFisher.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CorrelationPriorUniform.R =================================================================== --- pkg/Meucci/demo/S_CorrelationPriorUniform.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_CorrelationPriorUniform.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -3,7 +3,7 @@ #' Chapter 7. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_CorrelationPriorUniform.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -3,7 +3,7 @@ #' Springer, 2005, Chapter 3. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_CrossSectionConstrainedIndustries.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -2,7 +2,7 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "S_CrossSectionIndustries.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_ESContributionFactors.R =================================================================== --- pkg/Meucci/demo/S_ESContributionFactors.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_ESContributionFactors.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -5,7 +5,7 @@ #' Springer, 2005, Chapter 5. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_ESContributionFactors.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_ESContributionsStudentT.R =================================================================== --- pkg/Meucci/demo/S_ESContributionsStudentT.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_ESContributionsStudentT.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -6,7 +6,7 @@ #' Described in A. Meucci,"Risk and Asset Allocation",Springer, 2005, Chapter 5. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_ESContributionsStudentT.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_EigenvalueDispersion.R =================================================================== --- pkg/Meucci/demo/S_EigenvalueDispersion.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_EigenvalueDispersion.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -2,7 +2,7 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "S_EigenValueDispersion.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_EquityProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_EquityProjectionPricing.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_EquityProjectionPricing.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -6,7 +6,7 @@ #' chapter 3. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_EquitiesInvariance.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -2,7 +2,7 @@ #' and inefficiency, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_EigenValueDispersion.R" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -2,7 +2,7 @@ #'bias and inefficiency as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "S_EstimateMomentsComboEvaluation.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_EstimateQuantileEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateQuantileEvaluation.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_EstimateQuantileEvaluation.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -1,7 +1,7 @@ #'This script familiarizes the user with the evaluation of an estimator:replicability, loss, error, #'bias and inefficiency as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 4. #' -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_EstimateQuantileEvaluation.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_Estimator.R =================================================================== --- pkg/Meucci/demo/S_Estimator.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_Estimator.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -2,7 +2,7 @@ #', as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for "S_EigenValueprintersion.R" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_EvaluationGeneric.R =================================================================== --- pkg/Meucci/demo/S_EvaluationGeneric.R 2013-09-16 05:19:14 UTC (rev 3113) +++ pkg/Meucci/demo/S_EvaluationGeneric.R 2013-09-16 08:06:30 UTC (rev 3114) @@ -9,7 +9,7 @@ #' compute optimal allocation, only possible if hidden parameters were known: thus it is not a "decision", we call it a "choice" #' #' @references -#' \url{http://symmys.com/node/170} +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for " EvaluationChoiceOptimal.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -40,7 +40,7 @@ #' @return CertaintyEquivalent : [scalar] #' #' @references -#' \url{http://symmys.com/node/170} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' See Meucci's script for " EvaluationSatisfaction.m" #' [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3114 From noreply at r-forge.r-project.org Mon Sep 16 10:26:10 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 10:26:10 +0200 (CEST) Subject: [Returnanalytics-commits] r3115 - in pkg/Meucci: R demo man Message-ID: <20130916082610.CC041185A5F@r-forge.r-project.org> Author: xavierv Date: 2013-09-16 10:26:10 +0200 (Mon, 16 Sep 2013) New Revision: 3115 Modified: pkg/Meucci/R/LognormalMoments2Parameters.R pkg/Meucci/demo/00Index pkg/Meucci/demo/S_LognormalSample.R pkg/Meucci/man/BlackScholesCallPrice.Rd pkg/Meucci/man/Central2Raw.Rd pkg/Meucci/man/CentralAndStandardizedStatistics.Rd pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd pkg/Meucci/man/Cumul2Raw.Rd pkg/Meucci/man/EfficientFrontierPrices.Rd pkg/Meucci/man/EfficientFrontierReturns.Rd pkg/Meucci/man/EfficientFrontierReturnsBenchmark.Rd pkg/Meucci/man/FitExpectationMaximization.Rd pkg/Meucci/man/FitMultivariateGarch.Rd pkg/Meucci/man/FitOrnsteinUhlenbeck.Rd pkg/Meucci/man/GenerateUniformDrawsOnUnitSphere.Rd pkg/Meucci/man/HorizonPricing.Rd pkg/Meucci/man/InterExtrapolate.Rd pkg/Meucci/man/Log2Lin.Rd pkg/Meucci/man/LognormalMoments2Parameters.Rd pkg/Meucci/man/MaxRsqCS.Rd pkg/Meucci/man/MaxRsqTS.Rd pkg/Meucci/man/MvnRnd.Rd pkg/Meucci/man/PerformIidAnalysis.Rd pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd pkg/Meucci/man/PlotMarginalsNormalInverseWishart.Rd 
pkg/Meucci/man/PlotVolVsCompositionEfficientFrontier.Rd pkg/Meucci/man/QuantileMixture.Rd pkg/Meucci/man/RandNormalInverseWishart.Rd pkg/Meucci/man/Raw2Central.Rd pkg/Meucci/man/Raw2Cumul.Rd pkg/Meucci/man/SimulateJumpDiffusionMerton.Rd pkg/Meucci/man/SummStats.Rd pkg/Meucci/man/garch1f4.Rd pkg/Meucci/man/garch2f8.Rd Log: - improved description for demo files and generated documentation for last commit Modified: pkg/Meucci/R/LognormalMoments2Parameters.R =================================================================== --- pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-16 08:06:30 UTC (rev 3114) +++ pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-16 08:26:10 UTC (rev 3115) @@ -1,16 +1,18 @@ -#' Compute the mean and standard deviation of a lognormal distribution from its parameters, as described in -#' A. Meucci, "Risk and Asset Allocation", Springer, 2005. +#' @title Computes the mean and standard deviation of a lognormal distribution from its parameters. #' -#' @param e : [scalar] expected value of the lognormal distribution -#' @param v : [scalar] variance of the lognormal distribution +#' @description Computes the mean and standard deviation of a lognormal distribution from its parameters, as described in +#' A. Meucci, "Risk and Asset Allocation", Springer, 2005. +#' +#' @param e [scalar] expected value of the lognormal distribution +#' @param v [scalar] variance of the lognormal distribution #' -#' @return mu : [scalar] expected value of the normal distribution -#' @return sig2 : [scalar] variance of the normal distribution +#' @return mu [scalar] expected value of the normal distribution +#' @return sig2 [scalar] variance of the normal distribution #' #' @note Inverts the formulas (1.98)-(1.99) in "Risk and Asset Allocation", Springer, 2005. #' #' @references -#' \url{http://} +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}., "E 25- Simulation of a lognormal random variable" #' See Meucci's script for "LognormalMoments2Parameters.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/00Index =================================================================== --- pkg/Meucci/demo/00Index 2013-09-16 08:06:30 UTC (rev 3114) +++ pkg/Meucci/demo/00Index 2013-09-16 08:26:10 UTC (rev 3115) @@ -1,107 +1,106 @@ -AnalyticalvsNumerical This example script compares the numerical and the analytical solution of entropy-pooling -ButterflyTrading This example script performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci -DetectOutliersviaMVE This example script detects outliers in two-asset and multi-asset case -FullyFlexibleBayesNets This case study uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management -FullFlexProbs This script uses Entropy Pooling to compute Fully Flexible Probabilities for historical scenarios -FullyIntegratedLiquidityAndMarketRisk This script computes the liquidity-risk and funding-risk adjusted P&L distribution -HermiteGrid_CaseStudy This script estimates the prior of a hedge fund return and processes extreme views on CVaR according to Entropy Pooling -HermiteGrid_CVaR_Recursion This script illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling -HermiteGrid_demo This script compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views -InvariantProjection This script projects summary statistics to arbitrary horizons under i.i.d. 
assumption -MeanDiversificationFrontier This script computes the mean-diversification efficient frontier -Prior2Posterior This example script compares the numerical and the analytical solution of entropy-pooling -RankingInformation This script performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci -RobustBayesianAllocation This script replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M -S_AnalyzeLognormalCorrelation This script considers a bivariate lognormal market and display the correlation and the condition number of the covariance matrix -S_AnalyzeNormalCorrelation This script considers a bivariate normal market and display the correlation and the condition number of the covariance matrix -S_AnalyzeNormalInverseWishart This script familiarizes the users with multivariate Bayesian estimation. -S_AutocorrelatedProcess This script simulates a Ornstein-Uhlenbeck AR(1) process -S_BivariateSample This script generates draws from a bivariate distribution with different marginals -S_BlackLittermanBasic This script describes to basic market-based Black-Litterman approach -S_BondProjectionPricingNormal This script projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon -S_BondProjectionPricingStudentT This script projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon (Student's T assumption) -S_BuyNHold This script illustrates the buy & hold dynamic strategy -S_CPPI This script illustrates the CPPI (constant proportion portfolio insurance) dynamic strategy -S_CallsProjectionPricing This script projects the distribution of the market invariants for the derivatives market and computes the distribution of prices at the investment horizon -S_CheckDiagonalization This script verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the 
transition matrix of an OU process -S_CornishFisher This script compares the Cornish-Fisher estimate of the VaR with the true analytical VaR under the lognormal assumptions -S_CorrelationPriorUniform This script shows how a jointly uniform prior on the correlations implies that the marginal distribution of each correlation is peaked around zero -S_CovarianceEvolution This script represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid -S_CrossSectionConstrainedIndustries This script fits a cross-sectional linear factor model creating industry factors, where the industry factors are constrained to be uncorrelated with the market -S_CrossSectionIndustries This script fits a cross-sectional linear factor model creating industry factors -S_DerivativesInvariants This script performs the quest for invariance in the derivatives market -S_DeterministicEvolution This script animates the evolution of the determinstic component of an OU process -S_DisplayLognormalCopulaPdf This script displays the pdf of the copula of a lognormal distribution -S_DisplayNormalCopulaCdf This script displays the cdf of the copula of a normal distribution -S_DisplayNormalCopulaPdf This script displays the pdf of the copula of a normal distribution -S_DisplayStudentTCopulaPdf This script displays the pdf of the copula of a Student t distribution -S_ESContributionFactors This script computes the expected shortfall and the contributions to ES from each factor in simulations -S_ESContributionsStudentT This script computes the expected shortfall and the contributions to ES from each security -S_EigenvalueDispersion This script displays the sample eigenvalues dispersion phenomenon -S_EllipticalNDim This script decomposes the N-variate normal distribution into its radial and uniform components to generate an elliptical distribution -S_EquitiesInvariants This file performs the quest for invariance in the stock market -S_EquityProjectionPricing This script projects 
the distribution of the market invariants for the stock market from the estimation interval (normal assumption) to the investment horizon. Then it computes the distribution of prices at the investment horizon analytically. -S_EstimateExpectedValueEvaluation This script script familiarizes the user with the evaluation of an estimator replicability, loss, error, bias and inefficiency -S_EstimateMomentsComboEvaluation This script familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency -S_EstimateQuantileEvaluation This script familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency -S_Estimator This script familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency -S_EvaluationGeneric This script determines the optimal allocation -S_ExactMeanAndCovariance Generate draws from a multivariate normal with matching mean and covariance -S_ExpectationMaximizationHighYield This script implements the Expectation-Maximization (EM) algoritm, which estimates the parameters of a multivariate normal distribution when some observations are randomly missing -S_ExtremeValueTheory This script computes the quantile (VaR) analytically, in simulations and using the extreme value theory approximation -S_FactorAnalysisNotOk This script illustrates the hidden factor analysis puzzle -S_FactorResidualCorrelation This script illustrates exogenous loadings and endogenous factors the true analytical VaR under the lognormal assumptions from the estimation interval to the investment horizon -S_FitProjectRates This script fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution -S_FitSwapToStudentT This script demonstrates the recursive ML estimation of the location and scatter parameters of a multivariate Student t distribution -S_FixedIncomeInvariants This file performs 
the quest for invariance in the fixed income market -S_FullCodependence This script illustrates the concept of co-dependence -S_FxCopulaMarginal This script displays the empirical copula of a set of market variables -S_GenerateMixtureSample This script generates draws from a univarite mixture -S_HedgeOptions This script compares hedging based on Black-Scholes deltas with Factors on Demand hedging -S_HorizonEffect This script studies horizon effect on explicit factors / implicit loadings linear model -S_InvestorsObjective This script familiarizes the users with the objectives of different investors in a highly non-normal bi-variate market of securities -S_JumpDiffusionMerton This script simulates a jump-diffusion process -S_LinVsLogReturn This script project a distribution in the future according to the i.i.d.-implied square-root rule -S_LognormalSample This script simulate univariate lognormal variables -S_MarkovChainMonteCarlo This script illustrates the Metropolis-Hastings algorithm -S_MaxMinVariance This script dispays location-dispersion ellipsoid and statistic -S_MaximumLikelihood This script performs ML under a non-standard parametric set of distributions -S_MeanVarianceBenchmark This script projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and translates this distribution into the returns distribution -S_MeanVarianceCalls This script computes the mean-variance frontier of a set of options -S_MeanVarianceHorizon This script projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance optimization in terms of returns and relative portfolio weights. 
-S_MeanVarianceOptimization This script projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance optimization. -S_MultiVarSqrRootRule This script illustrates the multivariate square root rule-of-thumb -S_NonAnalytical This script generates draws for the sum of random variable -S_NormalSample This script simulate univariate normal variables -S_OrderStatisticsPdfLognormal This script script shows that the pdf of the r-th order statistics of a lognormal random variable -S_OrderStatisticsPdfStudentT This script script shows that the pdf of the r-th order statistics of a tudent t random variable -S_PasturMarchenko This script illustrate the Marchenko-Pastur limit of runifom matrix theory -S_ProjectNPriceMvGarch This script fits a multivariate GARCH model and projects the distribution of the compounded returns from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon. 
-S_ProjectSummaryStatistics This script projects summary statistics to arbitrary horizons -S_PureResidualBonds This script models the joint distribution of the yet-to-be realized key rates of the government curve -S_ResidualAnalysisTheory This script performs the analysis of residuals -S_SelectionHeuristics Compute the r-square of selected factors -S_SemiCircular This script illustrate the semi-circular law of random matrix theory -S_ShrinkageEstimators This script computes the multivariate shrinkage estimators of location and scatter under the normal assumption -S_SnPCaseStudy This script replicates the example from Meucci's MATLAB scriptS_SnPCaseStudy.M -S_StatArbSwaps This script search for cointegrated stat-arb strategies among swap contracts -S_StudentTSample This script simulate univariate Student-t variables -S_SwapPca2Dim This script performs the principal component analysis of a simplified two-point swap curve -S_TStatApprox Simulate invariants for the regression model -S_TimeSeriesConstrainedIndustries This script fits a time-series linear factor computing the industry factors loadings, where the loadings are bounded and constrained to yield unit exposure -S_TimeSeriesIndustries This script fits a time-series linear factor computing the industry factors loadings -S_TimeSeriesVsCrossSectionIndustries This script computes the correlation between explicit, time-series industry factor returns and implicit, cross-section industry factor returns -S_Toeplitz This script shows that the eigenvectors of a Toeplitz matrix have a Fourier basis structure under t-distribution assumptions -S_UtilityMax This script illustrates the constant weight dynamic strategy that maximizes power utility -S_VaRContributionsUniform This script computes the VaR and the contributions to VaR from each security anallitically and in simulations -S_VolatilityClustering This file generates paths for a volatility clustering -S_Wishart This script generates a sample from the 2x2 Wishart 
distribution -S_WishartCorrelation This script computes the correlation of the first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs -S_WishartLocationDispersion This script computes the location-dispersion ellipsoid of the normalized first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs -S_ToyExample This toy example illustrates the use of Entropy Pooling to compute Fully Flexible -logToArithmeticCovariance This example script generates arithmetric returns and arithmetric covariance matrix given a distribution of log returns -S_plotGaussHermite This example script displays mesh points based on Gaussian-Hermite quadrature - Bayesian networks +AnalyticalvsNumerical compares the numerical and the analytical solution of entropy-pooling +ButterflyTrading performs the butterfly-trading case study for the Entropy-Pooling approach by Attilio Meucci +DetectOutliersviaMVE detects outliers in two-asset and multi-asset case +FullyFlexibleBayesNets uses Entropy Pooling to compute Fully Flexible Bayesian networks for risk management +FullFlexProbs uses Entropy Pooling to compute Fully Flexible Probabilities for historical scenarios +FullyIntegratedLiquidityAndMarketRisk computes the liquidity-risk and funding-risk adjusted P&L distribution +HermiteGrid_CaseStudy estimates the prior of a hedge fund return and processes extreme views on CVaR according to Entropy Pooling +HermiteGrid_CVaR_Recursion illustrates the discrete Newton recursion to process views on CVaR according to Entropy Pooling +HermiteGrid_demo compares the performance of plain Monte Carlo versus grid in applying Entropy Pooling to process extreme views +InvariantProjection projects summary statistics to arbitrary horizons under i.i.d. 
assumption +MeanDiversificationFrontier computes the mean-diversification efficient frontier +Prior2Posterior compares the numerical and the analytical solution of entropy-pooling +RankingInformation performs ranking allocation using the Entropy-Pooling approach by Attilio Meucci +RobustBayesianAllocation replicates the example from Meucci's MATLAB script S_SimulationsCaseStudy.M +S_AnalyzeLognormalCorrelation considers a bivariate lognormal market and display the correlation and the condition number of the covariance matrix +S_AnalyzeNormalCorrelation considers a bivariate normal market and display the correlation and the condition number of the covariance matrix +S_AnalyzeNormalInverseWishart familiarizes the users with multivariate Bayesian estimation. +S_AutocorrelatedProcess simulates a Ornstein-Uhlenbeck AR(1) process +S_BivariateSample generates draws from a bivariate distribution with different marginals +S_BlackLittermanBasic describes to basic market-based Black-Litterman approach +S_BondProjectionPricingNormal projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon +S_BondProjectionPricingStudentT projects the distribution of the market invariants for the bond markets from the estimation interval to the investment horizon (Student's T assumption) +S_BuyNHold illustrates the buy & hold dynamic strategy +S_CPPI illustrates the CPPI (constant proportion portfolio insurance) dynamic strategy +S_CallsProjectionPricing projects the distribution of the market invariants for the derivatives market and computes the distribution of prices at the investment horizon +S_CheckDiagonalization verifies the correctness of the eigenvalue-eigenvector representation in terms of real matrices for the transition matrix of an OU process +S_CornishFisher compares the Cornish-Fisher estimate of the VaR with the true analytical VaR under the lognormal assumptions +S_CorrelationPriorUniform shows how a jointly 
uniform prior on the correlations implies that the marginal distribution of each correlation is peaked around zero +S_CovarianceEvolution represents the evolution of the covariance of an OU process in terms of the dispersion ellipsoid +S_CrossSectionConstrainedIndustries fits a cross-sectional linear factor model creating industry factors, where the industry factors are constrained to be uncorrelated with the market +S_CrossSectionIndustries fits a cross-sectional linear factor model creating industry factors +S_DerivativesInvariants performs the quest for invariance in the derivatives market +S_DeterministicEvolution animates the evolution of the determinstic component of an OU process +S_DisplayLognormalCopulaPdf displays the pdf of the copula of a lognormal distribution +S_DisplayNormalCopulaCdf displays the cdf of the copula of a normal distribution +S_DisplayNormalCopulaPdf displays the pdf of the copula of a normal distribution +S_DisplayStudentTCopulaPdf displays the pdf of the copula of a Student t distribution +S_ESContributionFactors computes the expected shortfall and the contributions to ES from each factor in simulations +S_ESContributionsStudentT computes the expected shortfall and the contributions to ES from each security +S_EigenvalueDispersion displays the sample eigenvalues dispersion phenomenon +S_EllipticalNDim decomposes the N-variate normal distribution into its radial and uniform components to generate an elliptical distribution +S_EquitiesInvariants performs the quest for invariance in the stock market +S_EquityProjectionPricing projects the distribution of the market invariants for the stock market from the estimation interval (normal assumption) to the investment horizon. Then it computes the distribution of prices at the investment horizon analytically. 
+S_EstimateExpectedValueEvaluation script familiarizes the user with the evaluation of an estimator replicability, loss, error, bias and inefficiency +S_EstimateMomentsComboEvaluation familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency +S_EstimateQuantileEvaluation familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency +S_Estimator familiarizes the user with the evaluation of an estimator: replicability, loss, error, bias and inefficiency +S_EvaluationGeneric determines the optimal allocation +S_ExactMeanAndCovariance generate draws from a multivariate normal with matching mean and covariance +S_ExpectationMaximizationHighYield implements the Expectation-Maximization (EM) algoritm, which estimates the parameters of a multivariate normal distribution when some observations are randomly missing +S_ExtremeValueTheory computes the quantile (VaR) analytically, in simulations and using the extreme value theory approximation +S_FactorAnalysisNotOk illustrates the hidden factor analysis puzzle +S_FactorResidualCorrelation illustrates exogenous loadings and endogenous factors the true analytical VaR under the lognormal assumptions from the estimation interval to the investment horizon +S_FitProjectRates fits the swap rates dynamics to a multivariate Ornstein-Uhlenbeck process and computes and plots the estimated future distribution +S_FitSwapToStudentT demonstrates the recursive ML estimation of the location and scatter parameters of a multivariate Student t distribution +S_FixedIncomeInvariants performs the quest for invariance in the fixed income market +S_FullCodependence illustrates the concept of co-dependence +S_FxCopulaMarginal displays the empirical copula of a set of market variables +S_GenerateMixtureSample generates draws from a univarite mixture +S_HedgeOptions compares hedging based on Black-Scholes deltas with Factors on Demand hedging 
+S_HorizonEffect studies horizon effect on explicit factors / implicit loadings linear model +S_InvestorsObjective familiarizes the users with the objectives of different investors in a highly non-normal bi-variate market of securities +S_JumpDiffusionMerton simulates a jump-diffusion process +S_LinVsLogReturn project a distribution in the future according to the i.i.d.-implied square-root rule +S_LognormalSample simulate univariate lognormal variables +S_MarkovChainMonteCarlo illustrates the Metropolis-Hastings algorithm +S_MaxMinVariance dispays location-dispersion ellipsoid and statistic +S_MaximumLikelihood performs ML under a non-standard parametric set of distributions +S_MeanVarianceBenchmark projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and translates this distribution into the returns distribution +S_MeanVarianceCalls computes the mean-variance frontier of a set of options +S_MeanVarianceHorizon projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance optimization in terms of returns and relative portfolio weights. +S_MeanVarianceOptimization projects the distribution of the market invariants for the bond and stock markets from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance optimization. 
+S_MultiVarSqrRootRule illustrates the multivariate square root rule-of-thumb +S_NonAnalytical generates draws for the sum of random variables +S_NormalSample simulates univariate normal variables +S_OrderStatisticsPdfLognormal script shows that the pdf of the r-th order statistics of a lognormal random variable +S_OrderStatisticsPdfStudentT script shows that the pdf of the r-th order statistics of a Student t random variable +S_PasturMarchenko illustrates the Marchenko-Pastur limit of random matrix theory +S_ProjectNPriceMvGarch fits a multivariate GARCH model and projects the distribution of the compounded returns from the estimation interval to the investment horizon. Then it computes the distribution of prices at the investment horizon. +S_ProjectSummaryStatistics projects summary statistics to arbitrary horizons +S_PureResidualBonds models the joint distribution of the yet-to-be realized key rates of the government curve +S_ResidualAnalysisTheory performs the analysis of residuals +S_SelectionHeuristics computes the r-square of selected factors +S_SemiCircular illustrates the semi-circular law of random matrix theory +S_ShrinkageEstimators computes the multivariate shrinkage estimators of location and scatter under the normal assumption +S_SnPCaseStudy replicates the example from Meucci's MATLAB script S_SnPCaseStudy.M +S_StatArbSwaps searches for cointegrated stat-arb strategies among swap contracts +S_StudentTSample simulates univariate Student-t variables +S_SwapPca2Dim performs the principal component analysis of a simplified two-point swap curve +S_TStatApprox simulates invariants for the regression model +S_TimeSeriesConstrainedIndustries fits a time-series linear factor computing the industry factors loadings, where the loadings are bounded and constrained to yield unit exposure +S_TimeSeriesIndustries fits a time-series linear factor computing the industry factors loadings +S_TimeSeriesVsCrossSectionIndustries computes the correlation between explicit,
time-series industry factor returns and implicit, cross-section industry factor returns +S_Toeplitz shows that the eigenvectors of a Toeplitz matrix have a Fourier basis structure under t-distribution assumptions +S_UtilityMax illustrates the constant weight dynamic strategy that maximizes power utility +S_VaRContributionsUniform computes the VaR and the contributions to VaR from each security analytically and in simulations +S_VolatilityClustering generates paths for a volatility clustering +S_Wishart generates a sample from the 2x2 Wishart distribution +S_WishartCorrelation computes the correlation of the first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs +S_WishartLocationDispersion computes the location-dispersion ellipsoid of the normalized first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs +S_ToyExample illustrates the use of Entropy Pooling to compute Fully Flexible probabilities +logToArithmeticCovariance generates arithmetic returns and arithmetic covariance matrix given a distribution of log returns +S_plotGaussHermite displays mesh points based on Gaussian-Hermite quadrature Bayesian networks Modified: pkg/Meucci/demo/S_LognormalSample.R =================================================================== --- pkg/Meucci/demo/S_LognormalSample.R 2013-09-16 08:06:30 UTC (rev 3114) +++ pkg/Meucci/demo/S_LognormalSample.R 2013-09-16 08:26:10 UTC (rev 3115) @@ -1,8 +1,9 @@ -#' This script simulate univariate lognormal variables, as described in +#' This script simulates univariate lognormal variables, as described in #' A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 1. #' #' @references -#' \url{http://} +#' @references +#' A.
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}., "E 25- Simulation of a lognormal random variable" #' See Meucci's script for "S_LognormalSample.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/man/BlackScholesCallPrice.Rd =================================================================== --- pkg/Meucci/man/BlackScholesCallPrice.Rd 2013-09-16 08:06:30 UTC (rev 3114) +++ pkg/Meucci/man/BlackScholesCallPrice.Rd 2013-09-16 08:26:10 UTC (rev 3115) @@ -45,7 +45,8 @@ Xavier Valls \email{flamejat at gmail.com} } \references{ - \url{http://symmys.com/node/170} See Meucci's script for - "BlackScholesCallPrice.m" + A. Meucci - "Exercises in Advanced Risk and Portfolio + Management" \url{http://symmys.com/node/170}. See + Meucci's script for "BlackScholesCallPrice.m" } Modified: pkg/Meucci/man/Central2Raw.Rd =================================================================== --- pkg/Meucci/man/Central2Raw.Rd 2013-09-16 08:06:30 UTC (rev 3114) +++ pkg/Meucci/man/Central2Raw.Rd 2013-09-16 08:26:10 UTC (rev 3115) @@ -13,7 +13,7 @@ corresponding raw moments } \description{ - step 2 of projection process: From the central moments of + Step 2 of projection process: From the central moments of step 1, we compute the non-central moments. To do so we start with the first non-central moment and apply recursively an identity (formula 20) @@ -32,7 +32,8 @@ Management". See page 10. Symmys site containing original MATLAB source code \url{http://www.symmys.com} - \url{http://symmys.com/node/170} See Meucci's script for - "Central2Raw.m" + A. Meucci - "Exercises in Advanced Risk and Portfolio + Management" \url{http://symmys.com/node/170}. 
See + Meucci's script for "Central2Raw.m" } Modified: pkg/Meucci/man/CentralAndStandardizedStatistics.Rd =================================================================== --- pkg/Meucci/man/CentralAndStandardizedStatistics.Rd 2013-09-16 08:06:30 UTC (rev 3114) +++ pkg/Meucci/man/CentralAndStandardizedStatistics.Rd 2013-09-16 08:26:10 UTC (rev 3115) @@ -25,7 +25,8 @@ Xavier Valls \email{flamejat at gmail.com} } \references{ - \url{http://symmys.com/node/170} See Meucci's script for - "CentralAndStandardizedStatistics.m" + A. Meucci - "Exercises in Advanced Risk and Portfolio + Management" \url{http://symmys.com/node/170}. See + Meucci's script for "CentralAndStandardizedStatistics.m" } Modified: pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd =================================================================== --- pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd 2013-09-16 08:06:30 UTC (rev 3114) +++ pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd 2013-09-16 08:26:10 UTC (rev 3115) @@ -29,9 +29,10 @@ Xavier Valls \email{flamejat at gmail.com} } \references{ - \url{http://symmys.com/node/170} See (6.77)-(6.79) in - "Risk and Asset Allocation"-Springer (2005), by A. Meucci - See Meucci's script for + A. Meucci - "Exercises in Advanced Risk and Portfolio + Management" \url{http://symmys.com/node/170}. 
See [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3115 From noreply at r-forge.r-project.org Mon Sep 16 11:32:05 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 11:32:05 +0200 (CEST) Subject: [Returnanalytics-commits] r3116 - in pkg/Meucci: R demo man Message-ID: <20130916093205.9EDDC185372@r-forge.r-project.org> Author: xavierv Date: 2013-09-16 11:32:05 +0200 (Mon, 16 Sep 2013) New Revision: 3116 Modified: pkg/Meucci/R/LognormalMoments2Parameters.R pkg/Meucci/demo/00Index pkg/Meucci/demo/S_LognormalSample.R pkg/Meucci/demo/S_NonAnalytical.R pkg/Meucci/demo/S_NormalSample.R pkg/Meucci/demo/S_StudentTSample.R pkg/Meucci/man/LognormalMoments2Parameters.Rd Log: - updated documentation for chapter 1 demo scripts and its functions Modified: pkg/Meucci/R/LognormalMoments2Parameters.R =================================================================== --- pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-16 08:26:10 UTC (rev 3115) +++ pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-16 09:32:05 UTC (rev 3116) @@ -1,24 +1,30 @@ #' @title Computes the mean and standard deviation of a lognormal distribution from its parameters. #' -#' @description Computes the mean and standard deviation of a lognormal distribution from its parameters, as described in +#' @description determines $\mu$ and $\sigma^2$ from $\Expect\{X\}$ and $\Var\{X\}$, and uses it to determine $\mu$ +#' and $\sigma^{2}$ such that $\Expect\left\{ X\right\} \bydef 3$ and $\Var\left\{ X\right\} \bydef 5$, as described in #' A. Meucci, "Risk and Asset Allocation", Springer, 2005. 
#' -#' @param e [scalar] expected value of the lognormal distribution -#' @param v [scalar] variance of the lognormal distribution +#' \deqn{\sigma^{2} = \ln \left( 1 + \frac{V}{E^{2}} \right) , } +#' \deqn{\mu = \ln(E) - \frac{1}{2} \ln \left( 1 + \frac{V}{E^{2}} \right) .} +#' +#' +#' @param e [scalar] expected value of the lognormal distribution +#' @param v [scalar] variance of the lognormal distribution #' -#' @return mu [scalar] expected value of the normal distribution -#' @return sig2 [scalar] variance of the normal distribution +#' @return mu [scalar] expected value of the normal distribution +#' @return sig2 [scalar] variance of the normal distribution #' #' @note Inverts the formulas (1.98)-(1.99) in "Risk and Asset Allocation", Springer, 2005. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}., "E 25- Simulation of a lognormal random variable" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, "E 25- Simulation of a lognormal random variable". 
+#' #' See Meucci's script for "LognormalMoments2Parameters.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -LognormalMoments2Parameters = function( e, v) +LognormalMoments2Parameters = function( e, v ) { sig2 = log( 1 + v / ( e^2 ) ); mu = log( e ) - sig2 / 2; Modified: pkg/Meucci/demo/00Index =================================================================== --- pkg/Meucci/demo/00Index 2013-09-16 08:26:10 UTC (rev 3115) +++ pkg/Meucci/demo/00Index 2013-09-16 09:32:05 UTC (rev 3116) @@ -98,7 +98,7 @@ S_Wishart generates a sample from the 2x2 Wishart distribution S_WishartCorrelation computes the correlation of the first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs S_WishartLocationDispersion computes the location-dispersion ellipsoid of the normalized first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs -S_ToyExample illustrates the use of Entropy Pooling to compute Fully Flexible probabilities +S_ToyExample illustrates the use of Entropy Pooling to compute Fully Flexible Probabilities logToArithmeticCovariance generates arithmetric returns and arithmetric covariance matrix given a distribution of log returns S_plotGaussHermite displays mesh points based on Gaussian-Hermite quadrature Bayesian networks Modified: pkg/Meucci/demo/S_LognormalSample.R =================================================================== --- pkg/Meucci/demo/S_LognormalSample.R 2013-09-16 08:26:10 UTC (rev 3115) +++ pkg/Meucci/demo/S_LognormalSample.R 2013-09-16 09:32:05 UTC (rev 3116) @@ -2,15 +2,14 @@ #' A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 1. #' #' @references -#' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}., "E 25- Simulation of a lognormal random variable" -#' See Meucci's script for "S_LognormalSample.m" +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' E 25- Simulation of a lognormal random variable". #' +#' See Meucci's script for "S_LognormalSample.m". +#' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -source("../R/LognormalMoments2Parameters.R"); - ################################################################################################################## ### Input parameters Modified: pkg/Meucci/demo/S_NonAnalytical.R =================================================================== --- pkg/Meucci/demo/S_NonAnalytical.R 2013-09-16 08:26:10 UTC (rev 3115) +++ pkg/Meucci/demo/S_NonAnalytical.R 2013-09-16 09:32:05 UTC (rev 3116) @@ -2,11 +2,12 @@ #' A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 1. #' #' @references -#' \url{http://} -#' See Meucci's script for "S_NonAnalytical.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 22- Sum of random variables via simulation". #' +#' See Meucci's script for "S_NonAnalytical.m". +#' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export ################################################################################################################## ### Input parameters Modified: pkg/Meucci/demo/S_NormalSample.R =================================================================== --- pkg/Meucci/demo/S_NormalSample.R 2013-09-16 08:26:10 UTC (rev 3115) +++ pkg/Meucci/demo/S_NormalSample.R 2013-09-16 09:32:05 UTC (rev 3116) @@ -2,11 +2,12 @@ #' A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 1. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 23- Simulation of univariate random normal variable". 
+#' #' See Meucci's script for "S_NormalSample.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export ################################################################################################################## ### Input parameters @@ -50,7 +51,7 @@ # plot empirical quantile dev.new(); -u= seq( 0.01, 0.99, 0.01 ); # range of quantiles (values between zero and one) +u = seq( 0.01, 0.99, 0.01 ); # range of quantiles (values between zero and one) q = quantile( X, u ); plot( u, q, type = "l", xlab="Grade", ylab="Quantile", lty = 1, col = "red", main = "quantile of normal distribution" ); Modified: pkg/Meucci/demo/S_StudentTSample.R =================================================================== --- pkg/Meucci/demo/S_StudentTSample.R 2013-09-16 08:26:10 UTC (rev 3115) +++ pkg/Meucci/demo/S_StudentTSample.R 2013-09-16 09:32:05 UTC (rev 3116) @@ -2,11 +2,12 @@ #' A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 1. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 24- Simulation of a Student t random variable". 
+#' #' See Meucci's script for "S_StudentTSample.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export ################################################################################################################## ### Input parameters @@ -38,7 +39,7 @@ ################################################################################################################## ### Generate Student t sample with above parameters using grade inversion -U = runif( nSim ); +U = runif( nSim ); X_c = mu + sigma * qt( U, nu ); ################################################################################################################## @@ -46,21 +47,15 @@ NumBins = round(10 * log(nSim)); dev.new(); -par( mfrow = c( 3, 1) ); +par( mfrow = c( 3, 1 ) ); hist( X_a, NumBins, main = "built-in generator" ); hist( X_b, NumBins, main = "stoch. representation" ); hist( X_c, NumBins, main = "grade inversion" ); - -#axisLimits = [min(axisLimits(:, 1)), max(axisLimits(:, 2)), min(axisLimits(:, 3)), max(axisLimits(:, 4))]; -#subplot(3, 1, 1), axis(axisLimits); -#subplot(3, 1, 2), axis(axisLimits); -#subplot(3, 1, 3), axis(axisLimits); - ################################################################################################################## ### Compare empirical quantiles of the three simuations -u= seq( 0.01, 0.99, 0.01 ); # range of quantiles (values between zero and one) = 0.01 : 0.01 : 0.99; # range of quantiles (values between zero and one) +u = seq( 0.01, 0.99, 0.01 ); # range of quantiles (values between zero and one) = 0.01 : 0.01 : 0.99; # range of quantiles (values between zero and one) q_a = quantile( X_a, u ); q_b = quantile( X_b, u ); q_c = quantile( X_c, u ); Modified: pkg/Meucci/man/LognormalMoments2Parameters.Rd =================================================================== --- pkg/Meucci/man/LognormalMoments2Parameters.Rd 2013-09-16 08:26:10 UTC (rev 3115) +++ pkg/Meucci/man/LognormalMoments2Parameters.Rd 2013-09-16 09:32:05 UTC (rev 3116) @@ 
-16,9 +16,15 @@ sig2 [scalar] variance of the normal distribution } \description{ - Computes the mean and standard deviation of a lognormal - distribution from its parameters, as described in A. - Meucci, "Risk and Asset Allocation", Springer, 2005. + determines $\mu$ and $\sigma^2$ from $\Expect\{X\}$ and + $\Var\{X\}$, and uses it to determine $\mu$ and + $\sigma^{2}$ such that $\Expect\left\{ X\right\} \bydef + 3$ and $\Var\left\{ X\right\} \bydef 5$, as described in + A. Meucci, "Risk and Asset Allocation", Springer, 2005. + + \deqn{\sigma^{2} = \ln \left( 1 + \frac{V}{E^{2}} \right) + , } \deqn{\mu = \ln(E) - \frac{1}{2} \ln \left( 1 + + \frac{V}{E^{2}} \right) .} } \note{ Inverts the formulas (1.98)-(1.99) in "Risk and Asset @@ -29,8 +35,9 @@ } \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio - Management" \url{http://symmys.com/node/170}., "E 25- - Simulation of a lognormal random variable" See Meucci's - script for "LognormalMoments2Parameters.m" + Management" \url{http://symmys.com/node/170}, "E 25- + Simulation of a lognormal random variable". 
+ + See Meucci's script for "LognormalMoments2Parameters.m" } From noreply at r-forge.r-project.org Mon Sep 16 11:42:42 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 11:42:42 +0200 (CEST) Subject: [Returnanalytics-commits] r3117 - in pkg/Meucci: R demo man Message-ID: <20130916094242.80AD7185ECC@r-forge.r-project.org> Author: xavierv Date: 2013-09-16 11:42:42 +0200 (Mon, 16 Sep 2013) New Revision: 3117 Modified: pkg/Meucci/R/InvariantProjection.R pkg/Meucci/demo/S_LognormalSample.R pkg/Meucci/man/Central2Raw.Rd pkg/Meucci/man/Raw2Central.Rd Log: - updated documentation for chapter 1 functions without demo scripts in the same chapter Modified: pkg/Meucci/R/InvariantProjection.R =================================================================== --- pkg/Meucci/R/InvariantProjection.R 2013-09-16 09:32:05 UTC (rev 3116) +++ pkg/Meucci/R/InvariantProjection.R 2013-09-16 09:42:42 UTC (rev 3117) @@ -16,8 +16,9 @@ #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management". -#' Symmys site containing original MATLAB source code \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 16- Raw Moments to central moments". +#' #' See Meucci's script for "Raw2Central.m" #' @export Raw2Central = function( mu_ ) @@ -140,10 +141,9 @@ #' @author Ram Ahluwalia \email{rahluwalia@@gmail.com} #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management". See page 10. -#' Symmys site containing original MATLAB source code \url{http://www.symmys.com} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 16- Raw moments to central moments". #' -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "Central2Raw.m" #' @export Central2Raw = function( mu ) Modified: pkg/Meucci/demo/S_LognormalSample.R =================================================================== --- pkg/Meucci/demo/S_LognormalSample.R 2013-09-16 09:32:05 UTC (rev 3116) +++ pkg/Meucci/demo/S_LognormalSample.R 2013-09-16 09:42:42 UTC (rev 3117) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' E 25- Simulation of a lognormal random variable". +#' "E 25- Simulation of a lognormal random variable". #' #' See Meucci's script for "S_LognormalSample.m". #' Modified: pkg/Meucci/man/Central2Raw.Rd =================================================================== --- pkg/Meucci/man/Central2Raw.Rd 2013-09-16 09:32:05 UTC (rev 3116) +++ pkg/Meucci/man/Central2Raw.Rd 2013-09-16 09:42:42 UTC (rev 3117) @@ -29,11 +29,9 @@ } \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio - Management". See page 10. Symmys site containing original - MATLAB source code \url{http://www.symmys.com} + Management" \url{http://symmys.com/node/170}, "E 16- Raw + moments to central moments". - A. Meucci - "Exercises in Advanced Risk and Portfolio - Management" \url{http://symmys.com/node/170}. See - Meucci's script for "Central2Raw.m" + See Meucci's script for "Central2Raw.m" } Modified: pkg/Meucci/man/Raw2Central.Rd =================================================================== --- pkg/Meucci/man/Raw2Central.Rd 2013-09-16 09:32:05 UTC (rev 3116) +++ pkg/Meucci/man/Raw2Central.Rd 2013-09-16 09:42:42 UTC (rev 3117) @@ -30,8 +30,9 @@ } \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio - Management". Symmys site containing original MATLAB - source code \url{http://symmys.com/node/170}. See - Meucci's script for "Raw2Central.m" + Management" \url{http://symmys.com/node/170}, "E 16- Raw + Moments to central moments". 
+ + See Meucci's script for "Raw2Central.m" } From noreply at r-forge.r-project.org Mon Sep 16 12:40:00 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 12:40:00 +0200 (CEST) Subject: [Returnanalytics-commits] r3118 - in pkg/Meucci: R demo man Message-ID: <20130916104000.7D959183EDF@r-forge.r-project.org> Author: xavierv Date: 2013-09-16 12:40:00 +0200 (Mon, 16 Sep 2013) New Revision: 3118 Modified: pkg/Meucci/R/LognormalCopulaPdf.R pkg/Meucci/R/LognormalParameters2Statistics.R pkg/Meucci/R/MvnRnd.R pkg/Meucci/R/NormalCopulaPdf.R pkg/Meucci/R/StudentTCopulaPdf.R pkg/Meucci/R/TwoDimEllipsoid.R pkg/Meucci/demo/S_AnalyzeLognormalCorrelation.R pkg/Meucci/demo/S_AnalyzeNormalCorrelation.R pkg/Meucci/demo/S_BivariateSample.R pkg/Meucci/demo/S_BondProjectionPricingNormal.R pkg/Meucci/demo/S_BondProjectionPricingStudentT.R pkg/Meucci/demo/S_DerivativesInvariants.R pkg/Meucci/demo/S_DisplayLognormalCopulaPdf.R pkg/Meucci/demo/S_DisplayNormalCopulaCdf.R pkg/Meucci/demo/S_DisplayNormalCopulaPdf.R pkg/Meucci/demo/S_DisplayStudentTCopulaPdf.R pkg/Meucci/demo/S_EllipticalNDim.R pkg/Meucci/demo/S_ExactMeanAndCovariance.R pkg/Meucci/demo/S_FullCodependence.R pkg/Meucci/demo/S_FxCopulaMarginal.R pkg/Meucci/demo/S_LognormalSample.R pkg/Meucci/demo/S_MaxMinVariance.R pkg/Meucci/demo/S_OrderStatisticsPdfLognormal.R pkg/Meucci/demo/S_OrderStatisticsPdfStudentT.R pkg/Meucci/demo/S_ResidualAnalysisTheory.R pkg/Meucci/demo/S_SelectionHeuristics.R pkg/Meucci/demo/S_Wishart.R pkg/Meucci/demo/S_WishartCorrelation.R pkg/Meucci/demo/S_WishartLocationDispersion.R pkg/Meucci/man/LognormalCopulaPdf.Rd pkg/Meucci/man/LognormalParam2Statistics.Rd pkg/Meucci/man/MvnRnd.Rd pkg/Meucci/man/NormalCopulaPdf.Rd pkg/Meucci/man/StudentTCopulaPdf.Rd pkg/Meucci/man/TwoDimEllipsoid.Rd Log: - updated documentation for chapter 2 demo scripts and its functions Modified: pkg/Meucci/R/LognormalCopulaPdf.R 
=================================================================== --- pkg/Meucci/R/LognormalCopulaPdf.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/R/LognormalCopulaPdf.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,14 +1,18 @@ -#' Computes the pdf of the copula of the lognormal distribution at the generic point u in the unit hypercube, +#' @title Computes the pdf of the copula of the lognormal distribution at the generic point u in the unit hypercube. +#' +#' @description Computes the pdf of the copula of the lognormal distribution at the generic point u in the unit hypercube, #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. #' -#' @param u : [vector] (J x 1) grades -#' @param Mu : [vector] (N x 1) location parameter -#' @param Sigma : [matrix] (N x N) scatter parameter +#' @param u [vector] (J x 1) grades +#' @param Mu [vector] (N x 1) location parameter +#' @param Sigma [matrix] (N x N) scatter parameter #' -#' @return F_U : [vector] (J x 1) PDF values +#' @return F_U [vector] (J x 1) PDF values #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 36 - Pdf of the lognormal copula". +#' #' See Meucci's script for "LognormalCopulaPdf.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/LognormalParameters2Statistics.R =================================================================== --- pkg/Meucci/R/LognormalParameters2Statistics.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/R/LognormalParameters2Statistics.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,6 +1,8 @@ -#' Compute expectation, Cov, standard deviation and Corr for a lognormal distribution, as described in -#' A. Meucci "Risk and Asset Allocation", Springer, 2005 +#' @title Compute expectation, covariance, standard deviation and correlation for a lognormal distribution. 
#' +#' @description Compute expectation, covariance, standard deviation and correlation for a lognormal distribution, as described in +#' A. Meucci "Risk and Asset Allocation", Springer, 2005. +#' #' @param Mu : [vector] (N x 1) location parameter #' @param Sigma : [matrix] (N x N) scale parameter #' @@ -11,19 +13,21 @@ #' @return Corr : [matrix] (N x N) correlation #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 85 - Correlation in lognormal markets". +#' #' See Meucci's script for "LognormalParam2Statistics.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export -LognormalParam2Statistics = function(Mu, Sigma) +LognormalParam2Statistics = function( Mu, Sigma ) { - Exp = exp( Mu + (1/2) * diag( Sigma ) ); - Cov = exp( Mu + (1/2) * diag( Sigma ) ) %*% t( exp( Mu + (1/2) * diag( Sigma ) ) ) * ( exp( Sigma ) - 1 ); - Std = sqrt( diag( Cov ) ); + Exp = exp( Mu + (1/2) * diag( Sigma ) ); + Cov = exp( Mu + (1/2) * diag( Sigma ) ) %*% t( exp( Mu + (1/2) * diag( Sigma ) ) ) * ( exp( Sigma ) - 1 ); + Std = sqrt( diag( Cov ) ); Corr = diag( 1 / Std ) %*% Cov %*% diag( 1 / Std ); - return( list( Exp = Exp, Covariance = Cov, Standard_Deviation = Std, Correlation = Corr )); + return( list( Exp = Exp, Covariance = Cov, Standard_Deviation = Std, Correlation = Corr ) ); } \ No newline at end of file Modified: pkg/Meucci/R/MvnRnd.R =================================================================== --- pkg/Meucci/R/MvnRnd.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/R/MvnRnd.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,4 +1,6 @@ -#' Generate normal simulations whose sample moments match the population moments, +#' @title Generate normal simulations whose sample moments match the population moments +#' +#' @description Generate normal simulations whose sample moments match the population moments, #' as described in A. 
Meucci, "Risk and Asset Allocation", Springer, 2005. #' #' @param M : [vector] (N x 1) expectation @@ -8,15 +10,18 @@ #' @return X : [matrix] (J x N) of drawsF_U : [vector] (J x 1) PDF values #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}., \url{http://www.symmys.com/node/162}{A. Meucci - "Simulations with Exact Means and Covariances", Risk, July 2009} -#' See Meucci's script for "MvnRnd.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 64 - Simulation of a multivariate normal random variable with matching moments". #' -#' @author Xavier Valls \email{flamejat@@gmail.com} and Ram Ahluwalia \email{rahluwalia@@gmail.com} +#' See Meucci's script for "MvnRnd.m". +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} #' @export MvnRnd = function( M, S, J ) { - if ( !require( "QZ" ) ) stop("QZ package installation required for this script") + if ( !require( "QZ" ) ) stop("QZ package installation required for this script"); + N = length(M); # generate antithetic variables (mean = 0) @@ -28,9 +33,6 @@ # solve Riccati equation using Schur method H = rbind( cbind( matrix( 0, N, N ), -S ), cbind( -S, matrix( 0, N, N ) ) ); - - #Schur = Schur( H ); - #U = ordschur(U_,T_,'lhp'); U = ordqz( H, keyword = "lhp" )$Q; Modified: pkg/Meucci/R/NormalCopulaPdf.R =================================================================== --- pkg/Meucci/R/NormalCopulaPdf.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/R/NormalCopulaPdf.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,18 +1,20 @@ -library(pracma); - -#' Computes the pdf of the copula of the normal distribution at the generic point u in the unit hypercube, -#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. 
+#' @title Computes the pdf of the copula of the normal distribution at the generic point u in the unit hypercube +#' +#' @description Computes the pdf of the copula of the normal distribution at the generic point u in the unit +#' hypercube, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. #' -#' @param u : [vector] (J x 1) grade -#' @param Mu : [vector] (N x 1) mean -#' @param Sigma : [matrix] (N x N) covariance +#' @param u [vector] (J x 1) grade +#' @param Mu [vector] (N x 1) mean +#' @param Sigma [matrix] (N x N) covariance #' -#' @return F_U : [vector] (J x 1) PDF values +#' @return F_U [vector] (J x 1) PDF values #' #' @references -#' \url{http://} -#' See Meucci's script for "LognormalCopulaPdf.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 33 - Pdf of the normal copula". #' +#' See Meucci's script for "NormalCopulaPdf.m" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export Modified: pkg/Meucci/R/StudentTCopulaPdf.R =================================================================== --- pkg/Meucci/R/StudentTCopulaPdf.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/R/StudentTCopulaPdf.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,6 +1,6 @@ -library(pracma); - -#' Pdf of the copula of the Student t distribution at the generic point u in the unit hypercube, +#' @title Pdf of the copula of the Student t distribution at the generic point u in the unit hypercube +#' +#' @description Pdf of the copula of the Student t distribution at the generic point u in the unit hypercube, #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. #' #' @param u : [vector] (J x 1) grade @@ -12,7 +12,9 @@ #' @return F_U : [vector] (J x 1) PDF values #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 88 - Copula vs. Correlation". 
+#' #' See Meucci's script for "StudentTCopulaPdf.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/TwoDimEllipsoid.R =================================================================== --- pkg/Meucci/R/TwoDimEllipsoid.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/R/TwoDimEllipsoid.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,7 +1,10 @@ -#' This script computes the location-dispersion ellipsoid of the normalized (unit variance, zero expectation) -#' first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function of the inputs, -#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. +#'@title Computes the location-dispersion ellipsoid of the normalized first diagonal and off-diagonal elements +#' of a 2x2 Wishart distribution as a function of the inputs #' +#' @description This function computes the location-dispersion ellipsoid of the normalized (unit variance, +#' zero expectation)first diagonal and off-diagonal elements of a 2x2 Wishart distribution as a function +#' of the inputs, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. +#' #' @param Location : [vector] (2 x 1) location vector (typically the expected value #' @param Square_Dispersion : [matrix] (2 x 2) scatter matrix Square_Dispersion (typically the covariance matrix) #' @param Scale : [scalar] a scalar Scale, that specifies the scale (radius) of the ellipsoid @@ -11,7 +14,8 @@ #' @return E : [figure handle] #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
+#' #' See Meucci's script for "TwoDimEllipsoid.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_AnalyzeLognormalCorrelation.R =================================================================== --- pkg/Meucci/demo/S_AnalyzeLognormalCorrelation.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_AnalyzeLognormalCorrelation.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,12 +1,14 @@ #' This script considers a bivariate lognormal market and display the correlation and the condition number of the #' covariance matrix, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' -#' @references -#' \url{http://} +#' @references +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 85 - Correlation in lognormal markets". +#' #' See Meucci's script for "S_AnalyzeLognormalCorrelation.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ########################################################################################################################################### Modified: pkg/Meucci/demo/S_AnalyzeNormalCorrelation.R =================================================================== --- pkg/Meucci/demo/S_AnalyzeNormalCorrelation.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_AnalyzeNormalCorrelation.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,12 +1,14 @@ #' This script considers a bivariate normal market and display the correlation and the condition number of the -#' covariance matrix, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. +#' covariance matrix, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 84 - Correlation in normal markets". 
+#' #' See Meucci's script for "S_AnalyzeNormalCorrelation.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ################################################################################################################### ### Set input parameters @@ -14,10 +16,10 @@ Mu = rbind( 0, 0 ) s = c( 1, 1 ); -rhos = seq( -0.99, 0.99, 0.01 ); +rhos = seq( -0.99, 0.99, 0.01 ); nrhos = length( rhos ); -Cs = array( NaN, nrhos ); +Cs = array( NaN, nrhos ); CRs = array( NaN, nrhos ); @@ -26,7 +28,7 @@ for ( n in 1 : nrhos ) { - rho = rhos[ n ] ; + rho = rhos[ n ] ; Sigma = rbind( c(s[1]^2, rho * s[1] * s[2]), c(rho * s[1] * s[2], s[2]^2) ); Covariance = Sigma; Modified: pkg/Meucci/demo/S_BivariateSample.R =================================================================== --- pkg/Meucci/demo/S_BivariateSample.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_BivariateSample.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -2,7 +2,9 @@ #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 38 - Normal copula and given marginals". 
+#' #' See Meucci's script for "S_BivariateSample.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_BondProjectionPricingNormal.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -9,7 +9,7 @@ #' See Meucci's script for "S_BondProjectionPricingNormal.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_BondProjectionPricingStudentT.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingStudentT.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_BondProjectionPricingStudentT.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -8,7 +8,7 @@ #' See Meucci's script for "S_BondProjectionPricingStudentT.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_DerivativesInvariants.R =================================================================== --- pkg/Meucci/demo/S_DerivativesInvariants.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_DerivativesInvariants.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -6,7 +6,6 @@ #' See Meucci's script for "S_DerivativesInvariants.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export ################################################################################################################## ### Load implied vol for options on SPX for different time to maturity and moneyness Modified: pkg/Meucci/demo/S_DisplayLognormalCopulaPdf.R 
=================================================================== --- pkg/Meucci/demo/S_DisplayLognormalCopulaPdf.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_DisplayLognormalCopulaPdf.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,13 +1,14 @@ - -#'This script displays the pdf of the copula of a lognormal distribution, as described +#' This script displays the pdf of the copula of a lognormal distribution, as described #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 36 - Pdf of the lognormal copula". +#' #' See Meucci's script for "S_DisplayLognormalCopulaPdf.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ############################################################################################################# ### Input parameters @@ -21,7 +22,7 @@ ### Grid GridSide1 = seq( 0.05, 0.95, 0.05 ); GridSide2 = GridSide1; -nMesh = length(GridSide1); +nMesh = length(GridSide1); ############################################################################################################# ### Compute pdf of copula Modified: pkg/Meucci/demo/S_DisplayNormalCopulaCdf.R =================================================================== --- pkg/Meucci/demo/S_DisplayNormalCopulaCdf.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_DisplayNormalCopulaCdf.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,12 +1,14 @@ -#'This script displays the cdf of the copula of a normal distribution, as described +#' This script displays the cdf of the copula of a normal distribution, as described #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 35 - Cdf of the normal copula". 
+#' #' See Meucci's script for "S_DisplayNormalCopulaCdf.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ############################################################################################################# ### Input parameters Modified: pkg/Meucci/demo/S_DisplayNormalCopulaPdf.R =================================================================== --- pkg/Meucci/demo/S_DisplayNormalCopulaPdf.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_DisplayNormalCopulaPdf.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,12 +1,12 @@ -#'This script displays the pdf of the copula of a normal distribution, as described +#' This script displays the pdf of the copula of a normal distribution, as described #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} -#' See Meucci's script for "S_DisplayNormalCopulaPdf.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 33 - Pdf of the normal copula". #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ############################################################################################################# ### input parameters Modified: pkg/Meucci/demo/S_DisplayStudentTCopulaPdf.R =================================================================== --- pkg/Meucci/demo/S_DisplayStudentTCopulaPdf.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_DisplayStudentTCopulaPdf.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -2,11 +2,13 @@ #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 88 - Copula vs. Correlation". 
+#' #' See Meucci's script for "S_DisplayStudentTCopulaPdf.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ############################################################################################################# ### input parameters Modified: pkg/Meucci/demo/S_EllipticalNDim.R =================================================================== --- pkg/Meucci/demo/S_EllipticalNDim.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_EllipticalNDim.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -4,11 +4,13 @@ #' Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 81 - Radial-uniform representation". +#' #' See Meucci's script for "S_EllipticalNDim.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ################################################################################################################## ### Parameters Modified: pkg/Meucci/demo/S_ExactMeanAndCovariance.R =================================================================== --- pkg/Meucci/demo/S_ExactMeanAndCovariance.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_ExactMeanAndCovariance.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -2,12 +2,13 @@ #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 64 - Simulation of a multivariate normal random variable with matching moments". 
+#' #' See Meucci's script for "S_ExactMeanAndCovariance.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} - ######################################################################################################## ### Inputs N = 20; # dimension (number of risk factors) @@ -23,7 +24,6 @@ S = A %*% t( A ); # generate sample of size J from multivariate normal N(M,S) -#X = mvnrnd(M, S, J); # no match between sample and population moments (built-in) function X = MvnRnd( M, S, J ); # exact match between sample and population moments ######################################################################################################## Modified: pkg/Meucci/demo/S_FullCodependence.R =================================================================== --- pkg/Meucci/demo/S_FullCodependence.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_FullCodependence.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -2,11 +2,12 @@ #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 89 - Full co-dependence". +#' #' See Meucci's script for "S_FullCodependence.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export ############################################################################################################# ### Generate draws Modified: pkg/Meucci/demo/S_FxCopulaMarginal.R =================================================================== --- pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_FxCopulaMarginal.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -2,12 +2,14 @@ #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 39 - FX copula-marginal factorization". 
+#' #' See Meucci's script for "S_FxCopulaMarginal.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +############################################################################################################# ### Load data and select the pair to display data("fX" ) @@ -48,13 +50,9 @@ layout( matrix(c(1,2,2,1,2,2,0,3,3), 3, 3, byrow = TRUE), heights=c(1,2,1)); - - -#hist( X[ , Display[ 2 ] ], NumBins, xlab = db_FX$Fields[[ Display[ 2 ] + 1 ]], ylab = "", main = ""); barplot( table( cut( X[ , Display[ 2 ] ], NumBins )), horiz=TRUE, yaxt="n") axis( 2, at = seq(0, 100, 20), labels = seq( 0, 1, 0.2 ) ); - # scatter plot plot( Copula[ , Display[ 1 ] ], Copula[ , Display[ 2 ] ], main = "Copula", xlab = db_FX$Fields[[ Display[ 2 ] + 1 ]], ylab = db_FX$Fields[[ Display[ 1 ] + 1 ]] ); Modified: pkg/Meucci/demo/S_LognormalSample.R =================================================================== --- pkg/Meucci/demo/S_LognormalSample.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_LognormalSample.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -8,7 +8,7 @@ #' See Meucci's script for "S_LognormalSample.m". #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ################################################################################################################## ### Input parameters Modified: pkg/Meucci/demo/S_MaxMinVariance.R =================================================================== --- pkg/Meucci/demo/S_MaxMinVariance.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_MaxMinVariance.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -2,13 +2,13 @@ #' in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 53 - Location-dispersion ellipsoid and statistics". 
+#' #' See Meucci's script for "S_MaxMinVariance.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -if ( !require( "mvtnorm" ) ) stop("mvtnorm package installation required for this script") - ################################################################################################################## ### Input parameters Mu = rbind( 0.5, 0.5 ); @@ -81,7 +81,7 @@ # plot statistics versus geometry dev.new(); Scaled_Theta = Theta / (pi / 2); - # plot standard deviation as function of direction +# plot standard deviation as function of direction plot( Scaled_Theta, SDev, type = "l", xlab = "theta/(pi/2)", xlim = c( Scaled_Theta[ 1 ], Scaled_Theta[length(Scaled_Theta)] ) ); # plot radius of ellipsoid as function of direction lines( Scaled_Theta, Radius, col="red" ); Modified: pkg/Meucci/demo/S_OrderStatisticsPdfLognormal.R =================================================================== --- pkg/Meucci/demo/S_OrderStatisticsPdfLognormal.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_OrderStatisticsPdfLognormal.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,14 +1,14 @@ -library(scatterplot3d); - #' This script script shows that the pdf of the r-th order statistics of a lognormal random variable, #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 78 - Order statistics". 
+#' #' See Meucci's script for "S_OrderStatisticsPdfLognormal.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' if ( !require( "scatterplot3d" ) ) stop("scatterplot3d package installation required for this script") Modified: pkg/Meucci/demo/S_OrderStatisticsPdfStudentT.R =================================================================== --- pkg/Meucci/demo/S_OrderStatisticsPdfStudentT.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_OrderStatisticsPdfStudentT.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -2,11 +2,13 @@ #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 78 - Order statistics". +#' #' See Meucci's script for "S_OrderStatisticsPdfStudentT.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' if ( !require( "scatterplot3d" ) ) stop("scatterplot3d package installation required for this script") Modified: pkg/Meucci/demo/S_ResidualAnalysisTheory.R =================================================================== --- pkg/Meucci/demo/S_ResidualAnalysisTheory.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_ResidualAnalysisTheory.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -6,7 +6,7 @@ #' See Meucci's script for "S_ResidualAnalysisTheory.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_SelectionHeuristics.R =================================================================== --- pkg/Meucci/demo/S_SelectionHeuristics.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_SelectionHeuristics.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -215,7 +215,6 @@ #' See Meucci's script for "S_SelectionHeuristics.m" #' #' @author Xavier Valls 
\email{flamejat@@gmail.com} -#' ################################################################################################################## Modified: pkg/Meucci/demo/S_Wishart.R =================================================================== --- pkg/Meucci/demo/S_Wishart.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_Wishart.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,18 +1,21 @@ #' This script generates a sample from the 2x2 Wishart distribution. -#' it shows that determinant and trace are positive, i.e. the matrix is positive -#' it shows that the marginal diagonal are gamma-distributed +#' - it shows that determinant and trace are positive, i.e. the matrix is positive +#' - it shows that the marginal diagonal are gamma-distributed #' Described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 75 - Simulation of a Wishart random variable". 
+#' #' See Meucci's script for "S_Wishart.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export if ( !require( "scatterplot3d" ) ) stop("scatterplot3d package installation required for this script") + ################################################################################################################### ### Set inputs + s = c( 1, 1 ); # variances r = 0.3; # correlation Sigma = diag( c( s ) ) %*% rbind( c( 1, r ), c( r, 1 ) ) %*% diag( c( s ) ); @@ -23,11 +26,11 @@ ### Generate draws # initialize storage vectors/matrices -W_xx = matrix( NaN, nSim, 1 ); -W_yy = matrix( NaN, nSim, 1 ); -W_xy = matrix( NaN, nSim, 1 ); -Vec_W = matrix( NaN, nSim, 4 ); -Dets = matrix( NaN, nSim, 1 ); +W_xx = matrix( NaN, nSim, 1 ); +W_yy = matrix( NaN, nSim, 1 ); +W_xy = matrix( NaN, nSim, 1 ); +Vec_W = matrix( NaN, nSim, 4 ); +Dets = matrix( NaN, nSim, 1 ); Traces = matrix( NaN, nSim, 1 ); # generate draws and store elements of W, trace and determinant @@ -112,4 +115,3 @@ print(Covariance); print(Sample_Mean); print(Sample_Covariance); - Modified: pkg/Meucci/demo/S_WishartCorrelation.R =================================================================== --- pkg/Meucci/demo/S_WishartCorrelation.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_WishartCorrelation.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -3,11 +3,13 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 87 - Correlation and location-dispersion ellipsoid", "E 75 - Simulation of a Wishart random variable". 
+#' #' See Meucci's script for "S_WishartCorrelation.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ################################################################################################################### ### Inputs Modified: pkg/Meucci/demo/S_WishartLocationDispersion.R =================================================================== --- pkg/Meucci/demo/S_WishartLocationDispersion.R 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/demo/S_WishartLocationDispersion.R 2013-09-16 10:40:00 UTC (rev 3118) @@ -3,11 +3,13 @@ #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 2. #' #' @references -#' \url{http://} -#' See Meucci's script for "S_WishartCorrelation.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 87 - Correlation and location-dispersion ellipsoid", "E 75 - Simulation of a Wishart random variable". #' +#' See Meucci's script for "S_WishartLocationDispersion.m" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' @export +#' ################################################################################################################### ### Set input parameters Modified: pkg/Meucci/man/LognormalCopulaPdf.Rd =================================================================== --- pkg/Meucci/man/LognormalCopulaPdf.Rd 2013-09-16 09:42:42 UTC (rev 3117) +++ pkg/Meucci/man/LognormalCopulaPdf.Rd 2013-09-16 10:40:00 UTC (rev 3118) @@ -1,19 +1,18 @@ \name{LognormalCopulaPdf} \alias{LognormalCopulaPdf} -\title{Computes the pdf of the copula of the lognormal distribution at the generic point u in the unit hypercube, -as described in A. 
Meucci, "Risk and Asset Allocation", Springer, 2005.} +\title{Computes the pdf of the copula of the lognormal distribution at the generic point u in the unit hypercube.} \usage{ LognormalCopulaPdf(u, Mu, Sigma) } \arguments{ - \item{u}{: [vector] (J x 1) grades} + \item{u}{[vector] (J x 1) grades} - \item{Mu}{: [vector] (N x 1) location parameter} + \item{Mu}{[vector] (N x 1) location parameter} - \item{Sigma}{: [matrix] (N x N) scatter parameter} + \item{Sigma}{[matrix] (N x N) scatter parameter} } \value{ - F_U : [vector] (J x 1) PDF values + F_U [vector] (J x 1) PDF values } \description{ Computes the pdf of the copula of the lognormal @@ -25,7 +24,10 @@ Xavier Valls \email{flamejat at gmail.com} } \references{ - \url{http://} See Meucci's script for - "LognormalCopulaPdf.m" + A. Meucci - "Exercises in Advanced Risk and Portfolio + Management" \url{http://symmys.com/node/170}, "E 36 - Pdf + of the lognormal copula". + + See Meucci's script for "LognormalCopulaPdf.m" } [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3118 From noreply at r-forge.r-project.org Mon Sep 16 19:24:03 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 19:24:03 +0200 (CEST) Subject: [Returnanalytics-commits] r3119 - in pkg/Meucci: R demo man Message-ID: <20130916172403.C2294183F49@r-forge.r-project.org> Author: xavierv Date: 2013-09-16 19:24:03 +0200 (Mon, 16 Sep 2013) New Revision: 3119 Modified: pkg/Meucci/R/BlackScholesCallPrice.R pkg/Meucci/R/ConvertChangeInYield2Price.R pkg/Meucci/R/InterExtrapolate.R pkg/Meucci/R/MaxRsqCS.R pkg/Meucci/R/PerformIidAnalysis.R pkg/Meucci/R/ProjectionStudentT.R pkg/Meucci/demo/S_AutocorrelatedProcess.R pkg/Meucci/demo/S_BondProjectionPricingNormal.R pkg/Meucci/demo/S_BondProjectionPricingStudentT.R pkg/Meucci/demo/S_CallsProjectionPricing.R pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R pkg/Meucci/demo/S_CrossSectionIndustries.R 
pkg/Meucci/demo/S_DerivativesInvariants.R pkg/Meucci/demo/S_EquitiesInvariants.R pkg/Meucci/demo/S_EquityProjectionPricing.R pkg/Meucci/demo/S_FactorAnalysisNotOk.R pkg/Meucci/demo/S_FactorResidualCorrelation.R pkg/Meucci/demo/S_FixedIncomeInvariants.R pkg/Meucci/demo/S_HedgeOptions.R pkg/Meucci/demo/S_HorizonEffect.R pkg/Meucci/demo/S_WishartLocationDispersion.R pkg/Meucci/man/BlackScholesCallPrice.Rd pkg/Meucci/man/ConvertChangeInYield2Price.Rd pkg/Meucci/man/InterExtrapolate.Rd pkg/Meucci/man/MaxRsqCS.Rd pkg/Meucci/man/PerformIidAnalysis.Rd pkg/Meucci/man/ProjectionStudentT.Rd Log: - updated documentation for half chapter 3 demo scripts and its functions Modified: pkg/Meucci/R/BlackScholesCallPrice.R =================================================================== --- pkg/Meucci/R/BlackScholesCallPrice.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/R/BlackScholesCallPrice.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,22 +1,25 @@ -#' Compute the Black-Scholes price of a European call or put option +#' @title Compute the Black-Scholes price of a European call or put option. +#' +#' @description Compute the Black-Scholes price of a European call or put option #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. 
#' -#' @param spot : [scalar] spot price of underlying -#' @param K : [scalar] strike of the call optioon -#' @param r : [scalar] risk free rate as a fraction -#' @param vol : [scalar] volatility of the underlying as a fraction -#' @param T : [scalar] time to maturity in years +#' @param spot [scalar] spot price of underlying +#' @param K [scalar] strike of the call optioon +#' @param r [scalar] risk free rate as a fraction +#' @param vol [scalar] volatility of the underlying as a fraction +#' @param T [scalar] time to maturity in years #' -#' @return c : [scalar] price of European call(s) -#' @return p : [scalar] price of European put(s) -#' @return delta : [scalar] delta of the call(s) or put(s) -#' @return cash : [scalar] cash held in a replicating portfolio +#' @return c [scalar] price of European call(s) +#' @return p [scalar] price of European put(s) +#' @return delta [scalar] delta of the call(s) or put(s) +#' @return cash [scalar] cash held in a replicating portfolio #' #' @note #' Code is vectorized, so the inputs can be vectors or matrices (but sizes must match) #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
+#' #' See Meucci's script for "BlackScholesCallPrice.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -26,9 +29,9 @@ { d1 = ( log( spot / K ) + ( r + vol * vol / 2) * T) / (vol * sqrt(T)); d2 = d1 - vol * sqrt(T); - delta = pnorm(d1); - cash = -K * exp( -r * T ) * pnorm( d2 ); - c = spot * delta + cash; + delta = pnorm(d1); # delta of the call + cash = -K * exp( -r * T ) * pnorm( d2 ); # cash held in a replicating portfolio + c = spot * delta + cash; # price of call return( list( c = c, delta = delta, cash = cash ) ); } @@ -40,9 +43,9 @@ { d1 = ( log( spot / K ) + ( r + vol * vol / 2) * T) / (vol * sqrt(T)); d2 = d1 - vol * sqrt(T); - delta = pnorm( -d1 ); - cash = -K * exp( -r * T ) * pnorm( d2 ); - p = -( spot * delta + cash ); + delta = pnorm( -d1 ); # delta of the call + cash = -K * exp( -r * T ) * pnorm( d2 ); # cash held in a replicating portfolio + p = -( spot * delta + cash ); # price of put return( list( put = p, delta = delta, cash = cash ) ); } @@ -54,9 +57,9 @@ { d1 = ( log( spot / K ) + ( r + vol * vol / 2) * T) / (vol * sqrt(T)); d2 = d1 - vol * sqrt(T); - cash = -K * exp( -r * T ) * pnorm( d2 ); - c = spot * pnorm( d1 ) + cash; - p = -( spot * pnorm( -d1 ) + cash); + cash = -K * exp( -r * T ) * pnorm( d2 ); # cash held in a replicating portfolio + c = spot * pnorm( d1 ) + cash; # price of call + p = -( spot * pnorm( -d1 ) + cash ); # price of put return( list( call = c, put = p, cash = cash ) ); } Modified: pkg/Meucci/R/ConvertChangeInYield2Price.R =================================================================== --- pkg/Meucci/R/ConvertChangeInYield2Price.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/R/ConvertChangeInYield2Price.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,17 +1,21 @@ -#' Convert change in yield-to-maturity to price for fixed-income securities, as described in +#' @title Convert change in yield-to-maturity to price for fixed-income securities +#' +#' @description Convert change in yield-to-maturity to price for 
fixed-income securities, as described in #' A. Meucci "Risk and Asset Allocation", Springer, 2005 #' -#' @param Exp_DY : [vector] (N x 1) expected value of change in yield to maturity -#' @param Cov_DY : [matrix] (N x N) covariance of change in yield to maturity -#' @param Times2Mat : [scalar] time to maturity -#' @param CurrentPrices : [vector] (N x 1) current prices +#' @param Exp_DY [vector] (N x 1) expected value of change in yield to maturity +#' @param Cov_DY [matrix] (N x N) covariance of change in yield to maturity +#' @param Times2Mat [scalar] time to maturity +#' @param CurrentPrices [vector] (N x 1) current prices #' -#' @return Exp_Prices : [vector] (N x 1) expected prices -#' @return Cov_Prices : [matrix] (N x N) covariance of prices +#' @return Exp_Prices [vector] (N x 1) expected prices +#' @return Cov_Prices [matrix] (N x N) covariance of prices #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' #' See (6.77)-(6.79) in "Risk and Asset Allocation"-Springer (2005), by A. Meucci +#' #' See Meucci's script for "ConvertChangeInYield2Price.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/InterExtrapolate.R =================================================================== --- pkg/Meucci/R/InterExtrapolate.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/R/InterExtrapolate.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,28 +1,27 @@ #' Interpolate and extrapolate using n-linear interpolation (tensor product linear). #' -#' @param V : [array] p-dimensional array to be interpolated/extrapolated at the list of points in the array Xi. +#' @param V [array] p-dimensional array to be interpolated/extrapolated at the list of points in the array Xi. # interpne will work in any number of dimensions >= 1 -#' @param Xi : [array] (n x p) array of n points to interpolate/extrapolate. Each point is one row of the array Xi. 
-#' @param nodelist : [cell array] (optional) cell array of nodes in each dimension. +#' @param Xi [array] (n x p) array of n points to interpolate/extrapolate. Each point is one row of the array Xi. +#' @param nodelist [cell array] (optional) cell array of nodes in each dimension. # If nodelist is not provided, then by default I will assume nodelist[[i]] = 1:size(V,i). The nodes in # nodelist need not be uniformly spaced. -#' @param method : [string] (optional) chacter string, denotes the interpolation method used. default method = 'linear' +#' @param method [string] (optional) chacter string, denotes the interpolation method used. default method = 'linear' # 'linear' --> n-d linear tensor product interpolation/extrapolation # 'nearest' --> n-d nearest neighbor interpolation/extrapolation # in 2-d, 'linear' is equivalent to a bilinear interpolant # in 3-d, it is commonly known as trilinear interpolation. #' -#' @return Vpred : [array] (n x 1) array of interpolated/extrapolated values +#' @return Vpred [array] (n x 1) array of interpolated/extrapolated values #' #' @note -#' Initially written by John D'Errico -#' Vpred = interpne(V,Xi) -#' Vpred = interpne(V,Xi,nodelist) -#' Vpred = interpne(V,Xi,nodelist,method) +#' Initially written by John D'Errico. +#' #' Extrapolating long distances outside the support of V is rarely advisable. #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
+#' #' See Meucci's script for "InterExtrapolate.R" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -33,12 +32,12 @@ # [x1,x2] = meshgrid(0:.2:1); # z = exp(x1+x2); # Xi = rand(100,2)*2-.5; -# Zi = interpne(z,Xi,{0:.2:1, 0:.2:1},'linear'); +# Zi = InterExtrapolate(z,Xi,{0:.2:1, 0:.2:1},'linear'); # surf(0:.2:1,0:.2:1,z) # plot3( Xi(:,1),Xi(:,2),Zi,'ro') # -InterExtrapolate = function( V, Xi, nodelist, method ) +InterExtrapolate = function( V, Xi, nodelist = NULL, method = NULL ) { # get some sizes @@ -147,7 +146,7 @@ # tensor product linear is not too nasty. Vpred = matrix( 0, nrow(Xi), 1); # define the 2^ndims corners of a hypercube (MATLAB's corners = (dec2bin(0:(2^ndims-1))== '1');) - corners = lapply( strsplit( intToBin ( 0 : ( 2^ndims - 1 ) ), split=""), as.integer ); + corners = lapply( strsplit( intToBin ( 0 : ( 2^ndims - 1 ) ), split = "" ), as.integer ); nc = length( corners ); Modified: pkg/Meucci/R/MaxRsqCS.R =================================================================== --- pkg/Meucci/R/MaxRsqCS.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/R/MaxRsqCS.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,4 +1,7 @@ -#' Solve for G that maximises sample r-square of X*G'*B' with X under constraints A*G<=D +#' @title Solve for G that maximises sample r-square of X*G'*B' with X under constraints A*G<=D +#' and Aeq*G=Deq +#' +#' @description Solve for G that maximises sample r-square of X*G'*B' with X under constraints A*G<=D #' and Aeq*G=Deq (A,D, Aeq,Deq conformable matrices),as described in A. Meucci, #' "Risk and Asset Allocation", Springer, 2005. #' @@ -19,6 +22,8 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' Used in "E 123 ? Cross-section factors: generalized cross-section industry factors". 
+#' #' See Meucci's script for "MaxRsqCS.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/PerformIidAnalysis.R =================================================================== --- pkg/Meucci/R/PerformIidAnalysis.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/R/PerformIidAnalysis.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,4 +1,6 @@ -#' This function performs simple invariance (i.i.d.) tests on a time series, as described in +#' @title Performs simple invariance (i.i.d.) tests on a time series. +#' +#' @description This function performs simple invariance (i.i.d.) tests on a time series, as described in #' A. Meucci "Risk and Asset Allocation", Springer, 2005 #' #' @param Dates : [vector] (T x 1) dates @@ -6,12 +8,16 @@ #' @param Str : [string] title for the plot #' #' @note it checks the evolution over time -# it checks that the variables are identically distributed by looking at the histogram of two subsamples -# it checks that the variables are independent by looking at the 1-lag scatter plot -# under i.i.d. the location-dispersion ellipsoid should be a circle #' +#' it checks that the variables are identically distributed by looking at the histogram of two subsamples +#' +#' it checks that the variables are independent by looking at the 1-lag scatter plot +#' +#' under i.i.d. the location-dispersion ellipsoid should be a circle +#' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
+#' #' See Meucci's script for "PerformIidAnalysis.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -52,9 +58,5 @@ m = cbind( apply( cbind( X, Y ), 2, mean )); S = cov( cbind( X, Y )); TwoDimEllipsoid( m, S, 2, FALSE, FALSE); - #axisLimits = axis; - #textX = axisLimits(1:2)*[-0.1,1.1]'; - #textY = axisLimits(3:4)*[0.1,0.9]'; - #text(textX, textY, Str); } \ No newline at end of file Modified: pkg/Meucci/R/ProjectionStudentT.R =================================================================== --- pkg/Meucci/R/ProjectionStudentT.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/R/ProjectionStudentT.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,17 +1,21 @@ -#' Perform the horizon projection of a Student t invariant, as described in +#' @title Perform the horizon projection of a Student t invariant +#' +#' @description Perform the horizon projection of a Student t invariant, as described in #' A. Meucci "Risk and Asset Allocation", Springer, 2005 #' -#' @param nu : [scalar] degree of freedom -#' @param s : [scalar] scatter parameter -#' @param m : [scalar] location parameter -#' @param T : [scalar] multiple of the estimation period to the invesment horizon +#' @param nu [scalar] degree of freedom +#' @param s [scalar] scatter parameter +#' @param m [scalar] location parameter +#' @param T [scalar] multiple of the estimation period to the invesment horizon #' -#' @return x_Hor : [scalar] -#' @return f_Hor : [scalar] -#' @return F_Hor : [scalar] +#' @return x_Hor [scalar] probabilities at horizon +#' @return f_Hor [scalar] horizon discretized pdf (non-standarized) +#' @return F_Hor [scalar] horizon discretized cdf (non-standarized) #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 141 ? Fixed-income market: projection of Student t invariants". 
+#' #' See Meucci's script for "ProjectionStudentT.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_AutocorrelatedProcess.R =================================================================== --- pkg/Meucci/demo/S_AutocorrelatedProcess.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_AutocorrelatedProcess.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,15 +1,17 @@ - -#' This script simulates a Ornstein-Uhlenbeck AR(1) process, as described in A. Meucci, " -#' Risk and Asset Allocation", Springer, 2005, Chapter 3. +#' This script simulates a Ornstein-Uhlenbeck AR(1) process, as described in A. Meucci, +#' "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 133 ? Simulation of a Ornstein-Uhlenbeck process". +#' #' See Meucci's script for "S_AutocorrelatedProcess.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################## ### Input parameters + theta = 0.1; # reversion speed m = 0.05; # long term mean sigma = 0.01; # volatility @@ -19,7 +21,7 @@ ################################################################################################################## ### Determine parameters var = sigma^2 / 2 / theta * ( 1 - exp( -2 * theta * tau ) ); -sd = sqrt(var); +sd = sqrt(var); eps = rnorm( T, 0, sd ); x = matrix( NaN, T, 1); @@ -30,5 +32,5 @@ x[ t + 1 ] = m + exp( -theta * tau ) * ( x[ t ] - m ) + eps[ t ]; } -dev.new() +dev.new(); plot( x, type="l", main = "AR(1) process vs. 
time" ); Modified: pkg/Meucci/demo/S_BondProjectionPricingNormal.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,15 +1,15 @@ - #'This script projects the distribution of the market invariants for the bond markets #'(i.e. the changes in yield to maturity) from the estimation interval to the investment horizon #'Then it computes the distribution of prices at the investment horizon as described in A. Meucci, #'"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 140 ? Fixed-income market: projection of normal invariants". +#' #' See Meucci's script for "S_BondProjectionPricingNormal.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' ################################################################################################################## ### Inputs @@ -61,5 +61,3 @@ BondCov_Prices = cov( BondMarket_Scenarios ); print( BondExp_Prices ); print( BondCov_Prices ); - -### EOF \ No newline at end of file Modified: pkg/Meucci/demo/S_BondProjectionPricingStudentT.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingStudentT.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_BondProjectionPricingStudentT.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -4,11 +4,12 @@ #'horizon as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 141 ? Fixed-income market: projection of Student t invariants". 
+#' #' See Meucci's script for "S_BondProjectionPricingStudentT.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} -#' ################################################################################################################## ### Inputs Modified: pkg/Meucci/demo/S_CallsProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -3,7 +3,9 @@ #'"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 143 ? Derivatives market: projection of invariants". +#' #' See Meucci's script for "S_CallsProjectionPricing.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -27,8 +29,8 @@ J = 10000; # number of simulations ################################################################################################################## -numCalls = length( Time2Mats ); -timeLength = length( implVol$spot ); +numCalls = length( Time2Mats ); +timeLength = length( implVol$spot ); numSurfPoints = length( implVol$days2Maturity ) * length( implVol$moneyness ); ################################################################################################################## @@ -36,6 +38,7 @@ # variables in X are changes in log(spot) and changes in log(imp.vol) # evaluated at the 'numSurfPoints' points on the vol surface (vectorized). 
X = matrix( 0, timeLength - 1, numSurfPoints + 1 ); + # log-changes of underlying spot X[ , 1 ] = diff( log( implVol$spot ) ); @@ -46,12 +49,12 @@ X[ , i+1 ] = diff( log( impVolSeries[ , i ] ) ); } -muX = apply( X , 2, mean ); +muX = apply( X , 2, mean ); SigmaX = cov( X ); ################################################################################################################## ### Project distribution to investment horizon -muX = muX * tau / tau_tilde; +muX = muX * tau / tau_tilde; SigmaX = SigmaX * tau / tau_tilde; ################################################################################################################## Modified: pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -3,7 +3,9 @@ #' Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 123 ? Cross-section factors: generalized cross-section industry factors". +#' #' See Meucci's script for "S_CrossSectionConstrainedIndustries.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -2,7 +2,9 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 122 ? Cross-section factors: unconstrained cross-section industry factors". +#' #' See Meucci's script for "S_CrossSectionIndustries.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_DerivativesInvariants.R =================================================================== --- pkg/Meucci/demo/S_DerivativesInvariants.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_DerivativesInvariants.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -2,7 +2,9 @@ #' in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 142 - Derivatives market: quest for invariance". +#' #' See Meucci's script for "S_DerivativesInvariants.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_EquitiesInvariants.R =================================================================== --- pkg/Meucci/demo/S_EquitiesInvariants.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_EquitiesInvariants.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -2,13 +2,13 @@ #' A. Meucci "Risk and Asset Allocation", Springer, 2005, chapter 3. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 135 ? Equity market: quest for invariance". 
+#' #' See Meucci's script for "S_EquitiesInvariants.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} - - ################################################################################################################## ### Load daily stock prices from the utility sector in the S&P 500 data("equities"); @@ -35,4 +35,3 @@ # fourth invariant W = P[ 3 : length( P ) ] - 2 * P[ 2: ( length( P ) -1 ) ] + P[ 1 : ( length( P ) -2 ) ]; PerformIidAnalysis( 1 : length( W ), W, 'Analysis for W' ); - Modified: pkg/Meucci/demo/S_EquityProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_EquityProjectionPricing.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_EquityProjectionPricing.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,14 +1,15 @@ - #' This script projects the distribution of the market invariants for the stock market (i.e. the compounded returns) #' from the estimation interval (normal assumption) to the investment horizon. Then it computes the distribution of prices #' at the investment horizon analytically, by full Monte Carlo, and by delta/duration approximation. -#' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005, -#' chapter 3. #' +#' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005, chapter 3. +#' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_EquitiesInvariance.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 138 ? Equity market: linear vs. compounded returns projection II". 
#' +#' See Meucci's script for "S_EquityProjectionPricing.m" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################# Modified: pkg/Meucci/demo/S_FactorAnalysisNotOk.R =================================================================== --- pkg/Meucci/demo/S_FactorAnalysisNotOk.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_FactorAnalysisNotOk.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -2,12 +2,13 @@ #'"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 111 ? Hidden factors: puzzle". +#' #' See Meucci's script for "S_FactorAnalysisNotOk.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} - ################################################################################################################## ### Inputs @@ -36,12 +37,13 @@ FA = factanal(X, K, scores = "Bartlett" ); # factor analysis recovers the structure exactly however... 
-S_ = FA$loadings %*% t( FA$loadings ) + diag( FA$uniquenesses, length( FA$uniquenesses) ); +S_ = FA$loadings %*% t( FA$loadings ) + diag( FA$uniquenesses, length( FA$uniquenesses) ); Match = 1 - max( abs( ( S - S_) / S) ); print(Match); # ...the systematic+idiosyncratic decomposition is NOT recovered -U_ = X - FA$scores %*% t(FA$loadings); # compute residuals +U_ = X - FA$scores %*% t(FA$loadings); # compute residuals S_U = cor( U_ ); # compute correlations + # residuals are not idiosyncratic print( S_U ); \ No newline at end of file Modified: pkg/Meucci/demo/S_FactorResidualCorrelation.R =================================================================== --- pkg/Meucci/demo/S_FactorResidualCorrelation.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_FactorResidualCorrelation.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -3,7 +3,9 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 125 ? Correlation factors-residual: normal example". 
+#' #' See Meucci's script for "S_FactorResidualCorrelation.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -14,8 +16,8 @@ nSim = 10000; mu = 0.1 + 0.3 * runif(N); sigma = 0.5 * mu; -dd = matrix(rnorm( N*N ), N, N ); -Corr = cov2cor( dd %*% t( dd ) ); +dd = matrix(rnorm( N*N ), N, N ); +Corr = cov2cor( dd %*% t( dd ) ); Sigma = diag( sigma, length(sigma) ) %*% Corr %*% diag( sigma, length(sigma) ); ################################################################################################################## @@ -24,7 +26,7 @@ ################################################################################################################## ### Generate a random vector beta -beta = matrix(1, N ) + rnorm(N) * 0.1; +beta = matrix( 1, N ) + rnorm(N) * 0.1; ################################################################################################################## ### Compute factor realization by cross-sectional regression and residuals Modified: pkg/Meucci/demo/S_FixedIncomeInvariants.R =================================================================== --- pkg/Meucci/demo/S_FixedIncomeInvariants.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_FixedIncomeInvariants.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -2,7 +2,9 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 139 ? Fixed-income market: quest for invariance". +#' #' See Meucci's script for "S_FixedIncomeInvariants.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_HedgeOptions.R =================================================================== --- pkg/Meucci/demo/S_HedgeOptions.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_HedgeOptions.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -2,7 +2,9 @@ #' A. 
Meucci "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 127 ? Factors on demand: no-Greek hedging". +#' #' See Meucci's script for "S_HedgeOptions.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -23,8 +25,8 @@ ################################################################################################################## ### Underlying and volatility surface -numCalls = length( Time2Mats ); -timeLength = length( implVol$spot ); +numCalls = length( Time2Mats ); +timeLength = length( implVol$spot ); numSurfPoints = length( implVol$days2Maturity ) * length( implVol$moneyness ); ################################################################################################################## Modified: pkg/Meucci/demo/S_HorizonEffect.R =================================================================== --- pkg/Meucci/demo/S_HorizonEffect.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_HorizonEffect.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -1,16 +1,26 @@ - #'This script studies horizon effect on explicit factors / implicit loadings linear model, as described in #'A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 3. -#'Compounded returns follow the linear model X = tau*muX + D*F + epsilon, where +#'Compounded returns follow the linear model X = tau*muX + D*F + epsilon, where: +#' #' tau: investment horizon (in weeks) +#' #' muX: expected weekly compounded returns +#' #' F: factor compounded returns, with zero expectation and tau-proportional covariance +#' #' D: matrix of factor loadings +#' #' epsilon: uncorrelated (idiosyncratic) shocks. +#' #' R = exp(X)-1 and Z = exp(F)-1 are the linear returns #' +#' @note See "E 116 ? Time series factors: analysis of residuals I" from +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 126 ? Factors on demand: horizon effect". +#' #' See Meucci's script for "S_HorizonEffect.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -76,6 +86,7 @@ ################################################################################################################## ### Plots + # relationship between the constant nd the intercept dev.new(); plot(tauRangeWeeks, aMinusTauMuX, type= "l", xlab = expression(paste("investment horizon, ", tau,", weeks")), @@ -85,8 +96,6 @@ dev.new(); plot(tauRangeWeeks, normDminusB, type = "l", xlab = expression(paste("investment horizon, ", tau,", weeks")), main = expression("norm of (D-B)"^t)); - - # determine if U idiosyncratic dev.new(); plot(tauRangeWeeks, maxCorrU, col = "red", type = "l", xlab = expression(paste("investment horizon, ", tau,", weeks")), Modified: pkg/Meucci/demo/S_WishartLocationDispersion.R =================================================================== --- pkg/Meucci/demo/S_WishartLocationDispersion.R 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/demo/S_WishartLocationDispersion.R 2013-09-16 17:24:03 UTC (rev 3119) @@ -22,11 +22,11 @@ ################################################################################################################### ### Set input parameters -W_xx = matrix( NaN, nSim, 1 ); -W_yy = matrix( NaN, nSim, 1 ); -W_xy = matrix( NaN, nSim, 1 ); -Vec_W = matrix( NaN, nSim, 4 ); -Dets = matrix( NaN, nSim, 1 ); +W_xx = matrix( NaN, nSim, 1 ); +W_yy = matrix( NaN, nSim, 1 ); +W_xy = matrix( NaN, nSim, 1 ); +Vec_W = matrix( NaN, nSim, 4 ); +Dets = matrix( NaN, nSim, 1 ); Traces = matrix( NaN, nSim, 1 ); @@ -80,7 +80,7 @@ S = diag( 1 / c( sqrt( 
var_Wxx ), sqrt( var_Wxy ))) %*% S_xx_xy %*% diag( 1 / c( sqrt( var_Wxx ), sqrt( var_Wxy ))); S_hat = cov( X ); -figure(); +dev.new(); plot( X_1, X_2, xlab = "X_1", ylab = "X_2"); TwoDimEllipsoid(E, S, 1, TRUE, FALSE); Modified: pkg/Meucci/man/BlackScholesCallPrice.Rd =================================================================== --- pkg/Meucci/man/BlackScholesCallPrice.Rd 2013-09-16 10:40:00 UTC (rev 3118) +++ pkg/Meucci/man/BlackScholesCallPrice.Rd 2013-09-16 17:24:03 UTC (rev 3119) @@ -2,8 +2,7 @@ \alias{BlackScholesCallPrice} \alias{BlackScholesCallPutPrice} \alias{BlackScholesPutPrice} -\title{Compute the Black-Scholes price of a European call or put option - as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005.} +\title{Compute the Black-Scholes price of a European call or put option.} \usage{ BlackScholesCallPrice(spot, K, r, vol, T) @@ -12,25 +11,25 @@ [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3119 From noreply at r-forge.r-project.org Mon Sep 16 21:54:19 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 21:54:19 +0200 (CEST) Subject: [Returnanalytics-commits] r3120 - in pkg/PortfolioAnalytics: . 
R man Message-ID: <20130916195419.E7525185F06@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-16 21:54:19 +0200 (Mon, 16 Sep 2013) New Revision: 3120 Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/chart.RiskReward.R pkg/PortfolioAnalytics/R/chart.Weights.R pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/R/charts.GenSA.R pkg/PortfolioAnalytics/R/charts.PSO.R pkg/PortfolioAnalytics/R/charts.ROI.R pkg/PortfolioAnalytics/R/charts.RP.R pkg/PortfolioAnalytics/R/charts.efficient.frontier.R pkg/PortfolioAnalytics/R/charts.groups.R pkg/PortfolioAnalytics/R/charts.risk.R pkg/PortfolioAnalytics/R/constrained_objective.R pkg/PortfolioAnalytics/R/constraint_fn_map.R pkg/PortfolioAnalytics/R/constraints.R pkg/PortfolioAnalytics/R/constraintsFUN.R pkg/PortfolioAnalytics/R/extract.efficient.frontier.R pkg/PortfolioAnalytics/R/extractstats.R pkg/PortfolioAnalytics/R/objective.R pkg/PortfolioAnalytics/R/optFUN.R pkg/PortfolioAnalytics/R/optimize.portfolio.R pkg/PortfolioAnalytics/R/portfolio.R pkg/PortfolioAnalytics/R/random_portfolios.R pkg/PortfolioAnalytics/R/trailingFUN.R pkg/PortfolioAnalytics/man/add.constraint.Rd pkg/PortfolioAnalytics/man/add.objective.Rd pkg/PortfolioAnalytics/man/barplotGroupWeights.Rd pkg/PortfolioAnalytics/man/box_constraint.Rd pkg/PortfolioAnalytics/man/chart.EfficientFrontier.Rd pkg/PortfolioAnalytics/man/chart.EfficientFrontierOverlay.Rd pkg/PortfolioAnalytics/man/chart.GroupWeights.Rd pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd pkg/PortfolioAnalytics/man/chart.RiskReward.Rd pkg/PortfolioAnalytics/man/chart.Weights.EF.Rd pkg/PortfolioAnalytics/man/chart.Weights.Rd pkg/PortfolioAnalytics/man/constrained_objective.Rd pkg/PortfolioAnalytics/man/constraint.Rd pkg/PortfolioAnalytics/man/constraint_v2.Rd pkg/PortfolioAnalytics/man/create.EfficientFrontier.Rd pkg/PortfolioAnalytics/man/diversification.Rd 
pkg/PortfolioAnalytics/man/diversification_constraint.Rd pkg/PortfolioAnalytics/man/etl_milp_opt.Rd pkg/PortfolioAnalytics/man/etl_opt.Rd pkg/PortfolioAnalytics/man/extractEfficientFrontier.Rd pkg/PortfolioAnalytics/man/extractGroups.Rd pkg/PortfolioAnalytics/man/factor_exposure_constraint.Rd pkg/PortfolioAnalytics/man/fn_map.Rd pkg/PortfolioAnalytics/man/get_constraints.Rd pkg/PortfolioAnalytics/man/gmv_opt_toc.Rd pkg/PortfolioAnalytics/man/group_constraint.Rd pkg/PortfolioAnalytics/man/group_fail.Rd pkg/PortfolioAnalytics/man/insert_constraints.Rd pkg/PortfolioAnalytics/man/insert_objectives.Rd pkg/PortfolioAnalytics/man/maxret_milp_opt.Rd pkg/PortfolioAnalytics/man/meanetl.efficient.frontier.Rd pkg/PortfolioAnalytics/man/meanvar.efficient.frontier.Rd pkg/PortfolioAnalytics/man/minmax_objective.Rd pkg/PortfolioAnalytics/man/optimize.portfolio.Rd pkg/PortfolioAnalytics/man/optimize.portfolio.rebalancing.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.DEoptim.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.GenSA.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.ROI.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.pso.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.random.Rd pkg/PortfolioAnalytics/man/portfolio.spec.Rd pkg/PortfolioAnalytics/man/portfolio_risk_objective.Rd pkg/PortfolioAnalytics/man/position_limit_constraint.Rd pkg/PortfolioAnalytics/man/random_portfolios.Rd pkg/PortfolioAnalytics/man/randomize_portfolio_v1.Rd pkg/PortfolioAnalytics/man/return_constraint.Rd pkg/PortfolioAnalytics/man/return_objective.Rd pkg/PortfolioAnalytics/man/risk_budget_objective.Rd pkg/PortfolioAnalytics/man/rp_grid.Rd pkg/PortfolioAnalytics/man/rp_sample.Rd pkg/PortfolioAnalytics/man/rp_simplex.Rd pkg/PortfolioAnalytics/man/trailingFUN.Rd pkg/PortfolioAnalytics/man/transaction_cost_constraint.Rd pkg/PortfolioAnalytics/man/turnover_constraint.Rd pkg/PortfolioAnalytics/man/turnover_objective.Rd 
pkg/PortfolioAnalytics/man/weight_concentration_objective.Rd pkg/PortfolioAnalytics/man/weight_sum_constraint.Rd Log: Updating documentation files. Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-16 19:54:19 UTC (rev 3120) @@ -27,11 +27,8 @@ export(factor_exposure_constraint) export(fn_map) export(generatesequence) -export(get_constraints) export(group_constraint) -export(group_fail) export(HHI) -export(insert_constraints) export(insert_objectives) export(is.constraint) export(is.objective) Modified: pkg/PortfolioAnalytics/R/chart.RiskReward.R =================================================================== --- pkg/PortfolioAnalytics/R/chart.RiskReward.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/chart.RiskReward.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -2,6 +2,9 @@ #' classic risk reward scatter #' +#' This function charts the \code{optimize.portfolio} object in risk-return space. +#' +#' @details #' \code{neighbors} may be specified in three ways. #' The first is as a single number of neighbors. This will extract the \code{neighbors} closest #' portfolios in terms of the \code{out} numerical statistic. @@ -11,25 +14,26 @@ #' This matrix should look like the output of \code{\link{extractStats}}, and should contain #' \code{risk.col},\code{return.col}, and weights columns all properly named. #' -#' @param object optimal portfolio created by \code{\link{optimize.portfolio}} -#' @param neighbors set of 'neighbor' portfolios to overplot, see Details -#' @param \dots any other passthru parameters -#' @param return.col string matching the objective of a 'return' objective, on vertical axis -#' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis -#' @param chart.assets TRUE/FALSE. 
Includes a risk reward scatter of the assets in the chart -#' @param element.color color for the default plot scatter points -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param xlim set the x-axis limit, same as in \code{\link{plot}} -#' @param ylim set the y-axis limit, same as in \code{\link{plot}} +#' @param object optimal portfolio created by \code{\link{optimize.portfolio}}. +#' @param neighbors set of 'neighbor' portfolios to overplot, see Details. +#' @param \dots any other passthru parameters. +#' @param return.col string matching the objective of a 'return' objective, on vertical axis. +#' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis. +#' @param chart.assets TRUE/FALSE. Includes a risk reward scatter of the assets in the chart. +#' @param element.color color for the default plot scatter points. +#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex}. +#' @param xlim set the x-axis limit, same as in \code{\link{plot}}. +#' @param ylim set the y-axis limit, same as in \code{\link{plot}}. #' @param rp TRUE/FALSE to generate random portfolios to plot the feasible space -#' @param main a main title for the plot +#' @param main a main title for the plot. #' @param labels.assets TRUE/FALSE to include the names in the plot. #' @param pch.assets plotting character of the assets, same as in \code{\link{plot}} -#' @param cex.assets A numerical value giving the amount by which the asset points should be magnified relative to the default. -#' @param cex.lab A numerical value giving the amount by which the labels should be magnified relative to the default. -#' @param colorset color palette or vector of colors to use +#' @param cex.assets numerical value giving the amount by which the asset points should be magnified relative to the default. 
+#' @param cex.lab numerical value giving the amount by which the labels should be magnified relative to the default. +#' @param colorset color palette or vector of colors to use. #' @seealso \code{\link{optimize.portfolio}} #' @rdname chart.RiskReward +#' @name chart.RiskReward #' @export chart.RiskReward <- function(object, ...){ UseMethod("chart.RiskReward") Modified: pkg/PortfolioAnalytics/R/chart.Weights.R =================================================================== --- pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -1,27 +1,27 @@ #' boxplot of the weights of the optimal portfolios #' -#' Chart the optimal weights and upper and lower bounds on weights of a portfolio run via \code{\link{optimize.portfolio}} +#' Chart the optimal weights and upper and lower bounds on weights of a portfolio run via \code{\link{optimize.portfolio}}. #' -#' @param object optimal portfolio object created by \code{\link{optimize.portfolio}} -#' @param neighbors set of 'neighbor' portfolios to overplot -#' @param \dots any other passthru parameters +#' @param object optimal portfolio object created by \code{\link{optimize.portfolio}}. +#' @param neighbors set of 'neighbor' portfolios to overplot. See Details. +#' @param \dots any other passthru parameters.
#' @param main an overall title for the plot: see \code{\link{title}} #' @param las numeric in \{0,1,2,3\}; the style of axis labels #' \describe{ -#' \item{0:}{always parallel to the axis [\emph{default}],} +#' \item{0:}{always parallel to the axis,} #' \item{1:}{always horizontal,} #' \item{2:}{always perpendicular to the axis,} -#' \item{3:}{always vertical.} +#' \item{3:}{always vertical [\emph{default}].} #' } #' @param xlab a title for the x axis: see \code{\link{title}} #' @param cex.lab The magnification to be used for x and y labels relative to the current setting of \code{cex} -#' @param element.color color for the default plot lines -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param colorset color palette or vector of colors to use -#' @param legend.loc location of the legend. If NULL, the legend will not be plotted -#' @param cex.legend The magnification to be used for legend annotation relative to the current setting of \code{cex} -#' @param plot.type "line" or "barplot" +#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. +#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex}. +#' @param colorset color palette or vector of colors to use. +#' @param legend.loc location of the legend. If NULL, the legend will not be plotted. +#' @param cex.legend The magnification to be used for legend annotation relative to the current setting of \code{cex}. +#' @param plot.type "line" or "barplot" to plot. 
#' @seealso \code{\link{optimize.portfolio}} #' @rdname chart.Weights #' @name chart.Weights Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -310,7 +310,7 @@ #' plot method for optimize.portfolio.DEoptim output #' -#' scatter and weights chart for DEoptim portfolio optimizations run with trace=TRUE +#' Scatter and weights chart for DEoptim portfolio optimizations run with trace=TRUE #' #' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights #' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights Modified: pkg/PortfolioAnalytics/R/charts.GenSA.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -154,10 +154,10 @@ #' plot method for optimize.portfolio.DEoptim output #' -#' scatter and weights chart for GenSA portfolio optimizations run with trace=TRUE +#' Scatter and weights chart for GenSA portfolio optimizations run with trace=TRUE #' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights +#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights. +#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights. #' #' @param x object created by \code{\link{optimize.portfolio}} #' @param ... 
any other passthru parameters Modified: pkg/PortfolioAnalytics/R/charts.PSO.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -212,10 +212,10 @@ #' plot method for optimize.portfolio.pso output #' -#' scatter and weights chart for pso portfolio optimizations run with trace=TRUE +#' Scatter and weights chart for pso portfolio optimizations run with trace=TRUE #' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights +#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights. +#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights. #' #' @param x object created by \code{\link{optimize.portfolio}} #' @param ... 
any other passthru parameters Modified: pkg/PortfolioAnalytics/R/charts.ROI.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -156,15 +156,12 @@ #' plot method for optimize.portfolio.ROI output #' -#' scatter and weights chart for ROI portfolio optimizations run with trace=TRUE +#' Scatter and weights chart for ROI portfolio optimizations run with trace=TRUE #' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights -#' #' The ROI optimizers do not store the portfolio weights like DEoptim or random #' portfolios random portfolios can be generated for the scatter plot. #' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights +#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights. 
#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights #' #' @param x object created by \code{\link{optimize.portfolio}} Modified: pkg/PortfolioAnalytics/R/charts.RP.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -267,10 +267,10 @@ #' plot method for optimize.portfolio.random output #' -#' scatter and weights chart for random portfolio optimizations run with trace=TRUE +#' Scatter and weights chart for random portfolio optimizations run with trace=TRUE #' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights +#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights. +#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights. #' #' \code{neighbors} may be specified in three ways. #' The first is as a single number of neighbors. This will extract the \code{neighbors} closest @@ -298,9 +298,9 @@ #' plot method for optimize.portfolio output #' -#' scatter and weights chart for portfolio optimization +#' Scatter and weights chart for portfolio optimization #' -#' this is a fallback that will be called for classes of portfolio that do not have specific pre-existing plot methods. +#' This is a fallback that will be called for classes of portfolio that do not have specific pre-existing plot methods. #' #' \code{neighbors} may be specified in three ways. #' The first is as a single number of neighbors. 
This will extract the \code{neighbors} closest Modified: pkg/PortfolioAnalytics/R/charts.efficient.frontier.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.efficient.frontier.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/charts.efficient.frontier.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -2,7 +2,7 @@ #' Chart the efficient frontier and risk-return scatter #' #' Chart the efficient frontier and risk-return scatter of the assets for -#' optimize.portfolio. or efficient.frontier objects +#' \code{optimize.portfolio} or \code{efficient.frontier} objects #' #' @details #' For objects created by optimize.portfolio with 'DEoptim', 'random', or 'pso' @@ -16,12 +16,12 @@ #' For objects created by optimize.portfolio with 'ROI' specified as the #' optimize_method: #' \itemize{ -#' \item The mean-StdDev or mean-etl efficient frontier can be plotted for optimal +#' \item The mean-StdDev or mean-ETL efficient frontier can be plotted for optimal #' portfolio objects created by \code{optimize.portfolio}. #' #' \item If \code{match.col="StdDev"}, the mean-StdDev efficient frontier is plotted. #' -#' \item If \code{match.col="ETL"} (also "ES" or "CVaR"), the mean-etl efficient frontier is plotted. +#' \item If \code{match.col="ETL"} (also "ES" or "CVaR"), the mean-ETL efficient frontier is plotted. #' } #' #' Note that \code{trace=TRUE} must be specified in \code{\link{optimize.portfolio}} @@ -33,27 +33,27 @@ #' will be plotted using a risk free rate of 0. Set \code{rf=NULL} to omit #' this from the plot. #' -#' @param object object of class optimize.portfolio.ROI to chart +#' @param object object to chart. #' @param \dots passthru parameters to \code{\link{plot}} #' @param match.col string name of column to use for risk (horizontal axis). 
#' \code{match.col} must match the name of an objective measure in the #' \code{objective_measures} or \code{opt_values} slot in the object created #' by \code{\link{optimize.portfolio}}. -#' @param n.portfolios number of portfolios to use to plot the efficient frontier -#' @param xlim set the x-axis limit, same as in \code{\link{plot}} -#' @param ylim set the y-axis limit, same as in \code{\link{plot}} -#' @param cex.axis A numerical value giving the amount by which the axis should be magnified relative to the default. +#' @param n.portfolios number of portfolios to use to plot the efficient frontier. +#' @param xlim set the x-axis limit, same as in \code{\link{plot}}. +#' @param ylim set the y-axis limit, same as in \code{\link{plot}}. +#' @param cex.axis numerical value giving the amount by which the axis should be magnified relative to the default. #' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. -#' @param main a main title for the plot -#' @param RAR.text Risk Adjusted Return ratio text to plot in the legend -#' @param rf risk free rate. If \code{rf} is not null, the maximum Sharpe Ratio or modified Sharpe Ratio tangency portfolio will be plotted -#' @param tangent.line TRUE/FALSE to plot the tangent line -#' @param cex.legend A numerical value giving the amount by which the legend should be magnified relative to the default. -#' @param chart.assets TRUE/FALSE to include the assets +#' @param main a main title for the plot. +#' @param RAR.text string name for risk adjusted return text to plot in the legend. +#' @param rf risk free rate. If \code{rf} is not null, the maximum Sharpe Ratio or modified Sharpe Ratio tangency portfolio will be plotted. +#' @param tangent.line TRUE/FALSE to plot the tangent line. +#' @param cex.legend numerical value giving the amount by which the legend should be magnified relative to the default. +#' @param chart.assets TRUE/FALSE to include the assets. 
#' @param labels.assets TRUE/FALSE to include the asset names in the plot. -#' \code{chart.assets} must be \code{TRUE} to plot asset names -#' @param pch.assets plotting character of the assets, same as in \code{\link{plot}} -#' @param cex.assets A numerical value giving the amount by which the asset points and labels should be magnified relative to the default. +#' \code{chart.assets} must be \code{TRUE} to plot asset names. +#' @param pch.assets plotting character of the assets, same as in \code{\link{plot}}. +#' @param cex.assets numerical value giving the amount by which the asset points and labels should be magnified relative to the default. #' @author Ross Bennett #' @rdname chart.EfficientFrontier #' @export @@ -270,21 +270,21 @@ #' Chart weights along an efficient frontier #' -#' This function produces a stacked barplot of weights along the efficient frontier. +#' This function produces a stacked barplot of weights along an efficient frontier. #' -#' @param object object of class \code{efficient.frontier} or \code{optimize.portfolio} +#' @param object object of class \code{efficient.frontier} or \code{optimize.portfolio}. #' @param \dots passthru parameters to \code{barplot}. -#' @param colorset color palette to use -#' @param n.portfolios number of portfolios to extract along the efficient frontier -#' @param by.groups TRUE/FALSE. If TRUE, the group weights are charted +#' @param colorset color palette or vector of colors to use. +#' @param n.portfolios number of portfolios to extract along the efficient frontier. +#' @param by.groups TRUE/FALSE. If TRUE, the group weights are charted. #' @param match.col string name of column to use for risk (horizontal axis). Must match the name of an objective. #' @param main title used in the plot. 
-#' @param cex.lab The magnification to be used for x-axis and y-axis labels relative to the current setting of 'cex' -#' @param cex.axis The magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}} -#' @param cex.legend The magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}} -#' @param legend.labels character vector to use for the legend labels +#' @param cex.lab the magnification to be used for x-axis and y-axis labels relative to the current setting of 'cex'. +#' @param cex.axis the magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}}. +#' @param cex.legend the magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}}. +#' @param legend.labels character vector to use for the legend labels. #' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. -#' @param legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted +#' @param legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted. #' @author Ross Bennett #' @rdname chart.Weights.EF #' @export @@ -515,7 +515,7 @@ #' Plot multiple efficient frontiers #' -#' Overlay the efficient frontiers of multiple portfolio objects on a single plot +#' Overlay the efficient frontiers of multiple portfolio objects on a single plot. #' #' @param R an xts object of asset returns #' @param portfolio_list list of portfolio objects created by \code{\link{portfolio.spec}} @@ -524,23 +524,23 @@ #' This is only used for objects of class \code{optimize.portfolio} #' @param match.col string name of column to use for risk (horizontal axis). #' Must match the name of an objective. 
-#' @param search_size passed to optimize.portfolio for type="DEoptim" or type="random" +#' @param search_size passed to optimize.portfolio for type="DEoptim" or type="random". #' @param main title used in the plot. -#' @param cex.axis The magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}}. +#' @param cex.axis the magnification to be used for sizing the axis text relative to the current setting of 'cex', similar to \code{\link{plot}}. #' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. -#' @param legend.loc location of the legend; NULL, "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right" and "center" -#' @param legend.labels character vector to use for the legend labels +#' @param legend.loc location of the legend; NULL, "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right" and "center". +#' @param legend.labels character vector to use for the legend labels. #' @param cex.legend The magnification to be used for sizing the legend relative to the current setting of 'cex', similar to \code{\link{plot}}. -#' @param xlim set the x-axis limit, same as in \code{\link{plot}} -#' @param ylim set the y-axis limit, same as in \code{\link{plot}} -#' @param ... passthrough parameters to \code{\link{plot}} -#' @param chart.assets TRUE/FALSE to include the assets -#' @param labels.assets TRUE/FALSE to include the asset names in the plot -#' @param pch.assets plotting character of the assets, same as in \code{\link{plot}} +#' @param xlim set the x-axis limit, same as in \code{\link{plot}}. +#' @param ylim set the y-axis limit, same as in \code{\link{plot}}. +#' @param \dots passthrough parameters to \code{\link{plot}}. +#' @param chart.assets TRUE/FALSE to include the assets. +#' @param labels.assets TRUE/FALSE to include the asset names in the plot. 
+#' @param pch.assets plotting character of the assets, same as in \code{\link{plot}}. #' @param cex.assets A numerical value giving the amount by which the asset points and labels should be magnified relative to the default. -#' @param col vector of colors with length equal to the number of portfolios in \code{portfolio_list} -#' @param lty vector of line types with length equal to the number of portfolios in \code{portfolio_list} -#' @param lwd vector of line widths with length equal to the number of portfolios in \code{portfolio_list} +#' @param col vector of colors with length equal to the number of portfolios in \code{portfolio_list}. +#' @param lty vector of line types with length equal to the number of portfolios in \code{portfolio_list}. +#' @param lwd vector of line widths with length equal to the number of portfolios in \code{portfolio_list}. #' @author Ross Bennett #' @export chart.EfficientFrontierOverlay <- function(R, portfolio_list, type, n.portfolios=25, match.col="ES", search_size=2000, main="Efficient Frontiers", cex.axis=0.8, element.color="darkgray", legend.loc=NULL, legend.labels=NULL, cex.legend=0.8, xlim=NULL, ylim=NULL, ..., chart.assets=TRUE, labels.assets=TRUE, pch.assets=21, cex.assets=0.8, col=NULL, lty=NULL, lwd=NULL){ Modified: pkg/PortfolioAnalytics/R/charts.groups.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.groups.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/charts.groups.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -1,25 +1,25 @@ #' Chart weights by group or category #' -#' @param object object of class \code{optimize.portfolio} -#' @param ... passthrough parameters to \code{\link{plot}} +#' @param object object of class \code{optimize.portfolio}. +#' @param \dots passthrough parameters to \code{\link{plot}}. 
#' @param grouping #' \itemize{ -#' \item{groups: }{group the weights group constraints} -#' \item{category_labels: }{group the weights by category_labels in portfolio object} +#' \item{groups: }{group the weights by group constraints.} +#' \item{category_labels: }{group the weights by category_labels in the \code{portfolio} object.} #' } -#' @param plot.type "line" or "barplot" -#' @param main an overall title for the plot: see \code{\link{title}} +#' @param plot.type "line" or "barplot". +#' @param main an overall title for the plot: see \code{\link{title}}. #' @param las numeric in \{0,1,2,3\}; the style of axis labels #' \describe{ -#' \item{0:}{always parallel to the axis [\emph{default}],} +#' \item{0:}{always parallel to the axis,} #' \item{1:}{always horizontal,} #' \item{2:}{always perpendicular to the axis,} -#' \item{3:}{always vertical.} +#' \item{3:}{always vertical [\emph{default}].} #' } -#' @param xlab a title for the x axis: see \code{\link{title}} -#' @param cex.lab The magnification to be used for x and y labels relative to the current setting of \code{cex} -#' @param element.color color for the default border and axis -#' @param cex.axis The magnification to be used for x and y axis relative to the current setting of \code{cex} +#' @param xlab a title for the x axis: see \code{\link{title}}. +#' @param cex.lab the magnification to be used for x and y labels relative to the current setting of \code{cex}. +#' @param element.color color for the default border and axis. +#' @param cex.axis the magnification to be used for x and y axis relative to the current setting of \code{cex}. #' @author Ross Bennett #' @export chart.GroupWeights <- function(object, ..., grouping=c("groups", "category"), plot.type="line", main="Group Weights", las=3, xlab=NULL, cex.lab=0.8, element.color="darkgray", cex.axis=0.8){ @@ -95,7 +95,7 @@ #' @param ...
passthrough parameters to \code{\link{plot}} #' @param grouping #' \itemize{ -#' \item{groups: }{group the weights group constraints} +#' \item{groups: }{group the weights by group constraints} #' \item{category_labels: }{group the weights by category_labels in portfolio object} #' } #' @param main an overall title for the plot: see \code{\link{title}} Modified: pkg/PortfolioAnalytics/R/charts.risk.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -26,21 +26,21 @@ #' properly named contribution and pct_contrib columns. #' #' @param object optimal portfolio object created by \code{\link{optimize.portfolio}} -#' @param neighbors risk contribution or pct_contrib of neighbor portfolios to be plotted -#' @param ... passthrough parameters to \code{\link{plot}} -#' @param risk.type plot risk contribution in absolute terms or percentage contribution -#' @param main main title for the chart -#' @param ylab label for the y-axis -#' @param xlab a title for the x axis: see \code{\link{title}} -#' @param cex.lab The magnification to be used for x and y labels relative to the current setting of \code{cex} -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param element.color color for the default plot lines +#' @param neighbors risk contribution or pct_contrib of neighbor portfolios to be plotted, see details. +#' @param \dots passthrough parameters to \code{\link{plot}}. +#' @param risk.type "absolute" or "percentage" to plot risk contribution in absolute terms or percentage contribution. +#' @param main main title for the chart. +#' @param ylab label for the y-axis. +#' @param xlab label for the x-axis +#' @param cex.lab the magnification to be used for x and y labels relative to the current setting of \code{cex}. 
+#' @param cex.axis the magnification to be used for axis annotation relative to the current setting of \code{cex}. +#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. #' @param las numeric in \{0,1,2,3\}; the style of axis labels #' \describe{ -#' \item{0:}{always parallel to the axis [\emph{default}],} +#' \item{0:}{always parallel to the axis,} #' \item{1:}{always horizontal,} #' \item{2:}{always perpendicular to the axis,} -#' \item{3:}{always vertical.} +#' \item{3:}{always vertical [\emph{default}].} #' } #' @param ylim set the y-axis limit, same as in \code{\link{plot}} #' @author Ross Bennett @@ -206,17 +206,17 @@ #' This function charts the absolute contribution or percent contribution of #' the resulting objective measures in the \code{opt.list} object. #' -#' @param object list of optimal portfolio objects created by \code{\link{optimizations.combine}} -#' @param \dots any other passthru parameter +#' @param object list of optimal portfolio objects created by \code{\link{optimizations.combine}}. +#' @param \dots any other passthru parameter. #' @param match.col string of risk column to match. The \code{opt.list} object #' may contain risk budgets for ES or StdDev and this will match the proper -#' column names (e.g. ES.contribution). +#' column names of the objectives list output (e.g. ES.contribution).
+#' @param risk.type "absolute" or "percentage" to plot risk contribution in absolute terms or percentage contribution. +#' @param main main title for the chart. +#' @param plot.type "line" or "barplot". +#' @param cex.axis the magnification to be used for axis annotation relative to the current setting of \code{cex}. +#' @param cex.lab the magnification to be used for axis annotation relative to the current setting of \code{cex}. +#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. #' @param las numeric in \{0,1,2,3\}; the style of axis labels #' \describe{ #' \item{0:}{always parallel to the axis [\emph{default}],} Modified: pkg/PortfolioAnalytics/R/constrained_objective.R =================================================================== --- pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-16 17:24:03 UTC (rev 3119) +++ pkg/PortfolioAnalytics/R/constrained_objective.R 2013-09-16 19:54:19 UTC (rev 3120) @@ -294,15 +294,15 @@ #' calculate a numeric return value for a portfolio based on a set of constraints and objectives #' -#' function to calculate a numeric return value for a portfolio based on a set of constraints, -#' we'll try to make as few assumptions as possible, and only run objectives that are required by the user +#' Function to calculate a numeric return value for a portfolio based on a set of constraints and objectives. +#' We'll try to make as few assumptions as possible and only run objectives that are enabled by the user. #' #' If the user has passed in either min_sum or max_sum constraints for the portfolio, or both, -#' and are using a numerical optimization method like DEoptim, and normalize=TRUE, the default, +#' and are using a numerical optimization method like DEoptim, and normalize=TRUE, #' we'll normalize the weights passed in to whichever boundary condition has been violated. 
#' If using random portfolios, all the portfolios generated will meet the constraints by construction. -#' NOTE: this means that the weights produced by a numeric optimization algorithm like DEoptim -#' might violate your constraints, so you'd need to renormalize them after optimizing +#' NOTE: this means that the weights produced by a numeric optimization algorithm like DEoptim, pso, or GenSA +#' might violate constraints, and will need to be renormalized after optimizing. #' We apply the same normalization in \code{\link{optimize.portfolio}} so that the weights you see have been #' normalized to min_sum if the generated portfolio is smaller than min_sum or max_sum if the #' generated portfolio is larger than max_sum. @@ -321,7 +321,7 @@ #' #' When you are optimizing a return objective, you must specify a negative multiplier [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3120 From noreply at r-forge.r-project.org Mon Sep 16 22:01:00 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 22:01:00 +0200 (CEST) Subject: [Returnanalytics-commits] r3121 - in pkg/PortfolioAnalytics: . R Message-ID: <20130916200100.95053185F06@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-16 22:01:00 +0200 (Mon, 16 Sep 2013) New Revision: 3121 Modified: pkg/PortfolioAnalytics/DESCRIPTION pkg/PortfolioAnalytics/R/optFUN.R Log: Adding corpcor require to optFUN and to the depends list in NAMESPACE. 
Modified: pkg/PortfolioAnalytics/DESCRIPTION =================================================================== --- pkg/PortfolioAnalytics/DESCRIPTION 2013-09-16 19:54:19 UTC (rev 3120) +++ pkg/PortfolioAnalytics/DESCRIPTION 2013-09-16 20:01:00 UTC (rev 3121) @@ -25,7 +25,8 @@ ROI.plugin.glpk, ROI.plugin.quadprog, pso, - GenSA + GenSA, + corpcor License: GPL Copyright: (c) 2004-2012 Collate: Modified: pkg/PortfolioAnalytics/R/optFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/optFUN.R 2013-09-16 19:54:19 UTC (rev 3120) +++ pkg/PortfolioAnalytics/R/optFUN.R 2013-09-16 20:01:00 UTC (rev 3121) @@ -561,6 +561,7 @@ d <- rep(-moments$mean, 3) + stopifnot("package:corpcor" %in% search() || require("foreach",quietly = TRUE)) qp.result <- try(solve.QP(Dmat=make.positive.definite(2*lambda*V), dvec=d, Amat=t(Amat), bvec=rhs, meq=meq), silent=TRUE) if(inherits(qp.result, "try-error")) stop("No solution found, consider adjusting constraints.") @@ -672,7 +673,8 @@ d <- rep(-moments$mean, 3) - qp.result <- try(solve.QP(Dmat=corpcor:::make.positive.definite(2*lambda*V), + stopifnot("package:corpcor" %in% search() || require("foreach",quietly = TRUE)) + qp.result <- try(solve.QP(Dmat=make.positive.definite(2*lambda*V), dvec=d, Amat=t(Amat), bvec=rhs, meq=meq), silent=TRUE) if(inherits(qp.result, "try-error")) stop("No solution found, consider adjusting constraints.") From noreply at r-forge.r-project.org Mon Sep 16 22:10:15 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 22:10:15 +0200 (CEST) Subject: [Returnanalytics-commits] r3122 - pkg/PortfolioAnalytics/demo Message-ID: <20130916201015.E0071185F06@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-16 22:10:15 +0200 (Mon, 16 Sep 2013) New Revision: 3122 Modified: pkg/PortfolioAnalytics/demo/00Index Log: Updating 00Index for demo files. 
Modified: pkg/PortfolioAnalytics/demo/00Index =================================================================== --- pkg/PortfolioAnalytics/demo/00Index 2013-09-16 20:01:00 UTC (rev 3121) +++ pkg/PortfolioAnalytics/demo/00Index 2013-09-16 20:10:15 UTC (rev 3122) @@ -12,6 +12,7 @@ demo_opt_combine Demonstrate how to combine and chart the optimal weights for multiple optimizations. demo_weight_concentration Demonstrate how to use the weight concentration objective. backwards_compat Demonstrate how to solve optimization problems using v1 specification with a v1_constraint object. -demo_random_portfolios Demonstrate examples from script.workshop2012.R using random portfolios -demo_proportional_cost_ROI Demonstrate how to use proportional transaction cost constraint with quadprog solver +demo_random_portfolios Demonstrate examples from script.workshop2012.R using random portfolios. +demo_proportional_cost Demonstrate how to use proportional transaction cost constraint. +demo_return_target Demonstrate how to specify a target return as a constraint or objective. From noreply at r-forge.r-project.org Mon Sep 16 23:10:19 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 23:10:19 +0200 (CEST) Subject: [Returnanalytics-commits] r3123 - pkg/PortfolioAnalytics/R Message-ID: <20130916211019.55BA21854BC@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-16 23:10:19 +0200 (Mon, 16 Sep 2013) New Revision: 3123 Modified: pkg/PortfolioAnalytics/R/constraint_fn_map.R pkg/PortfolioAnalytics/R/constraintsFUN.R Log: Removing constrained_group_tmp and txfrm_* functions. I originally wrote these to be used in a mapping function, but these are not being used. 
Modified: pkg/PortfolioAnalytics/R/constraint_fn_map.R =================================================================== --- pkg/PortfolioAnalytics/R/constraint_fn_map.R 2013-09-16 20:10:15 UTC (rev 3122) +++ pkg/PortfolioAnalytics/R/constraint_fn_map.R 2013-09-16 21:10:19 UTC (rev 3123) @@ -247,114 +247,8 @@ max_pos_short=tmp_max_pos_short)) } -#' Transform weights that violate min or max box constraints -#' -#' This is a helper function called inside constraint_fnMap to transform the weights vector to satisfy box constraints. -#' -#' @param weights vector of weights -#' @param min vector of minimum asset weights from box constraints -#' @param max vector of maximum asset weights from box constraints -#' @author Ross Bennett -#' @export -txfrm_box_constraint <- function(weights, min, max) { - # 1. Check if any elements of the weights vector violate min or max - # 2. If min or max is violated, then set those weights equal to their respective min or max values - # The length of the weights vector must be equal to the length of min and max vectors so that an element-by-element comparison is done - if(any(weights < min) | any(weights > max)){ - # get the index of elements in the weights vector that violate min - idx.min <- which(weights < min) - # set those elements in the weights vector equal to their respective min - weights[idx.min] = min[idx.min] - # print(weights) - # get the index of elements in the weights vector that violate max - idx.max <- which(weights > max) - # set those elements in the weights vector equal to their respective max - weights[idx.max] = max[idx.max] - # print(weights) - # The transformation will likely change the sum of weights and violate min_sum or max_sum - # Should we normalize here by transforming the entire weights vector? 
- # Normalizing by transforming the entire weights may violate min and max, but will get us *close* - # if(sum(weights) < min_sum) weights <- min_sum / sum(weights) * weights - # if(sum(weights) > max_sum) weights <- max_sum / sum(weights) * weights - } - return(weights) -} -#' Transform weights that violate group constraints -#' -#' This is a helper function called inside constraint_fnMap to transform the weights vector to satisfy group constraints. -#' -#' @param weights vector of weights -#' @param groups vector of groups -#' @param cLO vector of minimum group weights from group constraints -#' @param cUP vector of maximum group weights from group constraints -#' @author Ross Bennett -#' @export -txfrm_group_constraint <- function(weights, groups, cLO, cUP){ - n.groups <- length(groups) - k <- 1 - l <- 0 - for(i in 1:n.groups){ - j <- groups[i] - tmp.w <- weights[k:(l+j)] - # normalize weights for a given group that sum to less than specified group min - grp.min <- cLO[i] - if(sum(tmp.w) < grp.min) { - weights[k:(l+j)] <- (grp.min / sum(tmp.w)) * tmp.w - } - # normalize weights for a given group that sum to greater than specified group max - grp.max <- cUP[i] - if(sum(tmp.w) > grp.max) { - weights[k:(l+j)] <- (grp.max / sum(tmp.w)) * tmp.w - } - k <- k + j - l <- k - 1 - } - # Normalizing the weights inside the groups changes the sum of the weights. - # Should normalizing the sum of weights take place here or somewhere else? - # Re-normalizing the weights will get us *close* to satisfying the group constraints. - # Maybe then add a penalty in constrained objective for violation of group constraints? - return(weights) -} -#' Transform weights that violate weight_sum constraints -#' -#' This is a helper function called inside constraint_fnMap to transform the weights vector to satisfy weight_sum constraints. 
-#' -#' @param weights vector of weights -#' @param min_sum minimum sum of asset weights -#' @param max_sum maximum sum of asset weights -#' @author Ross Bennett -#' @export -txfrm_weight_sum_constraint <- function(weights, min_sum, max_sum){ - # normalize to max_sum - if(sum(weights) > max_sum) { weights <- (max_sum / sum(weights)) * weights } - # normalize to min_sum - if(sum(weights) < min_sum) { weights <- (min_sum / sum(weights)) * weights } - return(weights) -} - -#' Transform weights for position_limit constraints -#' -#' This is a helper function called inside constraint_fnMap to transform the weights vector to satisfy position_limit constraints. -#' This function sets the minimum nassets-max_pos assets equal to 0 such that the max_pos number of assets will have non-zero weights. -#' -#' @param weights vector of weights -#' @param max_pos maximum position of assets with non_zero weights -#' @param nassets number of assets -#' @param tolerance tolerance for non-zero weights -#' @author Ross Bennett -#' @export -txfrm_position_limit_constraint <- function(weights, max_pos, nassets, tolerance=.Machine$double.eps^0.5){ - # account for weights that are very small (less than .Machine$double.eps^0.5) and are basically zero - # check if max_pos is violated - if(sum(abs(weights) > tolerance) > max_pos){ - # set the minimum nassets-max_pos weights equal to 0 - weights[head(order(weights), nassets - max_pos)] <- 0 - } - return(weights) -} - #' Transform a weights vector to satisfy leverage, box, group, and position_limit constraints using logic from \code{randomize_portfolio} #' #' This function uses a block of code from \code{\link{randomize_portfolio}} Modified: pkg/PortfolioAnalytics/R/constraintsFUN.R =================================================================== --- pkg/PortfolioAnalytics/R/constraintsFUN.R 2013-09-16 20:10:15 UTC (rev 3122) +++ pkg/PortfolioAnalytics/R/constraintsFUN.R 2013-09-16 21:10:19 UTC (rev 3123) @@ -1,67 +1,3 @@ -#' Generic function 
to impose group constraints on a vector of weights -#' -#' This function gets group subsets of the weights vector and checks if the sum -#' of the weights in that group violates the minimum or maximum value. If the -#' sum of weights in a given group violates its maximum or minimum value, the -#' group of weights is normalized to be equal to the minimum or maximum value. -#' This group normalization causes the sum of weights to change. The weights -#' vector is then normalized so that the min_sum and max_sum constraints are -#' satisfied. This "re-normalization" of the weights vector may then cause the -#' group constraints to not be satisfied. -#' -#' Group constraints are implemented in ROI solvers, but this function could -#' be used in constrained_objective for random portfolios, DEoptim, pso, or -#' gensa solvers. -#' -#' @param groups vector to group assets -#' @param cLO vector of group weight minimums -#' @param cUP vector of group weight maximums -#' @param weights vector of weights -#' @param min_sum minimum sum of weights -#' @param max_sum maximum sum of weights -#' @param normalize TRUE/FALSE to normalize the weights vector to satisfy the min_sum and max_sum constraints -#' -#' @author Ross Bennett -#' @export -constrained_group_tmp <- function(groups, cLO, cUP, weights, min_sum, max_sum, normalize=TRUE){ - # Modify the args later to accept a portfolio or constraint object - n.groups <- length(groups) - - k <- 1 - l <- 0 - for(i in 1:n.groups){ - j <- groups[i] - tmp.w <- weights[k:(l+j)] - # normalize weights for a given group that sum to less than specified group min - grp.min <- cLO[i] - if(sum(tmp.w) < grp.min) { - weights[k:(l+j)] <- (grp.min / sum(tmp.w)) * tmp.w - } - # normalize weights for a given group that sum to greater than specified group max - grp.max <- cUP[i] - if(sum(tmp.w) > grp.max) { - weights[k:(l+j)] <- (grp.max / sum(tmp.w)) * tmp.w - } - # cat(sum(tmp.w), "\t", cLO[i], "\n") - # cat(k, " ", l+j, "\n") - k <- k + j - l <- k - 1 
- } - # Normalizing the weights inside the groups changes the sum of the weights. - # Should normalizing the sum of weights take place here or somewhere else? - - if(normalize){ - # max_sum and min_sum normalization borrowed from constrained_objective - # Normalize to max_sum - if(sum(weights) > max_sum) { weights <- (max_sum / sum(weights)) * weights } - # Normalize to min_sum - if(sum(weights) < min_sum) { weights <- (min_sum / sum(weights)) * weights } - } - # "Re-normalizing" the weights causes some of the group constraints to - # be violated. Can this be addressed later with a penalty term for violating - # the group constraints? Or another way? - return(weights) -} #' Function to compute diversification as a constraint #' From noreply at r-forge.r-project.org Mon Sep 16 23:42:08 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 23:42:08 +0200 (CEST) Subject: [Returnanalytics-commits] r3124 - pkg/PortfolioAnalytics Message-ID: <20130916214208.BF681185800@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-16 23:42:08 +0200 (Mon, 16 Sep 2013) New Revision: 3124 Modified: pkg/PortfolioAnalytics/NAMESPACE Log: removing deleted functions from NAMESPACE. 
Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-16 21:10:19 UTC (rev 3123) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-16 21:42:08 UTC (rev 3124) @@ -10,7 +10,6 @@ export(chart.RiskReward) export(chart.Weights.EF) export(chart.Weights) -export(constrained_group_tmp) export(constrained_objective_v2) export(constrained_objective) export(constraint_ROI) @@ -71,10 +70,6 @@ export(turnover_constraint) export(turnover_objective) export(turnover) -export(txfrm_box_constraint) -export(txfrm_group_constraint) -export(txfrm_position_limit_constraint) -export(txfrm_weight_sum_constraint) export(update_constraint_v1tov2) export(var.portfolio) export(weight_concentration_objective) From noreply at r-forge.r-project.org Mon Sep 16 23:45:22 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 16 Sep 2013 23:45:22 +0200 (CEST) Subject: [Returnanalytics-commits] r3125 - pkg/PortfolioAnalytics/man Message-ID: <20130916214522.1FB01185053@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-16 23:45:21 +0200 (Mon, 16 Sep 2013) New Revision: 3125 Removed: pkg/PortfolioAnalytics/man/constrained_group_tmp.Rd pkg/PortfolioAnalytics/man/txfrm_box_constraint.Rd pkg/PortfolioAnalytics/man/txfrm_group_constraint.Rd pkg/PortfolioAnalytics/man/txfrm_position_limit_constraint.Rd pkg/PortfolioAnalytics/man/txfrm_weight_sum_constraint.Rd Log: Deleting .Rd files for deleted functions. 
Deleted: pkg/PortfolioAnalytics/man/constrained_group_tmp.Rd =================================================================== --- pkg/PortfolioAnalytics/man/constrained_group_tmp.Rd 2013-09-16 21:42:08 UTC (rev 3124) +++ pkg/PortfolioAnalytics/man/constrained_group_tmp.Rd 2013-09-16 21:45:21 UTC (rev 3125) @@ -1,45 +0,0 @@ -\name{constrained_group_tmp} -\alias{constrained_group_tmp} -\title{Generic function to impose group constraints on a vector of weights} -\usage{ - constrained_group_tmp(groups, cLO, cUP, weights, min_sum, - max_sum, normalize = TRUE) -} -\arguments{ - \item{groups}{vector to group assets} - - \item{cLO}{vector of group weight minimums} - - \item{cUP}{vector of group weight maximums} - - \item{weights}{vector of weights} - - \item{min_sum}{minimum sum of weights} - - \item{max_sum}{maximum sum of weights} - - \item{normalize}{TRUE/FALSE to normalize the weights - vector to satisfy the min_sum and max_sum constraints} -} -\description{ - This function gets group subsets of the weights vector - and checks if the sum of the weights in that group - violates the minimum or maximum value. If the sum of - weights in a given group violates its maximum or minimum - value, the group of weights is normalized to be equal to - the minimum or maximum value. This group normalization - causes the sum of weights to change. The weights vector - is then normalized so that the min_sum and max_sum - constraints are satisfied. This "re-normalization" of the - weights vector may then cause the group constraints to - not be satisfied. -} -\details{ - Group constraints are implemented in ROI solvers, but - this function could be used in constrained_objective for - random portfolios, DEoptim, pso, or gensa solvers. 
-} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/txfrm_box_constraint.Rd =================================================================== --- pkg/PortfolioAnalytics/man/txfrm_box_constraint.Rd 2013-09-16 21:42:08 UTC (rev 3124) +++ pkg/PortfolioAnalytics/man/txfrm_box_constraint.Rd 2013-09-16 21:45:21 UTC (rev 3125) @@ -1,24 +0,0 @@ -\name{txfrm_box_constraint} -\alias{txfrm_box_constraint} -\title{Transform weights that violate min or max box constraints} -\usage{ - txfrm_box_constraint(weights, min, max) -} -\arguments{ - \item{weights}{vector of weights} - - \item{min}{vector of minimum asset weights from box - constraints} - - \item{max}{vector of maximum asset weights from box - constraints} -} -\description{ - This is a helper function called inside constraint_fnMap - to transform the weights vector to satisfy box - constraints. -} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/txfrm_group_constraint.Rd =================================================================== --- pkg/PortfolioAnalytics/man/txfrm_group_constraint.Rd 2013-09-16 21:42:08 UTC (rev 3124) +++ pkg/PortfolioAnalytics/man/txfrm_group_constraint.Rd 2013-09-16 21:45:21 UTC (rev 3125) @@ -1,26 +0,0 @@ -\name{txfrm_group_constraint} -\alias{txfrm_group_constraint} -\title{Transform weights that violate group constraints} -\usage{ - txfrm_group_constraint(weights, groups, cLO, cUP) -} -\arguments{ - \item{weights}{vector of weights} - - \item{groups}{vector of groups} - - \item{cLO}{vector of minimum group weights from group - constraints} - - \item{cUP}{vector of maximum group weights from group - constraints} -} -\description{ - This is a helper function called inside constraint_fnMap - to transform the weights vector to satisfy group - constraints. 
-} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/txfrm_position_limit_constraint.Rd =================================================================== --- pkg/PortfolioAnalytics/man/txfrm_position_limit_constraint.Rd 2013-09-16 21:42:08 UTC (rev 3124) +++ pkg/PortfolioAnalytics/man/txfrm_position_limit_constraint.Rd 2013-09-16 21:45:21 UTC (rev 3125) @@ -1,28 +0,0 @@ -\name{txfrm_position_limit_constraint} -\alias{txfrm_position_limit_constraint} -\title{Transform weights for position_limit constraints} -\usage{ - txfrm_position_limit_constraint(weights, max_pos, - nassets, tolerance = .Machine$double.eps^0.5) -} -\arguments{ - \item{weights}{vector of weights} - - \item{max_pos}{maximum position of assets with non_zero - weights} - - \item{nassets}{number of assets} - - \item{tolerance}{tolerance for non-zero weights} -} -\description{ - This is a helper function called inside constraint_fnMap - to transform the weights vector to satisfy position_limit - constraints. This function sets the minimum - nassets-max_pos assets equal to 0 such that the max_pos - number of assets will have non-zero weights. 
-} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/txfrm_weight_sum_constraint.Rd =================================================================== --- pkg/PortfolioAnalytics/man/txfrm_weight_sum_constraint.Rd 2013-09-16 21:42:08 UTC (rev 3124) +++ pkg/PortfolioAnalytics/man/txfrm_weight_sum_constraint.Rd 2013-09-16 21:45:21 UTC (rev 3125) @@ -1,22 +0,0 @@ -\name{txfrm_weight_sum_constraint} -\alias{txfrm_weight_sum_constraint} -\title{Transform weights that violate weight_sum constraints} -\usage{ - txfrm_weight_sum_constraint(weights, min_sum, max_sum) -} -\arguments{ - \item{weights}{vector of weights} - - \item{min_sum}{minimum sum of asset weights} - - \item{max_sum}{maximum sum of asset weights} -} -\description{ - This is a helper function called inside constraint_fnMap - to transform the weights vector to satisfy weight_sum - constraints. -} -\author{ - Ross Bennett -} - From noreply at r-forge.r-project.org Tue Sep 17 06:09:16 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 17 Sep 2013 06:09:16 +0200 (CEST) Subject: [Returnanalytics-commits] r3126 - pkg/PortfolioAnalytics/vignettes Message-ID: <20130917040916.447261852B0@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-17 06:09:15 +0200 (Tue, 17 Sep 2013) New Revision: 3126 Removed: pkg/PortfolioAnalytics/vignettes/optimization-overview.Snw pkg/PortfolioAnalytics/vignettes/optimization-overview.pdf Modified: pkg/PortfolioAnalytics/vignettes/ROI_vignette.Rnw pkg/PortfolioAnalytics/vignettes/ROI_vignette.pdf Log: Updating ROI vignette and removing optimization-overview vignette. 
Modified: pkg/PortfolioAnalytics/vignettes/ROI_vignette.Rnw =================================================================== --- pkg/PortfolioAnalytics/vignettes/ROI_vignette.Rnw 2013-09-16 21:45:21 UTC (rev 3125) +++ pkg/PortfolioAnalytics/vignettes/ROI_vignette.Rnw 2013-09-17 04:09:15 UTC (rev 3126) @@ -113,9 +113,9 @@ @ \subsection{Visualization} -The chart of the optimal weights as well as the box constraints can be created with \code{chart.Weights.ROI}. The blue dots are the optimal weights and the gray triangles are the \code{min} and \code{max} of the box constraints. +The chart of the optimal weights as well as the box constraints can be created with \code{chart.Weights}. The blue dots are the optimal weights and the gray triangles are the \code{min} and \code{max} of the box constraints. <>= -chart.Weights.ROI(opt_maxret) +chart.Weights(opt_maxret) @ The optimal portfolio can be plotted in risk-return space along with other feasible portfolios. The return metric is defined in the \code{return.col} argument and the risk metric is defined in the \code{risk.col} argument. The scatter chart includes the optimal portfolio (blue dot) and other feasible portfolios (gray circles) to show the overall feasible space given the constraints. By default, if \code{rp} is not passed in, the feasible portfolios are generated with \code{random\_portfolios} to satisfy the constraints of the portfolio object. 
Modified: pkg/PortfolioAnalytics/vignettes/ROI_vignette.pdf =================================================================== (Binary files differ) Deleted: pkg/PortfolioAnalytics/vignettes/optimization-overview.Snw =================================================================== --- pkg/PortfolioAnalytics/vignettes/optimization-overview.Snw 2013-09-16 21:45:21 UTC (rev 3125) +++ pkg/PortfolioAnalytics/vignettes/optimization-overview.Snw 2013-09-17 04:09:15 UTC (rev 3126) @@ -1,390 +0,0 @@ -\documentclass[a4paper]{article} -\usepackage[round]{natbib} -\usepackage{bm} -\usepackage{verbatim} -\usepackage[latin1]{inputenc} -% \VignetteIndexEntry{Portfolio Optimization with CVaR budgets in PortfolioAnalytics} -\bibliographystyle{abbrvnat} - -\usepackage{url} - -\let\proglang=\textsf -\newcommand{\pkg}[1]{{\fontseries{b}\selectfont #1}} -\newcommand{\R}[1]{{\fontseries{b}\selectfont #1}} -\newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} -\newcommand{\E}{\mathsf{E}} -\newcommand{\VAR}{\mathsf{VAR}} -\newcommand{\COV}{\mathsf{COV}} -\newcommand{\Prob}{\mathsf{P}} - -\renewcommand{\topfraction}{0.85} -\renewcommand{\textfraction}{0.1} -\renewcommand{\baselinestretch}{1.5} -\setlength{\textwidth}{15cm} \setlength{\textheight}{22cm} \topmargin-1cm \evensidemargin0.5cm \oddsidemargin0.5cm - -\usepackage[latin1]{inputenc} -% or whatever - -\usepackage{lmodern} -\usepackage[T1]{fontenc} -% Or whatever. Note that the encoding and the font should match. If T1 -% does not look nice, try deleting the line with the fontenc. - -\begin{document} - -\title{Vignette: Portfolio Optimization with CVaR budgets\\ -in PortfolioAnalytics} -\author{Kris Boudt, Peter Carl and Brian Peterson } -\date{June 1, 2010} - -\maketitle -\tableofcontents - - -\bigskip - -\section{General information} - -Risk budgets are a central tool to estimate and manage the portfolio risk allocation. They decompose total portfolio risk into the risk contribution of each position. 
\citet{ BoudtCarlPeterson2010} propose several portfolio allocation strategies that use an appropriate transformation of the portfolio Conditional Value at Risk (CVaR) budget as an objective or constraint in the portfolio optimization problem. This document explains how risk allocation optimized portfolios can be obtained under general constraints in the \verb"PortfolioAnalytics" package of \citet{PortAnalytics}. - -\verb"PortfolioAnalytics" is designed to provide numerical solutions for portfolio problems with complex constraints and objective sets comprised of any R function. It can e.g.~construct portfolios that minimize a risk objective with (possibly non-linear) per-asset constraints on returns and drawdowns \citep{CarlPetersonBoudt2010}. The generality of possible constraints and objectives is a distinctive characteristic of the package with respect to RMetrics \verb"fPortfolio" of \citet{fPortfolioBook}. For standard Markowitz optimization problems, use of \verb"fPortfolio" rather than \verb"PortfolioAnalytics" is recommended. - -\verb"PortfolioAnalytics" solves the following type of problem -\begin{equation} \min_w g(w) \ \ s.t. \ \ -\left\{ \begin{array}{l} h_1(w)\leq 0 \\ \vdots \\ h_q(w)\leq 0. \end{array} \right. \label{optimproblem}\end{equation} \verb"PortfolioAnalytics" first merges the objective function and constraints into a penalty augmented objective function -\begin{equation} L(w) = g(w) + \mbox{penalty}\sum_{i=1}^q \lambda_i \max(h_i(w),0), \label{eq:constrainedobj} \end{equation} -where $\lambda_i$ is a multiplier to tune the relative importance of the constraints. The default values of penalty and $\lambda_i$ (called \verb"multiplier" in \verb"PortfolioAnalytics") are 10000 and 1, respectively. - -The minimum of this function is found through the \emph{Differential Evolution} (DE) algorithm of \citet{StornPrice1997} and ported to R by \citet{MullenArdiaGilWindoverCline2009}. 
DE is known for remarkable performance regarding continuous numerical problems \citep{PriceStornLampinen2006}. It has recently been advocated for optimizing portfolios under non-convex settings by \citet{Ardia2010} and \citet{Yollin2009}, among others. We use the R implementation of DE in the \verb"DEoptim" package of \citet{DEoptim}. - -The latest version of the \verb"PortfolioAnalytics" package can be downloaded from R-forge through the following command: -\begin{verbatim} -install.packages("PortfolioAnalytics", repos="http://R-Forge.R-project.org") -\end{verbatim} - -Its principal functions are: -\begin{itemize} -\item \verb"constraint(assets,min,max,min_sum,max_sum)": the portfolio optimization specification starts with specifying the shape of the weight vector through the function \verb"constraint". The weights have to be between \verb"min} and \verb"max" and their sum between \verb"min_sum" and \verb"max_sum". The first argument \verb"assets" is either a number indicating the number of portfolio assets or a vector holding the names of the assets. - -\item \verb"add.objective(constraints, type, name)": \verb"constraints" is a list holding the objective to be minimized and the constraints. New elements to this list are added by the function \verb"add.objective". Many common risk budget objectives and constraints are prespecified and can be identified by specifying the \verb"type" and \verb"name". - - -\item \verb"constrained_objective(w, R, constraints)": given the portfolio weight and return data, it evaluates the penalty augmented objective function in (\ref{eq:constrainedobj}). - -\item \verb"optimize.portfolio(R,constraints)": this function returns the portfolio weight that solves the problem in (\ref{optimproblem}). {\it R} is the multivariate return series of the portfolio components. - -\item \verb"optimize.portfolio.rebalancing(R,constraints,rebalance_on,trailing_periods": this function solves the multiperiod optimization problem. 
It returns for each rebalancing period the optimal weights and allows the estimation sample to be either from inception or a moving window. - -\end{itemize} - -Next we illustrate these functions on monthly return data for bond, US equity, international equity and commodity indices, which are the first 4 series -in the dataset \verb"indexes". The first step is to load the package \verb"PortfolioAnalytics" and the dataset. An important first note is that some of the functions (especially \verb" optimize.portfolio.rebalancing") requires the dataset to be a \verb"xts" object \citep{xts}. - - -<>= -options(width=80) -@ - -<>=| -library(PortfolioAnalytics) -#source("constrained_objective.R") -data(indexes) -class(indexes) -indexes <- indexes[,1:4] -head(indexes,2) -tail(indexes,2) -@ - -In what follows, we first illustrate the construction of the penalty augmented objective function. Then we present the code for solving the optimization problem. - -\section{Setting of the objective function} - -\subsection{Weight constraints} - -<>=| -# Wcons <- constraint( assets = colnames(indexes[,1:4]) ,min = rep(0,4), -# max=rep(1,4), min_sum=1,max_sum=1 ) -pspec <- portfolio.spec(assets=colnames(indexes[,1:4])) -pspec <- add.constraint(portfolio=pspec, type="leverage", min_sum=1, max_sum=1) -pspec <- add.constraint(portfolio=pspec, type="box", min=0, max=1) -@ - -Given the weight constraints, we can call the value of the function to be minimized. We consider the case of no violation and a case of violation. By default, \verb"normalize=TRUE" which means that if the sum of weights exceeds \verb"max_sum", the weight vector is normalized by multiplying it with \verb"sum(weights)/max_sum" such that the weights evaluated in the objective function satisfy the \verb"max_sum" constraint. 
-<>=| -# constrained_objective_v1( w = rep(1/4,4) , R = indexes[,1:4] , constraints = Wcons) -# constrained_objective_v1( w = rep(1/3,4) , R = indexes[,1:4] , constraints = Wcons) -# constrained_objective_v1( w = rep(1/3,4) , R = indexes[,1:4] , constraints = Wcons, normalize=FALSE) -constrained_objective(w = rep(1/4, 4), R = indexes[, 1:4], portfolio = pspec) -constrained_objective(w = rep(1/3, 4), R = indexes[, 1:4], portfolio = pspec) -constrained_objective(w = rep(1/3, 4), R = indexes[, 1:4], portfolio = pspec, - normalize=FALSE) -@ - -The latter value can be recalculated as penalty times the weight violation, that is: $10000 \times 1/3.$ - -\subsection{Minimum CVaR objective function} - -Suppose now we want to find the portfolio that minimizes the 95\% portfolio CVaR subject to the weight constraints listed above. - -<>=| -# ObjSpec = add.objective_v1( constraints = Wcons , type="risk",name="CVaR", -# arguments=list(p=0.95), enabled=TRUE) -pspec <- add.objective(portfolio = pspec, type = "risk", name = "CVaR", - arguments = list(p=0.95)) -@ - -The value of the objective function is: -<>=| -# constrained_objective_v1( w = rep(1/4,4) , R = indexes[,1:4] , constraints = ObjSpec) -constrained_objective( w = rep(1/4,4) , R = indexes[,1:4], portfolio = pspec) -@ -This is the CVaR of the equal-weight portfolio as computed by the function \verb"ES" in the \verb"PerformanceAnalytics" package of \citet{ Carl2007} -<>=| -library(PerformanceAnalytics) -out<-ES(indexes[,1:4],weights = rep(1/4,4),p=0.95, portfolio_method="component") -out$MES -@ -All arguments in the function \verb"ES" can be passed on through \verb"arguments". E.g. to reduce the impact of extremes on the portfolio results, it is recommended to winsorize the data using the option clean="boudt". 
- -<>=| -out<-ES(indexes[,1:4],weights = rep(1/4,4),p=0.95,clean="boudt", - portfolio_method="component") -out$MES -@ - - -For the formulation of the objective function, this implies setting: -<>=| -# ObjSpec = add.objective_v1( constraints = Wcons , type="risk",name="CVaR", -# arguments=list(p=0.95,clean="boudt"), enabled=TRUE) -pspec <- add.objective(portfolio = pspec, type = "risk", name = "CVaR", - arguments = list(p=0.95, clean="boudt"), indexnum=1) -constrained_objective( w = rep(1/4,4) , R = indexes[,1:4], portfolio=pspec) -@ - -An additional argument that is not available for the moment in \verb"ES" is to estimate the conditional covariance matrix through -the constant conditional correlation model of \citet{Bollerslev90}. - -For the formulation of the objective function, this implies setting: -<>=| -# ObjSpec = add.objective_v1( constraints = Wcons , type="risk",name="CVaR", -# arguments=list(p=0.95,clean="boudt"), -# enabled=TRUE, garch=TRUE) -pspec <- add.objective(portfolio = pspec, type = "risk", name = "CVaR", - arguments = list(p=0.95, clean="boudt"), - indexnum=1, garch=TRUE) -constrained_objective( w = rep(1/4,4) , R = indexes[,1:4], portfolio = pspec) -@ - -\subsection{Minimum CVaR concentration objective function} - -Add the minimum 95\% CVaR concentration objective to the objective function: -<>=| -# ObjSpec = add.objective_v1( constraints = Wcons , type="risk_budget_objective", -# name="CVaR", arguments=list(p=0.95,clean="boudt"), -# min_concentration=TRUE, enabled=TRUE) -pspec <- add.objective(portfolio=pspec, type="risk_budget_objective", - name="CVaR", arguments=list(p=0.95,clean="boudt"), - min_concentration=TRUE) -@ -The value of the objective function is: -<>=| -# constrained_objective_v1( w = rep(1/4,4) , R = indexes[,1:4] , -# constraints = ObjSpec) -constrained_objective( w = rep(1/4,4) , R = indexes[,1:4] , portfolio = pspec) -@ -We can verify that this is effectively the largest CVaR contribution of that portfolio as follows: -<>=| 
-ES(indexes[,1:4],weights = rep(1/4,4),p=0.95,clean="boudt", - portfolio_method="component") -@ - -\subsection{Risk allocation constraints} - -We see that in the equal-weight portfolio, the international equities and commodities investment -cause more than 30\% of total risk. We could specify as a constraint that no asset can contribute -more than 30\% to total portfolio risk. This involves the construction of the following objective function: - -<>=| -# ObjSpec = add.objective_v1( constraints = Wcons , type="risk_budget_objective", -# name="CVaR", max_prisk = 0.3, -# arguments=list(p=0.95,clean="boudt"), enabled=TRUE) -# constrained_objective_v1( w = rep(1/4,4) , R = indexes[,1:4] , -# constraints = ObjSpec) -pspec = add.objective( portfolio = pspec , type="risk_budget_objective",name="CVaR", - max_prisk = 0.3, arguments=list(p=0.95,clean="boudt")) -constrained_objective( w = rep(1/4,4), R = indexes[,1:4], portfolio = pspec) -@ - -This value corresponds to the penalty parameter which has by default the value of 10000 times the exceedances: $ 10000*(0.045775103+0.054685023)\approx 1004.601.$ - -\section{Optimization} - -The penalty augmented objective function is minimized through Differential Evolution. Two parameters are crucial in tuning the optimization: \verb"search_size" and \verb"itermax". The optimization routine -\begin{enumerate} -\item First creates the initial generation of \verb"NP= search_size/itermax" guesses for the optimal value of the parameter vector, using the \verb"random_portfolios" function generating random weights satisfying the weight constraints. -\item Then DE evolves over this population of candidate solutions using alteration and selection operators in order to minimize the objective function. It restarts \verb"itermax" times. -\end{enumerate} It is important that \verb"search_size/itermax" is high enough. It is generally recommended that this ratio is at least ten times the length of the weight vector. 
For more details on the use of DE strategy in portfolio allocation, we refer the -reader to \citet{Ardia2010}. - -\subsection{Minimum CVaR portfolio under an upper 40\% CVaR allocation constraint} - -The functions needed to obtain the minimum CVaR portfolio under an upper 40\% CVaR allocation constraint are the following: -\begin{verbatim} -> ObjSpec <- constraint(assets = colnames(indexes[,1:4]),min = rep(0,4), -+ max=rep(1,4), min_sum=1,max_sum=1 ) -> ObjSpec <- add.objective_v1( constraints = ObjSpec, type="risk", -+ name="CVaR", arguments=list(p=0.95,clean="boudt"),enabled=TRUE) -> ObjSpec <- add.objective_v1( constraints = ObjSpec, -+ type="risk_budget_objective", name="CVaR", max_prisk = 0.4, -+ arguments=list(p=0.95,clean="boudt"), enabled=TRUE) -> set.seed(1234) -> out = optimize.portfolio_v1(R= indexes[,1:4],constraints=ObjSpec, -+ optimize_method="DEoptim",itermax=10, search_size=2000) -\end{verbatim} -After the call to these functions it starts to explore the feasible space iteratively: -\begin{verbatim} -Iteration: 1 bestvalit: 0.029506 bestmemit: 0.810000 0.126000 0.010000 0.140000 -Iteration: 2 bestvalit: 0.029506 bestmemit: 0.810000 0.126000 0.010000 0.140000 -Iteration: 3 bestvalit: 0.029272 bestmemit: 0.758560 0.079560 0.052800 0.112240 -Iteration: 4 bestvalit: 0.029272 bestmemit: 0.758560 0.079560 0.052800 0.112240 -Iteration: 5 bestvalit: 0.029019 bestmemit: 0.810000 0.108170 0.010000 0.140000 -Iteration: 6 bestvalit: 0.029019 bestmemit: 0.810000 0.108170 0.010000 0.140000 -Iteration: 7 bestvalit: 0.029019 bestmemit: 0.810000 0.108170 0.010000 0.140000 -Iteration: 8 bestvalit: 0.028874 bestmemit: 0.692069 0.028575 0.100400 0.071600 -Iteration: 9 bestvalit: 0.028874 bestmemit: 0.692069 0.028575 0.100400 0.071600 -Iteration: 10 bestvalit: 0.028874 bestmemit: 0.692069 0.028575 0.100400 0.071600 -elapsed time:1.85782111114926 -\end{verbatim} - -If \verb"TRACE=FALSE" the only output in \verb"out" is the weight vector that optimizes the objective 
function. - -\begin{verbatim} -> out[[1]] - US Bonds US Equities Int'l Equities Commodities - 0.77530240 0.03201150 0.11247491 0.08021119 \end{verbatim} - -If \verb"TRACE=TRUE" additional information is given such as the value of the objective function and the different constraints. - -\subsection{Minimum CVaR concentration portfolio} - -The functions needed to obtain the minimum CVaR concentration portfolio are the following: - -\begin{verbatim} -> ObjSpec <- constraint(assets = colnames(indexes[,1:4]) ,min = rep(0,4), -+ max=rep(1,4), min_sum=1,max_sum=1 ) -> ObjSpec <- add.objective_v1( constraints = ObjSpec, -+ type="risk_budget_objective", name="CVaR", -+ arguments=list(p=0.95,clean="boudt"), -+ min_concentration=TRUE,enabled=TRUE) -> set.seed(1234) -> out = optimize.portfolio_v1(R= indexes[,1:4],constraints=ObjSpec, -+ optimize_method="DEoptim",itermax=50, search_size=5000) -\end{verbatim} -The iterations are as follows: -\begin{verbatim} -Iteration: 1 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 2 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 3 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 4 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 5 bestvalit: 0.010598 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 45 bestvalit: 0.008209 bestmemit: 0.976061 0.151151 0.120500 0.133916 -Iteration: 46 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -Iteration: 47 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -Iteration: 48 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -Iteration: 49 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -Iteration: 50 bestvalit: 0.008170 bestmemit: 0.897703 0.141514 0.109601 0.124004 -elapsed time:4.1324522222413 -\end{verbatim} -This portfolio has the equal risk contribution characteristic: -\begin{verbatim} -> out[[1]] - 
US Bonds US Equities Int'l Equities Commodities - 0.70528537 0.11118139 0.08610905 0.09742419 -> ES(indexes[,1:4],weights = out[[1]],p=0.95,clean="boudt", -+ portfolio_method="component") -$MES - [,1] -[1,] 0.03246264 - -$contribution - US Bonds US Equities Int'l Equities Commodities - 0.008169565 0.008121930 0.008003228 0.008167917 - -$pct_contrib_MES - US Bonds US Equities Int'l Equities Commodities - 0.2516605 0.2501931 0.2465366 0.2516098 \end{verbatim} - - - - -\subsection{Dynamic optimization} - -Dynamic rebalancing of the risk budget optimized portfolio is possible through the function \verb"optimize.portfolio.rebalancing". Additional arguments are \verb"rebalance\_on} which indicates the rebalancing frequency (years, quarters, months). The estimation is either done from inception (\verb"trailing\_periods=0") or through moving window estimation, where each window has \verb"trailing_periods" observations. The minimum number of observations in the estimation sample is specified by \verb"training_period". Its default value is 36, which corresponds to three years for monthly data. - -As an example, consider the minimum CVaR concentration portfolio, with estimation from in inception and monthly rebalancing. Since we require a minimum estimation length of total number of observations -1, we can optimize the portfolio only for the last two months. 
- -\begin{verbatim} -> set.seed(1234) -> out = optimize.portfolio.rebalancing_v1(R= indexes,constraints=ObjSpec, rebalance_on ="months", -+ optimize_method="DEoptim",itermax=50, search_size=5000, training_period = nrow(indexes)-1 ) -\end{verbatim} - -For each of the optimization, the iterations are given as intermediate output: -\begin{verbatim} -Iteration: 1 bestvalit: 0.010655 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 2 bestvalit: 0.010655 bestmemit: 0.800000 0.100000 0.118000 0.030000 -Iteration: 49 bestvalit: 0.008207 bestmemit: 0.787525 0.124897 0.098001 0.108258 -Iteration: 50 bestvalit: 0.008195 bestmemit: 0.774088 0.122219 0.095973 0.104338 -elapsed time:4.20546416666773 -Iteration: 1 bestvalit: 0.011006 bestmemit: 0.770000 0.050000 0.090000 0.090000 -Iteration: 2 bestvalit: 0.010559 bestmemit: 0.498333 0.010000 0.070000 0.080000 -Iteration: 49 bestvalit: 0.008267 bestmemit: 0.828663 0.126173 0.100836 0.114794 -Iteration: 50 bestvalit: 0.008267 bestmemit: 0.828663 0.126173 0.100836 0.114794 -elapsed time:4.1060591666566 -overall elapsed time:8.31152777777778 -\end{verbatim} -The output is a list holding for each rebalancing period the output of the optimization, such as portfolio weights. 
-\begin{verbatim} -> out[[1]]$weights - US Bonds US Equities Int'l Equities Commodities - 0.70588695 0.11145087 0.08751686 0.09514531 -> out[[2]]$weights - US Bonds US Equities Int'l Equities Commodities - 0.70797640 0.10779728 0.08615059 0.09807574 -\end{verbatim} -But also the value of the objective function: -\begin{verbatim} -> out[[1]]$out -[1] 0.008195072 -> out[[2]]$out -[1] 0.008266844 -\end{verbatim} -The first and last observation from the estimation sample: -\begin{verbatim} -> out[[1]]$data_summary -$first - US Bonds US Equities Int'l Equities Commodities -1980-01-31 -0.0272 0.061 0.0462 0.0568 - -$last - US Bonds US Equities Int'l Equities Commodities -2009-11-30 0.0134 0.0566 0.0199 0.015 - -> out[[2]]$data_summary -$first - US Bonds US Equities Int'l Equities Commodities -1980-01-31 -0.0272 0.061 0.0462 0.0568 - -$last - US Bonds US Equities Int'l Equities Commodities -2009-12-31 -0.0175 0.0189 0.0143 0.0086 -\end{verbatim} - -Of course, DE is a stochastic optimizaer and typically will only find a near-optimal solution that depends on the seed. The function \verb"optimize.portfolio.parallel" in \verb"PortfolioAnalytics" allows to run an arbitrary number of portfolio sets in parallel in order to develop "confidence bands" around your solution. It is based on Revolution's \verb"foreach" package \citep{foreach}. 
- -\bibliography{PA} - - -\end{document} - Deleted: pkg/PortfolioAnalytics/vignettes/optimization-overview.pdf =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Tue Sep 17 06:25:58 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 17 Sep 2013 06:25:58 +0200 (CEST) Subject: [Returnanalytics-commits] r3127 - pkg/PerformanceAnalytics/R Message-ID: <20130917042558.708CE1852B0@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-17 06:25:57 +0200 (Tue, 17 Sep 2013) New Revision: 3127 Modified: pkg/PerformanceAnalytics/R/PortfolioRisk.R Log: Modifying Portsd so that the asset names are returned with contribution and pct_contrib. Modified: pkg/PerformanceAnalytics/R/PortfolioRisk.R =================================================================== --- pkg/PerformanceAnalytics/R/PortfolioRisk.R 2013-09-17 04:09:15 UTC (rev 3126) +++ pkg/PerformanceAnalytics/R/PortfolioRisk.R 2013-09-17 04:25:57 UTC (rev 3127) @@ -231,10 +231,13 @@ dpm2 = derportm2(w,sigma) dersd = (0.5*as.vector(dpm2))/sqrt(pm2); contrib = dersd*as.vector(w) + names(contrib) = names(w) + pct_contrib = contrib/sqrt(pm2) + names(pct_contrib) = names(w) # check if( abs( sum(contrib)-sqrt(pm2))>0.01*sqrt(pm2)) { print("error") } else { - ret<-list( sqrt(pm2) , contrib , contrib/sqrt(pm2) ) + ret<-list( sqrt(pm2) , contrib , pct_contrib ) names(ret) <- c("StdDev","contribution","pct_contrib_StdDev") } return(ret) From noreply at r-forge.r-project.org Tue Sep 17 22:15:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 17 Sep 2013 22:15:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3128 - pkg/PortfolioAnalytics/R Message-ID: <20130917201509.DA7C71846B3@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-17 22:15:09 +0200 (Tue, 17 Sep 2013) New Revision: 3128 Modified: pkg/PortfolioAnalytics/R/extractstats.R Log: Adding ETL.ETL and 
ETL.MES to name.replace Modified: pkg/PortfolioAnalytics/R/extractstats.R =================================================================== --- pkg/PortfolioAnalytics/R/extractstats.R 2013-09-17 04:25:57 UTC (rev 3127) +++ pkg/PortfolioAnalytics/R/extractstats.R 2013-09-17 20:15:09 UTC (rev 3128) @@ -41,7 +41,7 @@ #' @param rnames character vector of names to check for cleanup name.replace <- function(rnames){ rnames<-gsub("objective_measures.",'',rnames) - matchvec<-c('mean.mean','median.median','ES.ES','CVaR.ES','ES.MES','CVaR.MES','VaR.MVaR','maxDrawdown.maxDrawdown','sd.sd','StdDev.StdDev') + matchvec<-c('mean.mean','median.median','ES.ES','ETL.ETL','CVaR.ES','ES.MES','ETL.MES','CVaR.MES','VaR.MVaR','maxDrawdown.maxDrawdown','sd.sd','StdDev.StdDev') for(str in matchvec){ pos<-pmatch(str,rnames) if(!is.na(pos)){ @@ -50,6 +50,7 @@ median.median = {rnames[pos]<-'median'}, CVaR.MES =, CVaR.ES = {rnames[pos]<-'CVaR'}, ES.MES =, ES.ES = {rnames[pos]<-'ES'}, + ETL.MES =, ETL.ETL = {rnames[pos]<-'ETL'}, VaR.MVaR = {rnames[pos]<-'VaR'}, maxDrawdown.maxDrawdown = {rnames[pos]<-'maxDrawdown'}, sd.sd=, StdDev.StdDev = {rnames[pos]<-'StdDev'}, From noreply at r-forge.r-project.org Wed Sep 18 07:08:47 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 07:08:47 +0200 (CEST) Subject: [Returnanalytics-commits] r3129 - pkg/PortfolioAnalytics/R Message-ID: <20130918050847.584D2185011@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-18 07:08:46 +0200 (Wed, 18 Sep 2013) New Revision: 3129 Modified: pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/R/charts.GenSA.R pkg/PortfolioAnalytics/R/charts.PSO.R pkg/PortfolioAnalytics/R/charts.ROI.R pkg/PortfolioAnalytics/R/charts.RP.R Log: Adding check for returns object of chart.Scatter.* functions. 
Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-17 20:15:09 UTC (rev 3128) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-18 05:08:46 UTC (rev 3129) @@ -100,6 +100,7 @@ if(!inherits(object, "optimize.portfolio.DEoptim")) stop("object must be of class 'optimize.portfolio.DEoptim'") R <- object$R + if(is.null(R)) stop("Returns object not detected, must run optimize.portfolio with trace=TRUE") portfolio <- object$portfolio xtract = extractStats(object) columnnames = colnames(xtract) Modified: pkg/PortfolioAnalytics/R/charts.GenSA.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-17 20:15:09 UTC (rev 3128) +++ pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-18 05:08:46 UTC (rev 3129) @@ -83,6 +83,7 @@ if(!inherits(object, "optimize.portfolio.GenSA")) stop("object must be of class 'optimize.portfolio.GenSA'") R <- object$R + if(is.null(R)) stop("Returns object not detected, must run optimize.portfolio with trace=TRUE") # If the user does not pass in rp, then we will generate random portfolios if(rp){ permutations <- match.call(expand.dots=TRUE)$permutations Modified: pkg/PortfolioAnalytics/R/charts.PSO.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-17 20:15:09 UTC (rev 3128) +++ pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-18 05:08:46 UTC (rev 3129) @@ -82,6 +82,7 @@ if(!inherits(object, "optimize.portfolio.pso")) stop("object must be of class 'optimize.portfolio.pso'") R <- object$R + if(is.null(R)) stop("Returns object not detected, must run optimize.portfolio with trace=TRUE") # portfolio <- object$portfolio xtract = extractStats(object) columnnames = colnames(xtract) Modified: pkg/PortfolioAnalytics/R/charts.ROI.R =================================================================== --- 
pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-17 20:15:09 UTC (rev 3128) +++ pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-18 05:08:46 UTC (rev 3129) @@ -84,6 +84,7 @@ if(!inherits(object, "optimize.portfolio.ROI")) stop("object must be of class 'optimize.portfolio.ROI'") R <- object$R + if(is.null(R)) stop("Returns object not detected, must run optimize.portfolio with trace=TRUE") # If the user does not pass in rp, then we will generate random portfolios if(rp){ permutations <- match.call(expand.dots=TRUE)$permutations Modified: pkg/PortfolioAnalytics/R/charts.RP.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-17 20:15:09 UTC (rev 3128) +++ pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-18 05:08:46 UTC (rev 3129) @@ -100,7 +100,8 @@ if(!inherits(object, "optimize.portfolio.random")){ stop("object must be of class 'optimize.portfolio.random'") } - R <- object$R + R <- object$R + if(is.null(R)) stop("Returns object not detected, must run optimize.portfolio with trace=TRUE") xtract = extractStats(object) columnnames = colnames(xtract) #return.column = grep(paste("objective_measures",return.col,sep='.'),columnnames) From noreply at r-forge.r-project.org Wed Sep 18 07:35:15 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 07:35:15 +0200 (CEST) Subject: [Returnanalytics-commits] r3130 - pkg/PortfolioAnalytics/vignettes Message-ID: <20130918053515.DE5451852C8@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-18 07:35:15 +0200 (Wed, 18 Sep 2013) New Revision: 3130 Modified: pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw pkg/PortfolioAnalytics/vignettes/portfolio_vignette.pdf Log: Modifying portfolio_vignette to add content and charts. 
Modified: pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw =================================================================== --- pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw 2013-09-18 05:08:46 UTC (rev 3129) +++ pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw 2013-09-18 05:35:15 UTC (rev 3130) @@ -1,19 +1,51 @@ -\documentclass[12pt,letterpaper,english]{article} +\documentclass[a4paper]{article} \usepackage[OT1]{fontenc} \usepackage{Sweave} -\usepackage{verbatim} \usepackage{Rd} -\usepackage{Sweave} +\usepackage{amsmath} +\usepackage{hyperref} +\usepackage[round]{natbib} +\usepackage{bm} +\usepackage{verbatim} +\usepackage[latin1]{inputenc} +\bibliographystyle{abbrvnat} + +\usepackage{url} + +\let\proglang=\textsf +%\newcommand{\pkg}[1]{{\fontseries{b}\selectfont #1}} +%\newcommand{\R}[1]{{\fontseries{b}\selectfont #1}} +%\newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}} +%\newcommand{\E}{\mathsf{E}} +%\newcommand{\VAR}{\mathsf{VAR}} +%\newcommand{\COV}{\mathsf{COV}} +%\newcommand{\Prob}{\mathsf{P}} + +\renewcommand{\topfraction}{0.85} +\renewcommand{\textfraction}{0.1} +\renewcommand{\baselinestretch}{1.5} +\setlength{\textwidth}{15cm} \setlength{\textheight}{22cm} \topmargin-1cm \evensidemargin0.5cm \oddsidemargin0.5cm + +\usepackage[latin1]{inputenc} +% or whatever + +\usepackage{lmodern} +\usepackage[T1]{fontenc} +% Or whatever. Note that the encoding and the font should match. If T1 +% does not look nice, try deleting the line with the fontenc. + \begin{document} +\SweaveOpts{concordance=TRUE} + \title{Creating a Portfolio Object with PortfolioAnalytics} \author{Ross Bennett} \maketitle \begin{abstract} -The purpose of this vignette is to demonstrate the new interface in PortfolioAnalytics to specify a portfolio object and to add constraints and objectives. 
+The purpose of this vignette is to demonstrate the new interface in PortfolioAnalytics to specify a portfolio object, add constraints and objectives, and run optimizations. \end{abstract} \tableofcontents @@ -55,14 +87,24 @@ \section{Adding Constraints to the Portfolio Object} Adding constraints to the portfolio object is done with \code{add.constraint}. The \code{add.constraint} function is the main interface for adding and/or updating constraints to the portfolio object. This function allows the user to specify the portfolio to add the constraints to, the type of constraints, arguments for the constraint, and whether or not to enable the constraint (\code{enabled=TRUE} is the default). If updating an existing constraint, the indexnum argument can be specified. -Here we add a constraint that the weights must sum to 1, or the full investment constraint. +\subsection{Leverage Constraint} + +The \code{leverage} constraint specifies the constraint on the sum of the weights. Aliases for the \code{leverage} constraint type include \code{weight\_sum}, \code{weight}, and \code{leverage}. Here we add a constraint that the weights must sum to 1, or the full investment constraint. <<>>= # Add the full investment constraint that specifies the weights must sum to 1. pspec <- add.constraint(portfolio=pspec, type="weight_sum", min_sum=1, max_sum=1) +@ +There are two special cases for the leverage constraint: +\begin{enumerate} +\item The sum of the weights equals 1, i.e. the full investment constraint. The full investment constraint can be specified with \code{type="full\_investment"}. This automatically sets \code{min\_sum=1} and \code{max\_sum=1}. +\item The sum of the weights equals 0, i.e. the dollar neutral or active constraint. This constraint can be specified with \code{type="dollar\_neutral"} or \code{type="active"}.
+\end{enumerate} + +<<>>= # The full investment constraint can also be specified with type="full_investment" # pspec <- add.constraint(portfolio=pspec, type="full_investment") @@ -75,7 +117,8 @@ # pspec <- add.constraint(portfolio=pspec, type="active") @ -Here we add box constraints for the asset weights so that the minimum weight of any asset must be greater than or equal to 0.05 and the maximum weight of any asset must be less than or equal to 0.4. The values for min and max can be passed in as scalars or vectors. If min and max are scalars, the values for min and max will be replicated as vectors to the length of assets. If min and max are not specified, a minimum weight of 0 and maximum weight of 1 are assumed. Note that min and max can be specified as vectors with different weights for linear inequality constraints. +\subsection{Box Constraint} +Box constraints allows the user to specify upper and lower bounds on the weights of the assets. Here we add box constraints for the asset weights so that the minimum weight of any asset must be greater than or equal to 0.05 and the maximum weight of any asset must be less than or equal to 0.4. The values for min and max can be passed in as scalars or vectors. If min and max are scalars, the values for min and max will be replicated as vectors to the length of assets. If min and max are not specified, a minimum weight of 0 and maximum weight of 1 are assumed. Note that min and max can be specified as vectors with different weights for linear inequality constraints. <<>>= # Add box constraints pspec <- add.constraint(portfolio=pspec, @@ -95,29 +138,20 @@ # pspec <- add.constraint(portfolio=pspec, type="long_only") @ -The portfolio object now has 2 objects in the constraints list. One object for the sum of weights constraint and another for the box constraint. -<<>>= -print(pspec) -@ -The \code{summary} function gives a more detailed view of the constraints. 
+\subsection{Group Constraint} +Group constraints allow the user to specify the sum of weights by group. Group constraints are currently supported by the ROI, DEoptim, and random portfolio solvers. The following code groups the assets such that the first 3 assets are grouped together labeled GroupA and the fourth asset is in its own group labeled GroupB. The \code{group\_min} argument specifies that the sum of the weights in GroupA must be greater than or equal to 0.1 and the sum of the weights in GroupB must be greater than or equal to 0.15. The \code{group\_max} argument specifies that the sum of the weights in GroupA must be less than or equal to 0.85 and the sum of the weights in GroupB must be less than or equal to 0.55. The \code{group\_labels} argument is optional and is useful if groups is not a named list for labeling groups in terms of market capitalization, sector, etc. <<>>= -summary(pspec) -@ - - -Another common constraint that can be added is a group constraint. Group constraints are currently supported by the ROI, DEoptim, and random portfolio solvers. The following code groups the assets such that the first 3 assets are grouped together labeled GroupA and the fourth asset is in its own group labeled GroupB. The \code{group\_min} argument specifies that the sum of the weights in GroupA must be greater than or equal to 0.1 and the sum of the weights in GroupB must be greater than or equal to 0.15. The \code{group\_max} argument specifies that the sum of the weights in GroupA must be less than or equal to 0.85 and the sum of the weights in GroupB must be less than or equal to 0.55.The \code{group\_labels} argument is optional and is useful for labeling groups in terms of market capitalization, sector, etc.
-<<>>= # Add group constraints pspec <- add.constraint(portfolio=pspec, type="group", groups=list(groupA=c(1, 2, 3), grouB=4), group_min=c(0.1, 0.15), - group_max=c(0.85, 0.55), - group_labels=c("GroupA", "GroupB")) + group_max=c(0.85, 0.55)) @ -A position limit constraint can be added to limit the number of assets with non-zero, long, or short positions. The ROI solver interfaces to the Rglpk package (i.e. using the glpk plugin) for solving maximizing return and ETL/ES/cVaR objectives. The Rglpk package supports integer programming and thus supports position limit constraints for the \code{max\_pos} argument. The quadprog package does not support integer programming, and therefore \code{max\_pos} is not supported for the ROI solver using the quadprog plugin. Note that \code{max\_pos\_long} and \code{max\_pos\_short} are not supported for either ROI solver. All position limit constraints are fully supported for DEoptim and random solvers. +\subsection{Position Limit Constraint} +The position limit constraint allows the user to specify limits on the number of assets with non-zero, long, or short positions. The ROI solver interfaces to the Rglpk package (i.e. using the glpk plugin) for solving maximizing return and ETL/ES/cVaR objectives. The Rglpk package supports integer programming and thus supports position limit constraints for the \code{max\_pos} argument. The quadprog package does not support integer programming, and therefore \code{max\_pos} is not supported for the ROI solver using the quadprog plugin. Note that \code{max\_pos\_long} and \code{max\_pos\_short} are not supported for either ROI solver. All position limit constraints are fully supported for DEoptim and random solvers. <<>>= # Add position limit constraint such that we have a maximum number of three assets with non-zero weights. 
@@ -127,22 +161,48 @@ # pspec <- add.constraint(portfolio=pspec, type="position_limit", max_pos_long=3, max_pos_short=3) @ -A target diversification can be specified as a constraint. Diversification is defined as $diversification = \sum_{i=1}^N w_i^2$ for $N$ assets. The diversification constraint is implemented for the global optimizers by applying a penalty if the diversification value is more than 5\% away from \code{div\_target}. -TODO add support for diversification as a constraint for ROI solvers. Can't do this with Rglpk, but can add as a penalty term for quadratic utility and minimum variance problems +\subsection{Diversification Constraint} +The diversification constraint allows the user to target diversification. Diversification is defined as $diversification = \sum_{i=1}^N w_i^2$ for $N$ assets. The diversification constraint is implemented for the global optimizers by applying a penalty if the diversification value is more than 5\% away from \code{div\_target}. Note that diversification as a constraint is not supported for the ROI solvers, it is only supported for the global numeric solvers. <<>>= pspec <- add.constraint(portfolio=pspec, type="diversification", div_target=0.7) @ -A target turnover can be specified as a constraint. The turnover is calculated from a set of initial weights. The initial weights can be specified, by default they are the initial weights in the portfolio object. The turnover constraint is implemented for the global optimizers by applying a penalty if the turnover value is more than 5\% away from \code{turnover\_target}. Note that the turnover constraint is not currently supported for the ROI solvers. +\subsection{Turnover Constraint} +A target turnover can be specified as a constraint. The turnover is calculated from a set of initial weights. The initial weights can be specified, by default they are the initial weights in the portfolio object. 
The turnover constraint is implemented for the global optimizers by applying a penalty if the turnover value is more than 5\% away from \code{turnover\_target}. Note that the turnover constraint is not currently supported for quadratic utility and minimum variance problems using the ROI solver. <<>>= pspec <- add.constraint(portfolio=pspec, type="turnover", turnover_target=0.2) @ -A target mean return can be specified as a constraint. +\subsection{Target Return Constraint} +The target return constraint allows the user to specify a target mean return. <<>>= pspec <- add.constraint(portfolio=pspec, type="return", return_target=0.007) @ +\subsection{Factor Exposure Constraint} +The factor exposure constraint allows the user to set upper and lower bounds on exposures to risk factors. The exposures can be passed in as a vector or matrix. Here we specify a vector for \code{B} with arbitrary values, e.g. betas of the assets, with a market risk exposure range of 0.6 to 0.9. +<<>>= +pspec <- add.constraint(portfolio=pspec, type="factor_exposure", + B=c(-0.08, 0.37, 0.79, 1.43), + lower=0.6, upper=0.9) +@ + +\subsection{Transaction Cost Constraint} +The transaction cost constraint allows the user to specify proportional transaction costs. Proportional transaction cost constraints can be implemented for quadratic utility and minimum variance problems using the ROI solver. Transaction costs are supported as a penalty for the global numeric solvers. Here we add the transaction cost contraint with the proportional transaction cost value of 1\%. +<<>>= +pspec <- add.constraint(portfolio=pspec, type="transaction_cost", ptc=0.01) +@ + +The print method for the portfolio object shows a concise view of the portfolio and the constraints that have been added. +<<>>= +print(pspec) +@ + +The \code{summary} function gives a more detailed view of the constraints. +<<>>= +summary(pspec) +@ + This demonstrates adding constraints to the portfolio object. 
As an alternative to adding constraints directly to the portfolio object, constraints can be specified as separate objects. \subsection{Specifying Constraints as Separate Objects} @@ -174,23 +234,449 @@ # target return constraint ret_constr <- return_constraint(return_target=0.007) + +# factor exposure constraint +exp_constr <- factor_exposure_constraint(assets=pspec$assets, + B=c(-0.08, 0.37, 0.79, 1.43), + lower=0.6, upper=0.9) + +# transaction cost constraint +ptc_constr <- transaction_cost_constraint(assets=pspec$assets, ptc=0.01) @ \section{Adding Objectives} -Business objectives can be added to the portfolio object with \code{add.objective}. The \code{add.objective} function is the main function for adding and/or updating business objectives to the portfolio object. This function allows the user to specify the portfolio to add the objectives to, the type (currently 'return', 'risk', or 'risk\_budget'), name of the objective function, arguments to the objective function, and whether or not to enable the objective. If updating an existing constraint, the indexnum argument can be specified. +Objectives can be added to the portfolio object with \code{add.objective}. The \code{add.objective} function is the main function for adding and/or updating business objectives to the portfolio object. This function allows the user to specify the \verb"portfolio" to add the objectives to, the \verb"type" (currently 'return', 'risk', 'risk\_budget', or 'weight\_concentration'), \verb"name" of the objective function, \verb"arguments" to the objective function, and whether or not to \verb"enable" the objective. If updating an existing constraint, the \verb"indexnum" argument can be specified. -Here we add a risk objective to minimize portfolio variance. Note that the name of the function must correspond to a function in R. Many functions are available in the PerformanceAnalytics package. 
+\subsection{Portfolio Risk Objective} +The portfolio risk objective allows the user to specify a risk function to minimize. +Here we add a risk objective to minimize portfolio expected tail loss with a confidence level of 0.95. Other default arguments to the function can be passed in as a named list to arguments. Note that the name of the function must correspond to a function in R. Many functions are available in the \verb"PerformanceAnalytics" package, or a user-defined function may be used. <<>>= pspec <- add.objective(portfolio=pspec, type='risk', - name='var', - enabled=TRUE) + name='ETL', + arguments=list(p=0.95)) @ -TODO Add more objectives +\subsection{Portfolio Return Objective} +The return objective allows the user to specify a return function to maximize. Here we add a return objective to maximize the portfolio mean return. +<<>>= +pspec <- add.objective(portfolio=pspec, + type='return', + name='mean') +@ +\subsection{Portfolio Risk Budget Objective} +The portfolio risk budget objective allows the user to specify constraints to minimize component contribution (i.e. equal risk contribution) or specify upper and lower bounds on percentage risk contribution. Here we specify that no asset can contribute more than 30\% to total portfolio risk. See the risk budget optimization vignette for more detailed examples of portfolio optimizations with risk budgets. +<<>>= +pspec <- add.objective(portfolio=pspec, type="risk_budget", name="ETL", + arguments=list(p=0.95), max_prisk=0.3) + +# for an equal risk contribution portfolio, set min_concentration=TRUE +# pspec <- add.objective(portfolio=pspec, type="risk_budget", name="ETL", +# arguments=list(p=0.95), min_concentration=TRUE) +@ + + +\subsection{Portfolio Weight Concentration Objective} +The weight concentration objective allows the user to specify an objective to minimize concentration as measured by the Herfindahl-Hirschman Index.
For optimization problems solved with the global numeric optimizers, the portfolio HHI value is penalized using the \code{conc\_aversion} value as the multiplier. + +For quadratic utility problems with weight concentration as an objective using the ROI solver, this is implemented as a penalty to the objective function. The objective function is implemented as follows: + +\begin{eqnarray} +\underset{\boldsymbol{w}}{\text{maximize}} +\boldsymbol{w}' \boldsymbol{\mu} - \frac{\lambda}{2}(\boldsymbol{w}' \boldsymbol{\Sigma} \boldsymbol{w} + \lambda_{hhi} * HHI)\\ +\end{eqnarray} +where $\mu$ is the estimated mean asset returns, $\lambda$ is the risk aversion parameter, $\lambda_{hhi}$ is the concentration aversion parameter, $HHI$ is the portfolio $HHI$, $\boldsymbol{\Sigma}$ is the estimated covariance matrix of asset returns, and $\boldsymbol{w}$ is the set of weights. + +Here we add a weight concentration objective for the overall portfolio HHI. +<<>>= +pspec <- add.objective(portfolio=pspec, type="weight_concentration", + name="HHI", conc_aversion=0.1) +@ + +The weight concentration aversion parameter by groups can also be specified. Here we add a weight concentration objective specifying groups and concentration aversion parameters by group. +<<>>= +pspec <- add.objective(portfolio=pspec, type="weight_concentration", + name="HHI", + conc_aversion=c(0.03, 0.06), + conc_groups=list(c(1, 2), + c(3, 4))) +@ + +The print method for the portfolio object will now show all the constraints and objectives that have been added. +<<>>= +print(pspec) +@ + +The \code{summary} function gives a more detailed view. +<<>>= +summary(pspec) +@ + +\section{Solvers} +The PortfolioAnalytics package currently supports random portfolios, DEoptim, pso, GenSA, and ROI as back ends. Note that some of the QP/LP problems are solved directly with Rglpk and quadprog.
The solver can be specified with the \code{optimize\_method} argument in \code{optimize.portfolio} and \code{optimize.portfolio.rebalancing}. + +\subsection{DEoptim} +PortfolioAnalytics uses the \code{DEoptim} function from the R package \verb"DEoptim". Differential evolution is a stochastic global optimization algorithm. See \code{?DEoptim} and the references contained therein for more information. See also \href{http://cran.r-project.org/web/packages/DEoptim/vignettes/DEoptimPortfolioOptimization.pdf}{Large scale portfolio optimization with DEoptim}. +\subsection{Random Portfolios} +PortfolioAnalytics has three methods to generate random portfolios. +\begin{enumerate} +\item The 'sample' method to generate random portfolios is based on an idea by Pat Burns. This is the most flexible method, but also the slowest, and can generate portfolios to satisfy leverage, box, group, and position limit constraints. +\item The 'simplex' method to generate random portfolios is based on a paper by W. T. Shaw. The simplex method is useful to generate random portfolios with the full investment constraint, where the sum of the weights is equal to 1, and min box constraints. Values for \code{min\_sum} and \code{max\_sum} of the leverage constraint will be ignored, the sum of weights will equal 1. All other constraints such as the box constraint max, group and position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. +\item The 'grid' method to generate random portfolios is based on the \code{gridSearch} function in package \verb"NMOF". The grid search method only satisfies the \code{min} and \code{max} box constraints. The \code{min\_sum} and \code{max\_sum} leverage constraints will likely be violated and the weights in the random portfolios should be normalized. Normalization may cause the box constraints to be violated and will be penalized in \code{constrained\_objective}. 
+\end{enumerate} + +The following plots illustrate the various methods to generate random portfolios. + +<>= +R <- edhec[, 1:4] + +# set up simple portfolio with leverage and box constraints +pspec <- portfolio.spec(assets=colnames(R)) +pspec <- add.constraint(portfolio=pspec, type="leverage", + min_sum=0.99, max_sum=1.01) +pspec <- add.constraint(portfolio=pspec, type="box", min=0, max=1) + +# generate random portfolios using the 3 methods +rp1 <- random_portfolios(portfolio=pspec, permutations=5000, + rp_method='sample') +rp2 <- random_portfolios(portfolio=pspec, permutations=5000, + rp_method='simplex') +rp3 <- random_portfolios(portfolio=pspec, permutations=5000, + rp_method='grid') + +# show feasible portfolios in mean-StdDev space +tmp1.mean <- apply(rp1, 1, function(x) mean(R %*% x)) +tmp1.StdDev <- apply(rp1, 1, function(x) StdDev(R=R, weights=x)) +tmp2.mean <- apply(rp2, 1, function(x) mean(R %*% x)) +tmp2.StdDev <- apply(rp2, 1, function(x) StdDev(R=R, weights=x)) +tmp3.mean <- apply(rp3, 1, function(x) mean(R %*% x)) +tmp3.StdDev <- apply(rp3, 1, function(x) StdDev(R=R, weights=x)) + +# plot feasible portfolios +plot(x=tmp1.StdDev, y=tmp1.mean, col="gray", main="Random Portfolio Methods") +points(x=tmp2.StdDev, y=tmp2.mean, col="red", pch=2) +points(x=tmp3.StdDev, y=tmp3.mean, col="lightgreen", pch=5) +legend("bottomright", legend=c("sample", "simplex", "grid"), + col=c("gray", "red", "lightgreen"), + pch=c(1, 2, 5), bty="n") +@ + +Figure 1 shows the feasible space using the different random portfolio methods. The 'sample' method has relatively even coverage of the feasible space. The 'simplex' method also has relatively even coverage of the space, but it is also more concentrated around the assets. The 'grid' method is pushed to the interior of the space due to the normalization. + +The \code{fev} argument controls the face-edge-vertex biasing. Higher values for \code{fev} will result in the weights vector more concentrated on a single asset. 
This can be seen in the following charts. +<>= +fev <- 0:5 +par(mfrow=c(2, 3)) +for(i in 1:length(fev)){ + rp <- random_portfolios(portfolio=pspec, permutations=2000, + rp_method='simplex', fev=fev[i]) + tmp.mean <- apply(rp, 1, function(x) mean(R %*% x)) + tmp.StdDev <- apply(rp, 1, function(x) StdDev(R=R, weights=x)) + plot(x=tmp.StdDev, y=tmp.mean, main=paste("FEV =", fev[i]), + ylab="mean", xlab="StdDev", col=rgb(0, 0, 100, 50, maxColorValue=255)) +} +par(mfrow=c(1,1)) +@ + +Figure 2 shows the feasible space varying the fev values. + +The \code{fev} argument can be passed in as a vector for more even coverage of the feasible space. The default value is \code{fev=0:5}. +<>= +par(mfrow=c(1, 2)) +# simplex +rp_simplex <- random_portfolios(portfolio=pspec, permutations=2000, + rp_method='simplex', fev=0:5) +tmp.mean <- apply(rp_simplex, 1, function(x) mean(R %*% x)) +tmp.StdDev <- apply(rp_simplex, 1, function(x) StdDev(R=R, weights=x)) +plot(x=tmp.StdDev, y=tmp.mean, main="rp_method=simplex fev=0:5", + ylab="mean", xlab="StdDev", col=rgb(0, 0, 100, 50, maxColorValue=255)) +#sample +rp_sample <- random_portfolios(portfolio=pspec, permutations=2000, + rp_method='sample') +tmp.mean <- apply(rp_sample, 1, function(x) mean(R %*% x)) +tmp.StdDev <- apply(rp_sample, 1, function(x) StdDev(R=R, weights=x)) +plot(x=tmp.StdDev, y=tmp.mean, main="rp_method=sample", + ylab="mean", xlab="StdDev", col=rgb(0, 0, 100, 50, maxColorValue=255)) +par(mfrow=c(1,1)) +@ + +Figure 3 shows the feasible space comparing the 'sample' and 'simplex' methods to generate random portfolios. + +\subsection{pso} +PortfolioAnalytics uses the \code{psoptim} function from the R package \verb"pso". Particle swarm optimization is a heuristic optimization algorithm. See \code{?psoptim} and the references contained therein for more information. + +\subsection{GenSA} +PortfolioAnalytics uses the \code{GenSA} function from the R package \verb"GenSA". 
Generalized simulated annealing is a generic probabilistic heuristic optimization algorithm. See \code{?GenSA} and the references contained therein for more information. + +\subsection{ROI} +The \verb"ROI" package serves as an interface to the \verb"Rglpk" package and the \verb"quadprog" package to solve linear and quadratic programming problems. The interface to the \verb"ROI" package solves a limited type of convex optimization problems: + +\begin{enumerate} +\item Maximize portfolio return subject to leverage, box, group, position limit, target mean return, and/or factor exposure constraints on weights. +\item Minimize portfolio variance subject to leverage, box, group, turnover, and/or factor exposure constraints (otherwise known as global minimum variance portfolio). +\item Minimize portfolio variance subject to leverage, box, group, and/or factor exposure constraints and a desired portfolio return. +\item Maximize quadratic utility subject to leverage, box, group, target mean return, turnover, and/or factor exposure constraints and risk aversion parameter. +(The risk aversion parameter is passed into \code{optimize.portfolio} as an added argument to the \code{portfolio} object). +\item Minimize ETL subject to leverage, box, group, position limit, target mean return, and/or factor exposure constraints and target portfolio return. +\end{enumerate} + + \section{Optimization} -TODO +The previous sections demonstrated how to specify a portfolio object, add constraints, add objectives, and the solvers available. This section will demonstrate running the optimizations via \code{optimize.portfolio}. Only a small number of examples will be shown here, see the demos for several more examples. 
+\subsection{Initial Portfolio Object} +<<>>= +library(DEoptim) +library(ROI) +require(ROI.plugin.glpk) +require(ROI.plugin.quadprog) +data(edhec) +R <- edhec[, 1:6] +colnames(R) <- c("CA", "CTAG", "DS", "EM", "EQMN", "ED") +funds <- colnames(R) + +# Create an initial portfolio object with leverage and box constraints +init <- portfolio.spec(assets=funds) +init <- add.constraint(portfolio=init, type="leverage", + min_sum=0.99, max_sum=1.01) +init <- add.constraint(portfolio=init, type="box", min=0.05, max=0.65) +@ + +\subsection{Maximize mean return with ROI} +Add an objective to maximize mean return. +<<>>= +maxret <- add.objective(portfolio=init, type="return", name="mean") +@ + +Run the optimization. +<<>>= +opt_maxret <- optimize.portfolio(R=R, portfolio=maxret, + optimize_method="ROI", trace=TRUE) + +print(opt_maxret) +@ + +Chart the weights and optimal portfolio in risk-return space. +<>= +chart.Weights(opt_maxret) +chart.RiskReward(opt_maxret, risk.col="StdDev", return.col="mean", + main="Maximum Return Optimization", chart.assets=TRUE, + xlim=c(0, 0.05)) +@ + +\subsection{Minimize variance with ROI} +Add an objective to minimize portfolio variance. +<<>>= +minvar <- add.objective(portfolio=init, type="risk", name="var") +@ + +Run the optimization. Note that although 'var' is the risk metric, 'StdDev' is returned as an objective measure. +<<>>= +opt_minvar <- optimize.portfolio(R=R, portfolio=minvar, + optimize_method="ROI", trace=TRUE) +print(opt_minvar) +@ + +Chart the weights and optimal portfolio in risk-return space. +<>= +chart.Weights(opt_minvar) +chart.RiskReward(opt_minvar, risk.col="StdDev", return.col="mean", + main="Minimum Variance Optimization", chart.assets=TRUE, + xlim=c(0, 0.05)) +@ + +\subsection{Maximize quadratic utility with ROI} +Add mean and var objectives for quadratic utility. Note that the risk aversion parameter for quadratic utility is specifed in the objective as shown below. 
+<<>>= +qu <- add.objective(portfolio=init, type="return", name="mean") +qu <- add.objective(portfolio=qu, type="risk", name="var", risk_aversion=0.25) +@ + +Run the optimization. +<<>>= +opt_qu <- optimize.portfolio(R=R, portfolio=qu, + optimize_method="ROI", trace=TRUE) +print(opt_qu) +@ + +<>= +chart.Weights(opt_qu) +chart.RiskReward(opt_qu, risk.col="StdDev", return.col="mean", + main="Quadratic Utility Optimization", chart.assets=TRUE, + xlim=c(0, 0.05)) +@ + +\subsection{Minimize expected tail loss with ROI} +Add ETL objective. +<<>>= +etl <- add.objective(portfolio=init, type="risk", name="ETL") +@ + +Run the optimization. +<<>>= +opt_etl <- optimize.portfolio(R=R, portfolio=etl, + optimize_method="ROI", trace=TRUE) +print(opt_etl) +@ + +<>= +chart.Weights(opt_etl) +chart.RiskReward(opt_etl, risk.col="ES", return.col="mean", + main="ETL Optimization", chart.assets=TRUE, + xlim=c(0, 0.14)) +@ + +\subsection{Maximize mean return per unit ETL with random portfolios} +Add mean and ETL objectives. +<<>>= +meanETL <- add.objective(portfolio=init, type="return", name="mean") +meanETL <- add.objective(portfolio=meanETL, type="risk", name="ETL", + arguments=list(p=0.95)) +@ + +Run the optimization. The default random portfolio method is 'sample'. +<<>>= +opt_meanETL <- optimize.portfolio(R=R, portfolio=meanETL, + optimize_method="random", + trace=TRUE, search_size=2000) +print(opt_meanETL) +@ + +The optimization was run with \code{trace=TRUE} so that iterations and other output from random portfolios is stored in the \code{opt\_meanETL} object. The \code{extractStats} function can be used to get a matrix of the weights and objective measures at each iteration. +<<>>= +stats_meanETL <- extractStats(opt_meanETL) +dim(stats_meanETL) +head(stats_meanETL) +@ + +Chart the optimal weights and optimal portfolio in risk-return space. 
Because the optimization was run with \code{trace=TRUE}, the chart of the optimal portfolio also includes the trace portfolios of the optimization. This is useful to visualize the feasible space of the portfolios. The 'neighbor' portfolios relative to the optimal portfolio weights can be included in the chart of the optimal weights. +<>= +chart.Weights(opt_meanETL, neighbors=25) +chart.RiskReward(opt_meanETL, risk.col="ETL", return.col="mean", + main="mean-ETL Optimization") +@ + +Calculate and plot the portfolio component ETL contribution. +<>= +pct_contrib <- ES(R=R, p=0.95, portfolio_method="component", + weights=extractWeights(opt_meanETL)) +barplot(pct_contrib$pct_contrib_MES, cex.names=0.8, las=3) +@ + +This figure shows that the Equity Market Neutral strategy has greater than 50\% risk contribution. A risk budget objective can be added to limit risk contribution percentage to 40\%. + +\subsection{Maximize mean return per unit ETL with ETL risk budgets} +Add objectives. +<<>>= +# change the box constraints to long only [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3130 From noreply at r-forge.r-project.org Wed Sep 18 07:40:20 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 07:40:20 +0200 (CEST) Subject: [Returnanalytics-commits] r3131 - in pkg/PortfolioAnalytics: R man Message-ID: <20130918054020.A408D1852C8@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-18 07:40:20 +0200 (Wed, 18 Sep 2013) New Revision: 3131 Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R pkg/PortfolioAnalytics/man/optimize.portfolio.Rd Log: Modifying documentation for optimize.portfolio Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-18 05:35:15 UTC (rev 3130) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-18 05:40:20 UTC (rev 3131) @@ 
-904,10 +904,7 @@ #' constrained optimization of portfolios #' #' This function aims to provide a wrapper for constrained optimization of -#' portfolios that allows the user to specify box constraints and business -#' objectives. -#' It will be the objective function\code{FUN} passed to any supported \R -#' optimization solver. +#' portfolios that specify constraints and objectives. #' #' @details #' This function currently supports DEoptim, random portfolios, pso, GenSA, and ROI as back ends. @@ -944,9 +941,7 @@ #' #' Because these convex optimization problem are standardized, there is no need for a penalty term. #' The \code{multiplier} argument in \code{\link{add.objective}} passed into the complete constraint object are ingnored by the ROI solver. -#' -#' If you would like to interface with \code{optimize.portfolio} using matrix formulations, then use \code{ROI_old}. -# +#' #' @note #' An object of class \code{v1_constraint} can be passed in for the \code{constraints} argument. #' The \code{v1_constraint} object was used in the previous 'v1' specification to specify the Modified: pkg/PortfolioAnalytics/man/optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/optimize.portfolio.Rd 2013-09-18 05:35:15 UTC (rev 3130) +++ pkg/PortfolioAnalytics/man/optimize.portfolio.Rd 2013-09-18 05:40:20 UTC (rev 3131) @@ -121,10 +121,8 @@ } \description{ This function aims to provide a wrapper for constrained - optimization of portfolios that allows the user to - specify box constraints and business objectives. It will - be the objective function\code{FUN} passed to any - supported \R optimization solver. + optimization of portfolios that specify constraints and + objectives. } \details{ This function currently supports DEoptim, random @@ -182,10 +180,6 @@ \code{multiplier} argument in \code{\link{add.objective}} passed into the complete constraint object are ingnored by the ROI solver. 
- - If you would like to interface with - \code{optimize.portfolio} using matrix formulations, then - use \code{ROI_old}. } \note{ An object of class \code{v1_constraint} can be passed in From noreply at r-forge.r-project.org Wed Sep 18 10:49:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 10:49:27 +0200 (CEST) Subject: [Returnanalytics-commits] r3132 - in pkg/Meucci: R demo man Message-ID: <20130918084927.440851852B5@r-forge.r-project.org> Author: xavierv Date: 2013-09-18 10:49:26 +0200 (Wed, 18 Sep 2013) New Revision: 3132 Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R pkg/Meucci/R/FitMultivariateGarch.R pkg/Meucci/R/FitOrnsteinUhlenbeck.R pkg/Meucci/R/LognormalMoments2Parameters.R pkg/Meucci/R/MaxRsqCS.R pkg/Meucci/R/MaxRsqTS.R pkg/Meucci/R/ProjectionStudentT.R pkg/Meucci/R/SimulateJumpDiffusionMerton.R pkg/Meucci/demo/S_AutocorrelatedProcess.R pkg/Meucci/demo/S_BondProjectionPricingNormal.R pkg/Meucci/demo/S_BondProjectionPricingStudentT.R pkg/Meucci/demo/S_CallsProjectionPricing.R pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R pkg/Meucci/demo/S_CrossSectionIndustries.R pkg/Meucci/demo/S_EquitiesInvariants.R pkg/Meucci/demo/S_EquityProjectionPricing.R pkg/Meucci/demo/S_FactorAnalysisNotOk.R pkg/Meucci/demo/S_FactorResidualCorrelation.R pkg/Meucci/demo/S_FixedIncomeInvariants.R pkg/Meucci/demo/S_HedgeOptions.R pkg/Meucci/demo/S_HorizonEffect.R pkg/Meucci/demo/S_JumpDiffusionMerton.R pkg/Meucci/demo/S_LinVsLogReturn.R pkg/Meucci/demo/S_MultiVarSqrRootRule.R pkg/Meucci/demo/S_ProjectNPriceMvGarch.R pkg/Meucci/demo/S_ProjectSummaryStatistics.R pkg/Meucci/demo/S_PureResidualBonds.R pkg/Meucci/demo/S_ResidualAnalysisTheory.R pkg/Meucci/demo/S_SelectionHeuristics.R pkg/Meucci/demo/S_StatArbSwaps.R pkg/Meucci/demo/S_SwapPca2Dim.R pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R pkg/Meucci/demo/S_TimeSeriesIndustries.R pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 
pkg/Meucci/demo/S_Toeplitz.R pkg/Meucci/demo/S_VolatilityClustering.R pkg/Meucci/man/CentralAndStandardizedStatistics.Rd pkg/Meucci/man/FitMultivariateGarch.Rd pkg/Meucci/man/FitOrnsteinUhlenbeck.Rd pkg/Meucci/man/LognormalMoments2Parameters.Rd pkg/Meucci/man/MaxRsqCS.Rd pkg/Meucci/man/MaxRsqTS.Rd pkg/Meucci/man/ProjectionStudentT.Rd pkg/Meucci/man/SimulateJumpDiffusionMerton.Rd pkg/Meucci/man/garch1f4.Rd pkg/Meucci/man/garch2f8.Rd Log: - updated documentation for chapter 3 demo scripts and its functions Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R =================================================================== --- pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -1,6 +1,12 @@ -#' Compute central and standardized statistics, as described in A. Meucci -#' "Risk and Asset Allocation", Springer, 2005 +#' @title Compute central and standardized statistics. #' +#' @description Compute central and standardized statistics, as described in A. Meucci +#' "Risk and Asset Allocation", Springer, 2005. +#' +#' Computes the central moments \deqn{ CM_1^X \equiv \mu_{X}\,, \quad CM_n^X \equiv E \{(X - E\{ X \})^{n}\}\,, \quad n=2,3,\ldots ,} +#' and from them the standarized statistics \deqn{ \mu_{X},\sigma_{X},sk_{X},ku_{X},\gamma_{X}^{(5)}, \ldots ,\gamma_{X}^{(n)} .} +#' where \deqn{\gamma_{X}^{(n)} \equiv E \{(X - \mu_{X})^{n}\}/\sigma_{X}^{n},\quad n\geq3 .} +#' #' @param X : [vector] (J x 1) draws from the distribution #' @param N : [scalar] highest degree for the central moment #' @@ -8,9 +14,14 @@ #' @return mu : [vector] (1 x N) central moments up to order N #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 97 - Projection of skewness, kurtosis, and all standardized summary statistics". #' See Meucci's script for "CentralAndStandardizedStatistics.m" #' +#' Kendall, M., Stuart, A., 1969. The Advanced Theory of Statistics, Volume, 3rd Edition. Griffin. +#' +#' A. Meucci - "Annualization and general projection of skweness, kurtosis, and all summary statistics", +#' GARP Risk Professional August 2010, 55?56. \url{http://symmys.com/node/136}. #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export Modified: pkg/Meucci/R/FitMultivariateGarch.R =================================================================== --- pkg/Meucci/R/FitMultivariateGarch.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/R/FitMultivariateGarch.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -11,10 +11,12 @@ #' @return CTMF : [matrix] coefficient matrix C-tilde (in the notation of the paper) #' @return Hhat : [matrix] forecasted conditional covariance matrix #' -#' @note Initially written by Olivier Ledoit and Michael Wolf +#' @note Code for MATLAB initially written by Olivier Ledoit and Michael Wolf #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 136 - Equity market: multivariate GARCH process". +#' #' See Meucci's script for "FitMultivariateGarch.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -123,11 +125,14 @@ #' #' @note #' MATLAB's script initially written by Olivier Ledoit, 4/28/1997 -#' Uses a conditional t-distribution with fixed degrees of freedom -#' Difference with garch1f: errors come from the score alone +#' +#' Uses a conditional t-distribution with fixed degrees of freedom +#' +#' Difference with garch1f: errors come from the score alone #' #' @references #' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' #' See Meucci's script for "FitMultivariateGarch.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -385,14 +390,17 @@ #' @return hferr : [scalar] standard error on hf #' #' @note -#' Initially written by Olivier Ledoit, 4/28/1997 -#' Uses a conditional t-distribution with fixed degrees of freedom -#' Steepest Ascent on boundary, Hessian off boundary, no grid search +#' MATLAB's code initially written by Olivier Ledoit, 4/28/1997 +#' +#' Uses a conditional t-distribution with fixed degrees of freedom +#' +#' Steepest Ascent on boundary, Hessian off boundary, no grid search #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "FitMultivariateGarch.m" #' +#' See Meucci's script for "FitMultivariateGarch.m" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export @@ -681,10 +689,11 @@ # @return XXX : [matrix] positive semi-definite matrix with same diagonal elements as A that is closest # to A according to the Frobenius norm # -# @note Written initially by Ilya Sharapov (1997) +# @note MATLAB's code written initially by Ilya Sharapov (1997) # # @references -# \url{http://symmys.com/node/170} +# A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +# # See Meucci's script for "FitMultivariateGarch.m" # # @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/FitOrnsteinUhlenbeck.R =================================================================== --- pkg/Meucci/R/FitOrnsteinUhlenbeck.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/R/FitOrnsteinUhlenbeck.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -1,4 +1,6 @@ -#' Fit a multivariate OU process at estimation step tau, as described in A. Meucci +#' @title Fits a multivariate Ornstein - Uhlenbeck process at estimation step tau. 
+#' +#' @description Fit a multivariate OU process at estimation step tau, as described in A. Meucci #' "Risk and Asset Allocation", Springer, 2005 #' #' @param Y : [matrix] (T x N) @@ -14,6 +16,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' #' See Meucci's script for "FitOrnsteinUhlenbeck.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/LognormalMoments2Parameters.R =================================================================== --- pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -1,7 +1,7 @@ #' @title Computes the mean and standard deviation of a lognormal distribution from its parameters. #' #' @description determines $\mu$ and $\sigma^2$ from $\Expect\{X\}$ and $\Var\{X\}$, and uses it to determine $\mu$ -#' and $\sigma^{2}$ such that $\Expect\left\{ X\right\} \bydef 3$ and $\Var\left\{ X\right\} \bydef 5$, as described in +#' and $\sigma^{2}$ such that $\Expect\left\{ X\right\} \equiv 3$ and $\Var\left\{ X\right\} \equiv 5$, as described in #' A. Meucci, "Risk and Asset Allocation", Springer, 2005. #' #' \deqn{\sigma^{2} = \ln \left( 1 + \frac{V}{E^{2}} \right) , } Modified: pkg/Meucci/R/MaxRsqCS.R =================================================================== --- pkg/Meucci/R/MaxRsqCS.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/R/MaxRsqCS.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -22,7 +22,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' Used in "E 123 ? Cross-section factors: generalized cross-section industry factors". +#' Used in "E 123 - Cross-section factors: generalized cross-section industry factors". 
#' #' See Meucci's script for "MaxRsqCS.m" #' Modified: pkg/Meucci/R/MaxRsqTS.R =================================================================== --- pkg/Meucci/R/MaxRsqTS.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/R/MaxRsqTS.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -15,7 +15,7 @@ #' @return B : [matrix] (N x K) #' #' @note -#' Initial code by Tai-Ho Wang +#' Initial MATLAB's code by Tai-Ho Wang. #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. Modified: pkg/Meucci/R/ProjectionStudentT.R =================================================================== --- pkg/Meucci/R/ProjectionStudentT.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/R/ProjectionStudentT.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -14,7 +14,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 141 ? Fixed-income market: projection of Student t invariants". +#' "E 141 - Fixed-income market: projection of Student t invariants". #' #' See Meucci's script for "ProjectionStudentT.m" #' Modified: pkg/Meucci/R/SimulateJumpDiffusionMerton.R =================================================================== --- pkg/Meucci/R/SimulateJumpDiffusionMerton.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/R/SimulateJumpDiffusionMerton.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -1,22 +1,28 @@ - -#' This function simulates a jump diffusion process, as described in A. Meucci "Risk and Asset Allocation", -#' Springer, 2005 +#'@title Simulates a Merton jump-diffusion process. 
#' -#' @param m : [scalar] deterministic drift of diffusion -#' @param s : [scalar] standard deviation of diffusion -#' @param l : [scalar] Poisson process arrival rate -#' @param a : [scalar] drift of log-jump -#' @param D : [scalar] st.dev of log-jump -#' @param ts : [vector] time steps -#' @param J : [scalar] number of simulations +#' @description This function simulates a jump diffusion process, as described in A. Meucci "Risk and Asset Allocation", +#' Springer, 2005. #' -#' @return X : [matrix] (J x length(ts)) of simulations +#' @param m [scalar] deterministic drift of diffusion +#' @param s [scalar] standard deviation of diffusion +#' @param l [scalar] Poisson process arrival rate +#' @param a [scalar] drift of log-jump +#' @param D [scalar] st.dev of log-jump +#' @param ts [vector] time steps +#' @param J [scalar] number of simulations #' +#' @return X [matrix] (J x length(ts)) of simulations +#' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 132 - Simulation of a jump-diffusion process". +#' #' See Meucci's script for "SimulateJumpDiffusionMerton.m" #' -#' @author Xavier Valls \email{flamejat@@gmail.com} +#' Merton, R. C., 1976. "Option pricing when underlying stocks are discontinuous". Journal of Financial +#' Economics 3, 125?144. +#' +#'@author Xavier Valls \email{flamejat@@gmail.com} #' @export SimulateJumpDiffusionMerton = function( m, s, l, a, D, ts, J ) Modified: pkg/Meucci/demo/S_AutocorrelatedProcess.R =================================================================== --- pkg/Meucci/demo/S_AutocorrelatedProcess.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_AutocorrelatedProcess.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -3,7 +3,7 @@ #' #' @references #' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 133 ? Simulation of a Ornstein-Uhlenbeck process". +#' "E 133 - Simulation of a Ornstein-Uhlenbeck process". #' #' See Meucci's script for "S_AutocorrelatedProcess.m" #' Modified: pkg/Meucci/demo/S_BondProjectionPricingNormal.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_BondProjectionPricingNormal.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -5,7 +5,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 140 ? Fixed-income market: projection of normal invariants". +#' "E 140 - Fixed-income market: projection of normal invariants". #' #' See Meucci's script for "S_BondProjectionPricingNormal.m" #' Modified: pkg/Meucci/demo/S_BondProjectionPricingStudentT.R =================================================================== --- pkg/Meucci/demo/S_BondProjectionPricingStudentT.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_BondProjectionPricingStudentT.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -5,7 +5,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 141 ? Fixed-income market: projection of Student t invariants". +#' "E 141 - Fixed-income market: projection of Student t invariants". #' #' See Meucci's script for "S_BondProjectionPricingStudentT.m" #' Modified: pkg/Meucci/demo/S_CallsProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_CallsProjectionPricing.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -4,7 +4,7 @@ #' #' @references #' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 143 ? Derivatives market: projection of invariants". +#' "E 143 - Derivatives market: projection of invariants". #' #' See Meucci's script for "S_CallsProjectionPricing.m" #' Modified: pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_CrossSectionConstrainedIndustries.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -4,7 +4,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 123 ? Cross-section factors: generalized cross-section industry factors". +#' "E 123 - Cross-section factors: generalized cross-section industry factors". #' #' See Meucci's script for "S_CrossSectionConstrainedIndustries.m" #' Modified: pkg/Meucci/demo/S_CrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_CrossSectionIndustries.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 122 ? Cross-section factors: unconstrained cross-section industry factors". +#' "E 122 - Cross-section factors: unconstrained cross-section industry factors". #' #' See Meucci's script for "S_CrossSectionIndustries.m" #' Modified: pkg/Meucci/demo/S_EquitiesInvariants.R =================================================================== --- pkg/Meucci/demo/S_EquitiesInvariants.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_EquitiesInvariants.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -3,7 +3,7 @@ #' #' @references #' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 135 ? Equity market: quest for invariance". +#' "E 135 - Equity market: quest for invariance". #' #' See Meucci's script for "S_EquitiesInvariants.m" #' Modified: pkg/Meucci/demo/S_EquityProjectionPricing.R =================================================================== --- pkg/Meucci/demo/S_EquityProjectionPricing.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_EquityProjectionPricing.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -6,7 +6,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 138 ? Equity market: linear vs. compounded returns projection II". +#' "E 138 - Equity market: linear vs. compounded returns projection II". #' #' See Meucci's script for "S_EquityProjectionPricing.m" #' Modified: pkg/Meucci/demo/S_FactorAnalysisNotOk.R =================================================================== --- pkg/Meucci/demo/S_FactorAnalysisNotOk.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_FactorAnalysisNotOk.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 111 ? Hidden factors: puzzle". +#' "E 111 - Hidden factors: puzzle". #' #' See Meucci's script for "S_FactorAnalysisNotOk.m" #' Modified: pkg/Meucci/demo/S_FactorResidualCorrelation.R =================================================================== --- pkg/Meucci/demo/S_FactorResidualCorrelation.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_FactorResidualCorrelation.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -4,7 +4,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 125 ? Correlation factors-residual: normal example". +#' "E 125 - Correlation factors-residual: normal example". 
#' #' See Meucci's script for "S_FactorResidualCorrelation.m" #' Modified: pkg/Meucci/demo/S_FixedIncomeInvariants.R =================================================================== --- pkg/Meucci/demo/S_FixedIncomeInvariants.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_FixedIncomeInvariants.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 139 ? Fixed-income market: quest for invariance". +#' "E 139 - Fixed-income market: quest for invariance". #' #' See Meucci's script for "S_FixedIncomeInvariants.m" #' Modified: pkg/Meucci/demo/S_HedgeOptions.R =================================================================== --- pkg/Meucci/demo/S_HedgeOptions.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_HedgeOptions.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 127 ? Factors on demand: no-Greek hedging". +#' "E 127 - Factors on demand: no-Greek hedging". #' #' See Meucci's script for "S_HedgeOptions.m" #' Modified: pkg/Meucci/demo/S_HorizonEffect.R =================================================================== --- pkg/Meucci/demo/S_HorizonEffect.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_HorizonEffect.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -14,12 +14,12 @@ #' #' R = exp(X)-1 and Z = exp(F)-1 are the linear returns #' -#' @note See "E 116 ? Time series factors: analysis of residuals I" from +#' @note See "E 116 - Time series factors: analysis of residuals I" from #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 126 ? Factors on demand: horizon effect". 
+#' "E 126 - Factors on demand: horizon effect". #' #' See Meucci's script for "S_HorizonEffect.m" #' Modified: pkg/Meucci/demo/S_JumpDiffusionMerton.R =================================================================== --- pkg/Meucci/demo/S_JumpDiffusionMerton.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_JumpDiffusionMerton.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -1,10 +1,13 @@ -#'This script simulates a jump-diffusion process, as described in A. Meucci, "Risk and Asset Allocation", +#' This script simulates a jump-diffusion process, as described in A. Meucci, "Risk and Asset Allocation", #' Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_JumoDiffusionMerton.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 132 - Simulation of a jump-diffusion process". #' +#' See Meucci's script for "S_JumpDiffusionMerton.m" +#' @note see Merton, R. C., 1976. "Option pricing when underlying stocks are discontinuous". Journal of Financial +#' Economics 3, 125?144. #' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################## Modified: pkg/Meucci/demo/S_LinVsLogReturn.R =================================================================== --- pkg/Meucci/demo/S_LinVsLogReturn.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_LinVsLogReturn.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -2,7 +2,9 @@ #' in A. Meucci "Risk and Asset Allocation", Springer, 2005, chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 137 - Equity market: linear vs. compounded returns projection I". 
+#' #' See Meucci's script for "S_LinVsLogReturn.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_MultiVarSqrRootRule.R =================================================================== --- pkg/Meucci/demo/S_MultiVarSqrRootRule.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_MultiVarSqrRootRule.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -2,7 +2,9 @@ #' Described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 95 - Multivariate square-root rule". +#' #' See Meucci's script for "S_MultiVarSqrRootRule.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -18,7 +20,6 @@ ################################################################################################################## ### Plots Agg = list(); -#names(Agg)=c( "M_hat" , "S_hat", "M_norm", "S_norm"); dev.new(); plot( swaps$X[ , 1 ],swaps$X[ , 2], xlab = swaps$Names[[1]][1], ylab = swaps$Names[[2]][1] ); @@ -27,8 +28,6 @@ for( s in 1 : length(Steps) ) { - - # compute series at aggregated time steps k = Steps[ s ]; AggX = NULL; @@ -36,11 +35,10 @@ while( ( t + k + 1 ) <= T ) { NewTerm = apply( matrix(swaps$X[ t : (t+k-1), ], ,ncol(swaps$X) ),2,sum); - AggX = rbind( AggX, NewTerm ); ##ok + AggX = rbind( AggX, NewTerm ); t = t + k; } - # empirical mean/covariance if(s==1) @@ -64,6 +62,4 @@ h1 = TwoDimEllipsoid( Agg[[ s ]]$M_norm, Agg[[ s ]]$S_norm, 1, 0, 0 ); h2 = TwoDimEllipsoid( Agg[[ s ]]$M_hat, Agg[[ s ]]$S_hat, 1, 0, 0 ); - - } Modified: pkg/Meucci/demo/S_ProjectNPriceMvGarch.R =================================================================== --- pkg/Meucci/demo/S_ProjectNPriceMvGarch.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_ProjectNPriceMvGarch.R 2013-09-18 08:49:26 UTC (rev 3132) @@ 
-4,7 +4,9 @@ #'"Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 136 - Equity market: multivariate GARCH process". +#' #' See Meucci's script for "S_ProjectNPriceMvGarch.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -19,9 +21,9 @@ ### Inputs Prices = Equities$Prices[ , c(4, 5)]; -J = 10000; # numbers of MC scenarios +J = 10000; # numbers of MC scenarios N = ncol(Prices); # numbers of securities -T = 22; # projection horizon +T = 22; # projection horizon ################################################################################################################## ### Estimation of daily compounded returns distribution Modified: pkg/Meucci/demo/S_ProjectSummaryStatistics.R =================================================================== --- pkg/Meucci/demo/S_ProjectSummaryStatistics.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_ProjectSummaryStatistics.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -3,7 +3,9 @@ #' "Risk and Asset Allocation", Springer, 2005, chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 97 - Projection of skewness, kurtosis, and all standardized summary statistics". 
+#' #' See Meucci's script for "S_ProjectSummaryStatistics.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_PureResidualBonds.R =================================================================== --- pkg/Meucci/demo/S_PureResidualBonds.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_PureResidualBonds.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -1,9 +1,10 @@ - #' This script models the joint distribution of the yet-to-be realized key rates of the government curve, #' as described in A. Meucci "Risk and Asset Allocation", Springer, 2005, chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 112 - Pure residual models: duration/curve attribution". +#' #' See Meucci's script for "S_PureResidualBonds.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_ResidualAnalysisTheory.R =================================================================== --- pkg/Meucci/demo/S_ResidualAnalysisTheory.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_ResidualAnalysisTheory.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -2,9 +2,12 @@ #' Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 117 - Time series factors: analysis of residuals II". +#' #' See Meucci's script for "S_ResidualAnalysisTheory.m" -#' +#' @note See #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 116 - Time series factors: analysis of residuals I". 
#' @author Xavier Valls \email{flamejat@@gmail.com} #' Modified: pkg/Meucci/demo/S_SelectionHeuristics.R =================================================================== --- pkg/Meucci/demo/S_SelectionHeuristics.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_SelectionHeuristics.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -7,7 +7,9 @@ #' @return g : [scalar] r-square for the selected factors #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 128 - Factors on demand: selection heuristics". +#' #' See Meucci's script for "SelectGoodness.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -38,7 +40,9 @@ #' @note sorted by ascending order #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 128 - Factors on demand: selection heuristics". +#' #' See Meucci's script for "SelectNaive.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -82,7 +86,9 @@ #' @note same than recursive rejection, but it starts from the empty set, instead of from the full set #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 128 - Factors on demand: selection heuristics". +#' #' See Meucci's script for "SelectAcceptByS.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -137,7 +143,9 @@ #' problem by eliminating the factors one at a time starting from the full set #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 128 - Factors on demand: selection heuristics". +#' #' See Meucci's script for "SelectRejectByS.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -184,7 +192,9 @@ #' o !!! extremely time consuming !!! #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 128 - Factors on demand: selection heuristics". +#' #' See Meucci's script for "SelectRejectByS.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -211,7 +221,9 @@ #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 128 - Factors on demand: selection heuristics". +#' #' See Meucci's script for "S_SelectionHeuristics.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_StatArbSwaps.R =================================================================== --- pkg/Meucci/demo/S_StatArbSwaps.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_StatArbSwaps.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -2,9 +2,11 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 3. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 144 ? Statistical arbitrage: co-integration trading ". #' See Meucci's script for "S_StatArbSwaps.m" #' +#' A. Meucci - "Review of statistical arbitrage, cointegration, and multivariate Ornstein-Uhlenbeck", 2009. 
\url{http://symmys.com/node/132} #' @author Xavier Valls \email{flamejat@@gmail.com} # TODO: Check the loadings of the principal components analysis, fix the date ticks on the plots. @@ -19,6 +21,7 @@ PC = princomp( covmat=S ); E = PC$loadings Lam = ( PC$sdev )^2 + ################################################################################################################## ### Set up dates ticks dev.new(); @@ -26,7 +29,6 @@ XTick = NULL; years = as.numeric(format(swapParRates$Dates[1],"%Y")) : as.numeric(format(swapParRates$Dates[length(swapParRates$Dates)],"%Y")) - for( n in years ) { XTick = cbind( XTick, datenum(n,1,1) ); ##ok @@ -61,8 +63,6 @@ #set(gca(), 'xlim', X_Lim, 'XTick', XTick); #datetick('x','yy','keeplimits','keepticks'); - #grid off; - #title(['eigendirection n. ' num2str(n) ', theta = ' num2str(Theta)],'FontWeight','bold'); } dev.new(); Modified: pkg/Meucci/demo/S_SwapPca2Dim.R =================================================================== --- pkg/Meucci/demo/S_SwapPca2Dim.R 2013-09-18 05:40:20 UTC (rev 3131) +++ pkg/Meucci/demo/S_SwapPca2Dim.R 2013-09-18 08:49:26 UTC (rev 3132) @@ -1,31 +1,40 @@ #' This script performs the principal component analysis of a simplified two-point swap curve. #' it computes and plots, among others, -#' 1. the invariants, namely rate changes -#' 2. the location-dispersion ellipsoid of rates along with the 2-d location-dispersion ellipsoid -#' 3. the effect on the curve of the two uncorrelated principal factors -#' Described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 3. +#' 1. the invariants, namely rate changes #' +#' 2. the location-dispersion ellipsoid of rates along with the 2-d location-dispersion ellipsoid +#' +#' 3. the effect on the curve of the two uncorrelated principal factors +#' +#' Described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 3. +#' #' @references -#' \url{http://} -#' See Meucci's script for "S_AutocorrelatedProcess.m" +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 110 ? Hidden factors: principal component analysis of a two-point swap curve". #' +#' See Meucci's script for "S_SwapPca2Dim.m" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} + ################################################################################################################## ### Load data + data("swap2y4y.mat" ); ################################################################################################################## ### Current curve + Current_Curve = swap2y4y$Rates[ nrow( swap2y4y$Rates ), ]; dev.new(); plot(c( 2, 4 ), Current_Curve, type = "l", main = "Current_Curve", xlab = "time to maturity, years", ylab = "par swap rate, #" ); ################################################################################################################## ### Determine weekly invariants (changes in rates) [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3132 From noreply at r-forge.r-project.org Wed Sep 18 11:05:14 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 11:05:14 +0200 (CEST) Subject: [Returnanalytics-commits] r3133 - in pkg/Meucci: R demo man Message-ID: <20130918090514.B2EA11852B5@r-forge.r-project.org> Author: xavierv Date: 2013-09-18 11:05:14 +0200 (Wed, 18 Sep 2013) New Revision: 3133 Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R pkg/Meucci/R/LognormalMoments2Parameters.R pkg/Meucci/R/SimulateJumpDiffusionMerton.R pkg/Meucci/demo/S_JumpDiffusionMerton.R pkg/Meucci/demo/S_StatArbSwaps.R pkg/Meucci/demo/S_SwapPca2Dim.R pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R pkg/Meucci/demo/S_TimeSeriesIndustries.R pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R pkg/Meucci/demo/S_Toeplitz.R pkg/Meucci/demo/S_VolatilityClustering.R pkg/Meucci/man/CentralAndStandardizedStatistics.Rd pkg/Meucci/man/LognormalMoments2Parameters.Rd 
pkg/Meucci/man/SimulateJumpDiffusionMerton.Rd Log: - fixed problems with non-ASCII characters Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R =================================================================== --- pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -21,7 +21,7 @@ #' Kendall, M., Stuart, A., 1969. The Advanced Theory of Statistics, Volume, 3rd Edition. Griffin. #' #' A. Meucci - "Annualization and general projection of skweness, kurtosis, and all summary statistics", -#' GARP Risk Professional August 2010, 55?56. \url{http://symmys.com/node/136}. +#' GARP Risk Professional August 2010, 55-56. \url{http://symmys.com/node/136}. #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export Modified: pkg/Meucci/R/LognormalMoments2Parameters.R =================================================================== --- pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -1,7 +1,6 @@ #' @title Computes the mean and standard deviation of a lognormal distribution from its parameters. #' -#' @description determines $\mu$ and $\sigma^2$ from $\Expect\{X\}$ and $\Var\{X\}$, and uses it to determine $\mu$ -#' and $\sigma^{2}$ such that $\Expect\left\{ X\right\} \equiv 3$ and $\Var\left\{ X\right\} \equiv 5$, as described in +#' @description Computes the mean and standard deviation of a lognormal distribution from its parameters, as described in #' A. Meucci, "Risk and Asset Allocation", Springer, 2005. 
#' #' \deqn{\sigma^{2} = \ln \left( 1 + \frac{V}{E^{2}} \right) , } @@ -24,6 +23,8 @@ #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export +#determines $\mu$ and $\sigma^2$ from $\Expect\{X\}$ and $\Var\{X\}$, and uses it to determine $\mu$ +# and $\sigma^{2}$ such that $\Expect\left\{ X\right\} \equiv 3$ and $\Var\left\{ X\right\} \equiv 5$ LognormalMoments2Parameters = function( e, v ) { sig2 = log( 1 + v / ( e^2 ) ); Modified: pkg/Meucci/R/SimulateJumpDiffusionMerton.R =================================================================== --- pkg/Meucci/R/SimulateJumpDiffusionMerton.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/R/SimulateJumpDiffusionMerton.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -1,4 +1,4 @@ -#'@title Simulates a Merton jump-diffusion process. +#' @title Simulates a Merton jump-diffusion process. #' #' @description This function simulates a jump diffusion process, as described in A. Meucci "Risk and Asset Allocation", #' Springer, 2005. @@ -20,7 +20,7 @@ #' See Meucci's script for "SimulateJumpDiffusionMerton.m" #' #' Merton, R. C., 1976. "Option pricing when underlying stocks are discontinuous". Journal of Financial -#' Economics 3, 125?144. +#' Economics 3, 125-144. #' #'@author Xavier Valls \email{flamejat@@gmail.com} #' @export Modified: pkg/Meucci/demo/S_JumpDiffusionMerton.R =================================================================== --- pkg/Meucci/demo/S_JumpDiffusionMerton.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/demo/S_JumpDiffusionMerton.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -7,7 +7,7 @@ #' #' See Meucci's script for "S_JumpDiffusionMerton.m" #' @note see Merton, R. C., 1976. "Option pricing when underlying stocks are discontinuous". Journal of Financial -#' Economics 3, 125?144. +#' Economics 3, 125-144. 
#' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################## Modified: pkg/Meucci/demo/S_StatArbSwaps.R =================================================================== --- pkg/Meucci/demo/S_StatArbSwaps.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/demo/S_StatArbSwaps.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 144 ? Statistical arbitrage: co-integration trading ". +#' "E 144 - Statistical arbitrage: co-integration trading ". #' See Meucci's script for "S_StatArbSwaps.m" #' #' A. Meucci - "Review of statistical arbitrage, cointegration, and multivariate Ornstein-Uhlenbeck", 2009. \url{http://symmys.com/node/132} Modified: pkg/Meucci/demo/S_SwapPca2Dim.R =================================================================== --- pkg/Meucci/demo/S_SwapPca2Dim.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/demo/S_SwapPca2Dim.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -10,7 +10,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 110 ? Hidden factors: principal component analysis of a two-point swap curve". +#' "E 110 - Hidden factors: principal component analysis of a two-point swap curve". #' #' See Meucci's script for "S_SwapPca2Dim.m" #' Modified: pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/demo/S_TimeSeriesConstrainedIndustries.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -4,7 +4,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 115 ? 
Time series factors: generalized time-series industry factors". +#' "E 115 - Time series factors: generalized time-series industry factors". #' #' See Meucci's script for "S_TimeSeriesConstrainedIndustries.m" #' Modified: pkg/Meucci/demo/S_TimeSeriesIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesIndustries.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/demo/S_TimeSeriesIndustries.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 114 ? Time series factors: unconstrained time series industry factors". +#' "E 114 - Time series factors: unconstrained time series industry factors". #' #' See Meucci's script for "S_TimeSeriesIndustries.m" #' Modified: pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R =================================================================== --- pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/demo/S_TimeSeriesVsCrossSectionIndustries.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -4,7 +4,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 124 ? Cross-section factors: comparison cross-section with time-series industry factors". +#' "E 124 - Cross-section factors: comparison cross-section with time-series industry factors". #' See Meucci's script for "S_TimeSeriesVsCrossSectionIndustries.m" #' #' A. Meucci - "Review of linear factor models: Unexpected common features and the systematic-plus-idiosyncratic myth", 2010. 
\url{http://www.symmys.com/node/336} Modified: pkg/Meucci/demo/S_Toeplitz.R =================================================================== --- pkg/Meucci/demo/S_Toeplitz.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/demo/S_Toeplitz.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 130 ? Eigenvectors for Toeplitz structure". +#' "E 130 - Eigenvectors for Toeplitz structure". #' #' See Meucci's script for "S_Toeplitz.R" #' Modified: pkg/Meucci/demo/S_VolatilityClustering.R =================================================================== --- pkg/Meucci/demo/S_VolatilityClustering.R 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/demo/S_VolatilityClustering.R 2013-09-18 09:05:14 UTC (rev 3133) @@ -3,7 +3,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 134 ? Simulation of a GARCH process". +#' "E 134 - Simulation of a GARCH process". #' #' See Meucci's script for "S_VolatilityClustering.m" #' Modified: pkg/Meucci/man/CentralAndStandardizedStatistics.Rd =================================================================== --- pkg/Meucci/man/CentralAndStandardizedStatistics.Rd 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/man/CentralAndStandardizedStatistics.Rd 2013-09-18 09:05:14 UTC (rev 3133) @@ -43,7 +43,7 @@ A. Meucci - "Annualization and general projection of skweness, kurtosis, and all summary statistics", GARP - Risk Professional August 2010, 55?56. + Risk Professional August 2010, 55-56. \url{http://symmys.com/node/136}. 
} Modified: pkg/Meucci/man/LognormalMoments2Parameters.Rd =================================================================== --- pkg/Meucci/man/LognormalMoments2Parameters.Rd 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/man/LognormalMoments2Parameters.Rd 2013-09-18 09:05:14 UTC (rev 3133) @@ -16,11 +16,9 @@ sig2 [scalar] variance of the normal distribution } \description{ - determines $\mu$ and $\sigma^2$ from $\Expect\{X\}$ and - $\Var\{X\}$, and uses it to determine $\mu$ and - $\sigma^{2}$ such that $\Expect\left\{ X\right\} \equiv - 3$ and $\Var\left\{ X\right\} \equiv 5$, as described in - A. Meucci, "Risk and Asset Allocation", Springer, 2005. + Computes the mean and standard deviation of a lognormal + distribution from its parameters, as described in A. + Meucci, "Risk and Asset Allocation", Springer, 2005. \deqn{\sigma^{2} = \ln \left( 1 + \frac{V}{E^{2}} \right) , } \deqn{\mu = \ln(E) - \frac{1}{2} \ln \left( 1 + Modified: pkg/Meucci/man/SimulateJumpDiffusionMerton.Rd =================================================================== --- pkg/Meucci/man/SimulateJumpDiffusionMerton.Rd 2013-09-18 08:49:26 UTC (rev 3132) +++ pkg/Meucci/man/SimulateJumpDiffusionMerton.Rd 2013-09-18 09:05:14 UTC (rev 3133) @@ -39,6 +39,6 @@ Merton, R. C., 1976. "Option pricing when underlying stocks are discontinuous". Journal of Financial Economics - 3, 125?144. + 3, 125-144. 
} From noreply at r-forge.r-project.org Wed Sep 18 11:28:29 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 11:28:29 +0200 (CEST) Subject: [Returnanalytics-commits] r3134 - pkg/PortfolioAnalytics/R Message-ID: <20130918092829.971961852B5@r-forge.r-project.org> Author: braverock Date: 2013-09-18 11:28:29 +0200 (Wed, 18 Sep 2013) New Revision: 3134 Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R Log: - add some extra options for pso optimization Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-18 09:05:14 UTC (rev 3133) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-18 09:28:29 UTC (rev 3134) @@ -61,7 +61,7 @@ if(inherits(mout,"try-error")) { message(paste("portfolio moment function failed with message",mout)) } else { - dotargs <- mout + dotargs <- c(dotargs,mout) } normalize_weights <- function(weights){ @@ -333,7 +333,12 @@ names(dotargs[pm > 0L]) <- PSOcargs[pm] controlPSO$maxit <- maxit controlPSO[pm] <- dotargs[pm > 0L] - if(!hasArg(reltol)) controlPSO$reltol <- .000001 # 1/1000 of 1% change in objective is significant + if(!hasArg(reltol)) controlPSO$reltol <- .0001 # 1/100 of 1% change in objective is insignificant enough to restart a swarm + #NOTE reltol has a different meaning for pso than it has for DEoptim. for DEoptim, reltol is a stopping criteria, for pso, + # it is a restart criteria. 
+ + if(!hasArg(s)) controlPSO$s<-N*10 #swarm size + if(!hasArg(maxit.stagnate)) controlPSO$maxit.stagnate <- controlPSO$s #stopping criteria if(hasArg(trace) && try(trace==TRUE,silent=TRUE)) controlPSO$trace <- TRUE if(hasArg(trace) && isTRUE(trace)) { controlPSO$trace <- TRUE From noreply at r-forge.r-project.org Wed Sep 18 11:35:44 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 11:35:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3135 - pkg/PortfolioAnalytics/demo Message-ID: <20130918093544.2F3A21852B5@r-forge.r-project.org> Author: braverock Date: 2013-09-18 11:35:43 +0200 (Wed, 18 Sep 2013) New Revision: 3135 Modified: pkg/PortfolioAnalytics/demo/testing_pso.R Log: minor edits to improve clarity Modified: pkg/PortfolioAnalytics/demo/testing_pso.R =================================================================== --- pkg/PortfolioAnalytics/demo/testing_pso.R 2013-09-18 09:28:29 UTC (rev 3134) +++ pkg/PortfolioAnalytics/demo/testing_pso.R 2013-09-18 09:35:43 UTC (rev 3135) @@ -4,14 +4,8 @@ library(xts) library(quadprog) -library(Rglpk) library(PerformanceAnalytics) -library(ROI) -library(ROI.plugin.glpk) -library(ROI.plugin.quadprog) -library(Ecdat) library(PortfolioAnalytics) -library(DEoptim) library(pso) # General Parameters for sample code @@ -30,6 +24,7 @@ # ===================== # Max return under box constraints, fully invested +print('Max return under box constraints, fully invested') max.port <- gen.constr max.port$min <- rep(0.01,N) max.port$max <- rep(0.30,N) @@ -41,6 +36,7 @@ # ===================== # Mean-variance: Fully invested, Global Minimum Variance Portfolio +print('Mean-variance: Fully invested, Global Minimum Variance Portfolio') gmv.port <- gen.constr gmv.port$objectives[[4]]$enabled <- TRUE gmv.solution <- optimize.portfolio(R=R, constraints=gmv.port, optimize_method="pso", trace=TRUE) @@ -48,15 +44,12 @@ # ======================== -# Minimize CVaR with target return +# Minimize 
CVaR # +print('Min-CVaR') cvar.port <- gen.constr cvar.port$min <- rep(0,N) cvar.port$max <- rep(1,N) cvar.port$objectives[[3]]$enabled <- TRUE cvar.port$objectives[[3]]$arguments <- list(p=0.95, clean="boudt") cvar.solution <- optimize.portfolio(R=R, constraints=cvar.port, optimize_method="pso", trace=TRUE) - - - - From noreply at r-forge.r-project.org Wed Sep 18 13:40:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 13:40:27 +0200 (CEST) Subject: [Returnanalytics-commits] r3136 - in pkg/Meucci: . R demo man Message-ID: <20130918114028.0EEBB181201@r-forge.r-project.org> Author: xavierv Date: 2013-09-18 13:40:27 +0200 (Wed, 18 Sep 2013) New Revision: 3136 Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R pkg/Meucci/R/FitExpectationMaximization.R pkg/Meucci/R/LognormalMoments2Parameters.R pkg/Meucci/R/MleRecursionForStudentT.R pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R pkg/Meucci/R/QuantileMixture.R pkg/Meucci/TODO pkg/Meucci/demo/S_EigenvalueDispersion.R pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R pkg/Meucci/demo/S_EstimateQuantileEvaluation.R pkg/Meucci/demo/S_Estimator.R pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R pkg/Meucci/demo/S_FitSwapToStudentT.R pkg/Meucci/demo/S_GenerateMixtureSample.R pkg/Meucci/demo/S_MaximumLikelihood.R pkg/Meucci/demo/S_PasturMarchenko.R pkg/Meucci/demo/S_SemiCircular.R pkg/Meucci/demo/S_ShrinkageEstimators.R pkg/Meucci/demo/S_TStatApprox.R pkg/Meucci/man/CentralAndStandardizedStatistics.Rd pkg/Meucci/man/FitExpectationMaximization.Rd pkg/Meucci/man/LognormalMoments2Parameters.Rd pkg/Meucci/man/MleRecursionForStudentT.Rd pkg/Meucci/man/PlotMarginalsNormalInverseWishart.Rd pkg/Meucci/man/QuantileMixture.Rd Log: - updated documentation for chapter 4 demo scripts and its functions Modified: pkg/Meucci/R/CentralAndStandardizedStatistics.R 
=================================================================== --- pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/R/CentralAndStandardizedStatistics.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -7,18 +7,18 @@ #' and from them the standarized statistics \deqn{ \mu_{X},\sigma_{X},sk_{X},ku_{X},\gamma_{X}^{(5)}, \ldots ,\gamma_{X}^{(n)} .} #' where \deqn{\gamma_{X}^{(n)} \equiv E \{(X - \mu_{X})^{n}\}/\sigma_{X}^{n},\quad n\geq3 .} #' -#' @param X : [vector] (J x 1) draws from the distribution -#' @param N : [scalar] highest degree for the central moment +#' @param X [vector] (J x 1) draws from the distribution +#' @param N [scalar] highest degree for the central moment #' -#' @return ga : [vector] (1 x N) standardized statistics up to order N -#' @return mu : [vector] (1 x N) central moments up to order N +#' @return ga [vector] (1 x N) standardized statistics up to order N +#' @return mu [vector] (1 x N) central moments up to order N #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, #' "E 97 - Projection of skewness, kurtosis, and all standardized summary statistics". #' See Meucci's script for "CentralAndStandardizedStatistics.m" #' -#' Kendall, M., Stuart, A., 1969. The Advanced Theory of Statistics, Volume, 3rd Edition. Griffin. +#' Kendall, M., Stuart, A. - "The Advanced Theory of Statistics", 1969. Volume, 3rd Edition. Griffin. #' #' A. Meucci - "Annualization and general projection of skweness, kurtosis, and all summary statistics", #' GARP Risk Professional August 2010, 55-56. \url{http://symmys.com/node/136}. 
Modified: pkg/Meucci/R/FitExpectationMaximization.R =================================================================== --- pkg/Meucci/R/FitExpectationMaximization.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/R/FitExpectationMaximization.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -1,5 +1,7 @@ -#' Expectation-Maximization (EM) algorithm to recover missing observations in a time series , -#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. +#' @title Expectation-Maximization (EM) algorithm to recover missing observations in a time series. +#' +#' @description Expectation-Maximization (EM) algorithm to recover missing observations in a time series , +#' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, section 4.6.2 "Missing data". #' #' @param X : [matrix] (T x N) of data #' @@ -9,9 +11,15 @@ #' @return CountLoop : [scalar] number of iterations of the algorithm #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 177 - Expectation-Maximization algorithm for missing data: formulas" #' See Meucci's script for "FitExpectationMaximization.m" #' +#' Dempster, A. P. and Laird, M. N. and Rubin, D. B. - "Maximum Likelihood from Incomplete Data Via the EM Algorithm", +#' Journal of the Royal Statistical Society, 1977 vol 39 pag. 1-22. +#' +#' Bilmes, J. A.- "A Gentle Tutorial of the EM Algorithm and its Application to Parameter Estimation for Gaussian Mixture +#' and Hidden Markov Models", 1998. 
#' @author Xavier Valls \email{flamejat@@gmail.com} #' @export Modified: pkg/Meucci/R/LognormalMoments2Parameters.R =================================================================== --- pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/R/LognormalMoments2Parameters.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -16,7 +16,7 @@ #' @note Inverts the formulas (1.98)-(1.99) in "Risk and Asset Allocation", Springer, 2005. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, "E 25- Simulation of a lognormal random variable". +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, "E 25 - Simulation of a lognormal random variable". #' #' See Meucci's script for "LognormalMoments2Parameters.m" #' Modified: pkg/Meucci/R/MleRecursionForStudentT.R =================================================================== --- pkg/Meucci/R/MleRecursionForStudentT.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/R/MleRecursionForStudentT.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -1,6 +1,14 @@ -#' Compute recursively the ML estimators of location and scatter of a multivariate Student t distribution with -#' given degrees of freedom, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. -#' +#' @title Compute recursively the ML estimators of location and scatter of a multivariate Student t distribution with +#' given degrees of freedom. +#' +#' @description Compute recursively the ML estimators of location and scatter of a multivariate Student t distribution with +#' given degrees of freedom, as described in A. 
Meucci, "Risk and Asset Allocation", Springer, 2005, section 4.3 - "Maximum likelihood estimators" +#' +#' Returns \deqn{ \widehat{\mu} = \sum\limits_{t= 1}^{T} \frac{w_{t}}{\sum\limits_{s= 1}^{T}w_{s}} x_{t} ,} +#' \deqn{\widehat{\Sigma} = \frac{1}{T}\sum\limits_{t= 1}^{T}w_{t}(x_{t}-\widehat{\mu})(x_{t}-\widehat{\mu})^\prime} +#' +#' where, adapted to the Sudent T distribution, \deqn{ w_{t}\equiv \frac{\nu+N}{\nu+(x_{t}-\widehat{\mu})^\prime \widehat{\Sigma}^{-1}( x-\widehat{\mu}) }} +#' #' @param x : [matrix] (T x N) observations #' @param Nu : [scalar] degrees of freedom parameter #' @param Tolerance : [scalar] tolerance parameter. Default: 10^(-10) @@ -9,7 +17,9 @@ #' @return Sigma : [matrix] (N x N) covariance #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 188 - Maximum likelihood estimation of a multivariate Student t distribution". +#' #' See Meucci's script for "MleRecursionForStudentT.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -24,8 +34,8 @@ Ones_T = matrix( 1, T, 1 ); # fixed for fast matrix operation # initialize variables - w = matrix( 1, T, 1 ); - Mu = matrix( 0, N, 1 ); + w = matrix( 1, T, 1 ); + Mu = matrix( 0, N, 1 ); Sigma = matrix( 0, N, N ); Error = 10^6; @@ -34,20 +44,20 @@ { # update - Mu_Old = Mu; + Mu_Old = Mu; Sigma_Old = Sigma; # Step 1 - W = w %*% Ones_N; + W = w %*% Ones_N; Mu = matrix( apply( W * x, 2, sum ) ) / sum( w ); - x_c = x - Ones_T %*% t(Mu); + x_c = x - Ones_T %*% t(Mu); Sigma = t( W * x_c ) %*% x_c / T; # Step 2 InvS = solve(Sigma); - Ma2 = apply( ( x_c %*% InvS ) * x_c, 1, sum ); - w = ( Nu + N) / ( Nu + Ma2 ); + Ma2 = apply( ( x_c %*% InvS ) * x_c, 1, sum ); + w = ( Nu + N) / ( Nu + Ma2 ); # convergence Error = sum( diag( (Sigma - Sigma_Old) ^2) ) / N + t(Mu - Mu_Old) %*% ( Mu - Mu_Old ) / N; Modified: pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 
=================================================================== --- pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -17,7 +17,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "QuantileMixture.m" +#' See Meucci's script for "PlotMarginalsNormalInverseWishart.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export Modified: pkg/Meucci/R/QuantileMixture.R =================================================================== --- pkg/Meucci/R/QuantileMixture.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/R/QuantileMixture.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -1,21 +1,24 @@ - -#' Computes the quantile of a mixture distirbution by linear interpolation/extrapolation of the cdf.the confidence +#' @title Computes the quantile of a mixture distribution by linear interpolation/extrapolation of the cdf. +#' +#' @description Computes the quantile of a mixture distribution by linear interpolation/extrapolation of the cdf. The confidence #' level p can be vector. If this vector is uniformly distributed on [0,1] the sample Q is distributed as the mixture. -#' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005 +#' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005. 
#' -#' @param p : [scalar] in [0,1], probability -#' @param a : [scalar] in (0,1), mixing probability -#' @param m_Y : [scalar] mean of normal component -#' @param s_Y : [scalar] standard deviation of normal component -#' @param m_Z : [scalar] first parameters of the log-normal component -#' @param s_Z : [scalar] second parameter of the log-normal component +#' @param p [scalar] in [0,1], probability +#' @param a [scalar] in (0,1), mixing probability +#' @param m_Y [scalar] mean of normal component +#' @param s_Y [scalar] standard deviation of normal component +#' @param m_Z [scalar] first parameters of the log-normal component +#' @param s_Z [scalar] second parameter of the log-normal component #' -#' @return Q : [scalar] quantile +#' @return Q [scalar] quantile #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "QuantileMixture.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 184 - Estimation of a quantile of a mixture I". 
#' +#'See Meucci's script for "QuantileMixture.m" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export @@ -26,13 +29,14 @@ # compute second moment Ex2 = a * (m_Y^2 + s_Y^2) + (1 - a) * exp( 2 * m_Z + 2 * s_Z * s_Z); - s = sqrt( Ex2 - m * m ); + s = sqrt( Ex2 - m * m ); # compute cdf on suitable range X = m + 6 * s * seq( -1, 1, 0.001 ); F = a * pnorm( X, m_Y, s_Y) + (1 - a) * plnorm(X, m_Z, s_Z); X = X[!duplicated(F)]; F = unique(F); + # compute quantile by interpolation Q = interp1( F, X, p, method = "linear"); Modified: pkg/Meucci/TODO =================================================================== --- pkg/Meucci/TODO 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/TODO 2013-09-18 11:40:27 UTC (rev 3136) @@ -13,3 +13,4 @@ - find the exercises and sections they come from - write down the equations * Not Sure if EntropyProg returns what it should with empty matrices as arguments for the constraints +* Write text version of the formulas in the documentation \ No newline at end of file Modified: pkg/Meucci/demo/S_EigenvalueDispersion.R =================================================================== --- pkg/Meucci/demo/S_EigenvalueDispersion.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_EigenvalueDispersion.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,7 +2,9 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 168 - Sample covariance and eigenvalue dispersion". 
+#' #' See Meucci's script for "S_EigenValueDispersion.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -10,15 +12,15 @@ ################################################################################################################## ### Inputs -N = 50; +N = 50; +nSim = 50; SampleLenght = seq( N , 10 * N, N) -nSim = 50; ################################################################################################################## ### Generate mesh and surface -Mu = matrix( 0, N, 1 ); -Sigma= diag( 1, N ); +Mu = matrix( 0, N, 1 ); +Sigma = diag( 1, N ); # compute true eigenvalues Eigen = eigen(Sigma); @@ -27,7 +29,7 @@ EVal = diag( Eigen$Values[ Index, Index ]); # compute eigenvalues of sample estimator -nSampleLenght = length( SampleLenght ); +nSampleLenght = length( SampleLenght ); Store_EVal_Hat = matrix( NaN, nSampleLenght, N ); # preallocation for speed for( i in 1 : nSampleLenght ) { Modified: pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_EstimateExpectedValueEvaluation.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,7 +2,9 @@ #' and inefficiency, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 182 - Moment-based functional of a mixture III ". 
+#' #' See Meucci's script for "S_EigenValueDispersion.R" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_EstimateMomentsComboEvaluation.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -1,8 +1,10 @@ -#'This script familiarizes the user with the evaluation of an estimator:replicability, loss, error, -#'bias and inefficiency as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 4. +#' This script familiarizes the user with the evaluation of an estimator:replicability, loss, error, +#' bias and inefficiency as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 181 - Moment-based functional of a mixture II ". 
+#' #' See Meucci's script for "S_EstimateMomentsComboEvaluation.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -25,8 +27,8 @@ print(G_fX); # series generated by "nature": do not know the distribution -P = runif(T); -i_T = t( matrix (QuantileMixture( P, a, m_Y, s_Y, m_Z, s_Z ) ) ); +P = runif(T); +i_T = t( matrix ( QuantileMixture( P, a, m_Y, s_Y, m_Z, s_Z ) ) ); G_Hat_a = function(X) (X[ , 1] - X[ , ncol(X) ]) * X[ , 2 ] * X[ , 2 ]; G_Hat_b = function(X) apply( X, 1, mean); @@ -50,7 +52,8 @@ # randomize series generated by "nature" to check replicability nSim = 10000; -I_T = matrix( NaN, nSim, T); +I_T = matrix( NaN, nSim, T); + for( t in 1 : T ) { P = matrix( runif(nSim), nSim, 1); @@ -62,20 +65,20 @@ Gc = G_Hat_c(I_T); # tentative estimator of unknown functional Gd = G_Hat_d(I_T); # tentative estimator of unknown functional -Loss_Ga = (Ga-G_fX)^2; -Loss_Gb = (Gb-G_fX)^2; -Loss_Gc = (Gc-G_fX)^2; -Loss_Gd = (Gd-G_fX)^2; +Loss_Ga = (Ga-G_fX)^2; +Loss_Gb = (Gb-G_fX)^2; +Loss_Gc = (Gc-G_fX)^2; +Loss_Gd = (Gd-G_fX)^2; -Err_Ga = sqrt(mean(Loss_Ga)); -Err_Gb = sqrt(mean(Loss_Gb)); -Err_Gc = sqrt(mean(Loss_Gc)); -Err_Gd = sqrt(mean(Loss_Gd)); +Err_Ga = sqrt(mean(Loss_Ga)); +Err_Gb = sqrt(mean(Loss_Gb)); +Err_Gc = sqrt(mean(Loss_Gc)); +Err_Gd = sqrt(mean(Loss_Gd)); -Bias_Ga = abs(mean(Ga)-G_fX); -Bias_Gb = abs(mean(Gb)-G_fX); -Bias_Gc = abs(mean(Gc)-G_fX); -Bias_Gd = abs(mean(Gd)-G_fX); +Bias_Ga = abs(mean(Ga)-G_fX); +Bias_Gb = abs(mean(Gb)-G_fX); +Bias_Gc = abs(mean(Gc)-G_fX); +Bias_Gd = abs(mean(Gd)-G_fX); Ineff_Ga = sd(Ga); Ineff_Gb = sd(Gb); @@ -125,11 +128,14 @@ for( j in 1 : length(m_s) ) { m_Y = m_s[ j ]; + # functional of the distribution to be estimated G_fX = a * ( m_Y ^ 2 + s_Y^2 - m_Y ) + ( 1 - a ) *( exp( 2 * m_Z + 2 * s_Z^2 ) - exp( m_Z + 0.5 * s_Z^2 ) ); + # randomize series generated by "nature" to check replicability nSim = 10000; - I_T = matrix( NaN, nSim, T); + I_T = matrix( NaN, nSim, T); + for( t in 1 : T ) { P = matrix( runif(nSim) ); 
@@ -141,20 +147,20 @@ Gc = G_Hat_c(I_T); Gd = G_Hat_d(I_T); - Loss_Ga = (Ga-G_fX)^2; - Loss_Gb = (Gb-G_fX)^2; - Loss_Gc = (Gc-G_fX)^2; - Loss_Gd = (Gd-G_fX)^2; + Loss_Ga = (Ga-G_fX)^2; + Loss_Gb = (Gb-G_fX)^2; + Loss_Gc = (Gc-G_fX)^2; + Loss_Gd = (Gd-G_fX)^2; - Err_Ga = sqrt(mean(Loss_Ga)); - Err_Gb = sqrt(mean(Loss_Gb)); - Err_Gc = sqrt(mean(Loss_Gc)); - Err_Gd = sqrt(mean(Loss_Gd)); + Err_Ga = sqrt(mean(Loss_Ga)); + Err_Gb = sqrt(mean(Loss_Gb)); + Err_Gc = sqrt(mean(Loss_Gc)); + Err_Gd = sqrt(mean(Loss_Gd)); - Bias_Ga = abs(mean(Ga)-G_fX); - Bias_Gb = abs(mean(Gb)-G_fX); - Bias_Gc = abs(mean(Gc)-G_fX); - Bias_Gd = abs(mean(Gd)-G_fX); + Bias_Ga = abs(mean(Ga)-G_fX); + Bias_Gb = abs(mean(Gb)-G_fX); + Bias_Gc = abs(mean(Gc)-G_fX); + Bias_Gd = abs(mean(Gd)-G_fX); Ineff_Ga = sd(Ga); Ineff_Gb = sd(Gb); @@ -162,15 +168,15 @@ Ineff_Gd = sd(Gd); #store results - Err_Gasq = cbind( Err_Gasq, Err_Ga^2 ); ##ok<*AGROW> - Err_Gbsq = cbind( Err_Gbsq, Err_Gb^2 ); - Err_Gcsq = cbind( Err_Gcsq, Err_Gc^2 ); - Err_Gdsq = cbind( Err_Gdsq, Err_Gd^2 ); + Err_Gasq = cbind( Err_Gasq, Err_Ga^2 ); + Err_Gbsq = cbind( Err_Gbsq, Err_Gb^2 ); + Err_Gcsq = cbind( Err_Gcsq, Err_Gc^2 ); + Err_Gdsq = cbind( Err_Gdsq, Err_Gd^2 ); - Bias_Gasq = cbind(Bias_Gasq, Bias_Ga^2 ); - Bias_Gbsq = cbind(Bias_Gbsq, Bias_Gb^2 ); - Bias_Gcsq = cbind(Bias_Gcsq, Bias_Gc^2 ); - Bias_Gdsq = cbind(Bias_Gdsq, Bias_Gd^2 ); + Bias_Gasq = cbind(Bias_Gasq, Bias_Ga^2 ); + Bias_Gbsq = cbind(Bias_Gbsq, Bias_Gb^2 ); + Bias_Gcsq = cbind(Bias_Gcsq, Bias_Gc^2 ); + Bias_Gdsq = cbind(Bias_Gdsq, Bias_Gd^2 ); Ineff_Gasq = cbind( Ineff_Gasq, Ineff_Ga^2 ); Ineff_Gbsq = cbind( Ineff_Gbsq, Ineff_Gb^2 ); Modified: pkg/Meucci/demo/S_EstimateQuantileEvaluation.R =================================================================== --- pkg/Meucci/demo/S_EstimateQuantileEvaluation.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_EstimateQuantileEvaluation.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -1,7 +1,10 @@ #'This script familiarizes 
the user with the evaluation of an estimator:replicability, loss, error, #'bias and inefficiency as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 4. #' -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' @references +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 183 - Moment-based functional of a mixture IV". +#' #' See Meucci's script for "S_EstimateQuantileEvaluation.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -24,7 +27,7 @@ print( G_fX ); # series generated by "nature": do not know the distribution -P = runif( T ); +P = runif( T ); i_T = QuantileMixture( P, a, m_Y, s_Y, m_Z, s_Z ); G_Hat_e = function(X) apply( X, 1, median ); @@ -43,26 +46,26 @@ # randomize series generated by "nature" to check replicability nSim = 10000; -I_T = c(); +I_T = c(); for( t in 1:T ) { - P = runif(nSim); + P = runif(nSim); Simul = QuantileMixture(P,a,m_Y,s_Y,m_Z,s_Z); - I_T = cbind( I_T, Simul ); + I_T = cbind( I_T, Simul ); } Ge = G_Hat_e( I_T ); # tentative estimator of unknown functional Gb = G_Hat_b( I_T ); # tentative estimator of unknown functional -Loss_Ge = ( Ge - G_fX ) ^ 2; -Loss_Gb = ( Gb - G_fX ) ^ 2; +Loss_Ge = ( Ge - G_fX ) ^ 2; +Loss_Gb = ( Gb - G_fX ) ^ 2; -Err_Ge = sqrt( mean( Loss_Ge)); -Err_Gb = sqrt( mean( Loss_Gb)); +Err_Ge = sqrt( mean( Loss_Ge)); +Err_Gb = sqrt( mean( Loss_Gb)); -Bias_Ge = abs(mean(Ge)-G_fX); -Bias_Gb = abs(mean(Gb)-G_fX); +Bias_Ge = abs(mean(Ge)-G_fX); +Bias_Gb = abs(mean(Gb)-G_fX); Ineff_Ge = sd(Ge); Ineff_Gb = sd(Gb); @@ -98,38 +101,38 @@ # randomize series generated by "nature" to check replicability nSim = 10000; - I_T = NULL; + I_T = NULL; for( t in 1 : T ) { - P = runif(nSim); + P = runif(nSim); Simul = QuantileMixture(P, a, m_Y, s_Y, m_Z, s_Z); - I_T = cbind( I_T, Simul ); + I_T = cbind( I_T, Simul ); } Ge = G_Hat_e( I_T ); # tentative estimator of unknown functional Gb = 
G_Hat_b( I_T ); # tentative estimator of unknown functional - Loss_Ge = ( Ge - G_fX ) ^ 2; - Loss_Gb = ( Gb - G_fX ) ^ 2; + Loss_Ge = ( Ge - G_fX ) ^ 2; + Loss_Gb = ( Gb - G_fX ) ^ 2; - Err_Ge = sqrt( mean( Loss_Ge ) ); - Err_Gb = sqrt( mean( Loss_Gb ) ); + Err_Ge = sqrt( mean( Loss_Ge ) ); + Err_Gb = sqrt( mean( Loss_Gb ) ); - Bias_Ge = abs( mean( Ge ) - G_fX ); - Bias_Gb = abs( mean( Gb ) - G_fX ); + Bias_Ge = abs( mean( Ge ) - G_fX ); + Bias_Gb = abs( mean( Gb ) - G_fX ); Ineff_Ge = std( Ge ); Ineff_Gb = std( Gb ); #store results - Err_Gesq = cbind( Err_Gesq, Err_Ge ^ 2); ##ok<*AGROW> - Err_Gbsq = cbind(Err_Gbsq, Err_Gb^2); + Err_Gesq = cbind( Err_Gesq, Err_Ge^2 ); + Err_Gbsq = cbind( Err_Gbsq, Err_Gb^2 ); - Bias_Gesq = cbind( Bias_Gesq, Bias_Ge^2 ); - Bias_Gbsq = cbind( Bias_Gbsq, Bias_Gb^2 ); + Bias_Gesq = cbind( Bias_Gesq, Bias_Ge^2 ); + Bias_Gbsq = cbind( Bias_Gbsq, Bias_Gb^2 ); - Ineff_Gesq = cbind( Ineff_Gesq, Ineff_Ge ^ 2 ); - Ineff_Gbsq = cbind( Ineff_Gbsq, Ineff_Gb ^ 2 ); + Ineff_Gesq = cbind( Ineff_Gesq, Ineff_Ge^2 ); + Ineff_Gbsq = cbind( Ineff_Gbsq, Ineff_Gb^2 ); } ################################################################################################################### Modified: pkg/Meucci/demo/S_Estimator.R =================================================================== --- pkg/Meucci/demo/S_Estimator.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_Estimator.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,9 +2,10 @@ #', as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_EigenValueprintersion.R" +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, #' +#' See Meucci's script for "S_Estimator.R" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################## Modified: pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R =================================================================== --- pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_ExpectationMaximizationHighYield.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -3,7 +3,9 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 178 - Expectation-Maximization algorithm for missing data: example". +# #' See Meucci's script for "S_ExpectationMaximizationHighYield.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_FitSwapToStudentT.R =================================================================== --- pkg/Meucci/demo/S_FitSwapToStudentT.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_FitSwapToStudentT.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,7 +2,9 @@ #' Student t distribution, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 188 - Maximum likelihood estimation of a multivariate Student t distribution". 
+#' #' See Meucci's script for "S_FitSwapToStudentT.m" #' #' TO DO: Change colors from TwoDimEllipsoid in each iteration Modified: pkg/Meucci/demo/S_GenerateMixtureSample.R =================================================================== --- pkg/Meucci/demo/S_GenerateMixtureSample.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_GenerateMixtureSample.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,7 +2,9 @@ #' Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 184 - Estimation of a quantile of a mixture I". +#' #' See Meucci's script for "S_GenerateMixtureSample.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com}### @@ -15,7 +17,7 @@ m_Z = 0; s_Z = 0.15; -T = 52; +T = 52; ################################################################################################################## ### Computations Modified: pkg/Meucci/demo/S_MaximumLikelihood.R =================================================================== --- pkg/Meucci/demo/S_MaximumLikelihood.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_MaximumLikelihood.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,7 +2,9 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 186 - Maximum likelihood estimation". 
+#' #' See Meucci's script for "S_MaximumLikelihood.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_PasturMarchenko.R =================================================================== --- pkg/Meucci/demo/S_PasturMarchenko.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_PasturMarchenko.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,7 +2,9 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 190 - Random matrix theory: Marchenko-Pastur limit". +#' #' See Meucci's script for "S_PasturMarchenko.m" #' Modified: pkg/Meucci/demo/S_SemiCircular.R =================================================================== --- pkg/Meucci/demo/S_SemiCircular.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_SemiCircular.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,9 +2,10 @@ #' "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 189 - Random matrix theory: semi-circular law". 
+#' #' See Meucci's script for "S_SemiCircular.m" -#' ################################################################################################################## ### Inputs @@ -13,9 +14,9 @@ ################################################################################################################## ### Empirical eigenvalues -#X=rnorm(N); # normal -#X=( runif(N)-0.5 ) * sqrt(12); # uniform -X = log( matrix( runif(N^2), N, N )) + 1; # exponential +#X = rnorm( N ); # normal +#X = ( runif(N)-0.5 ) * sqrt(12); # uniform +X = log( matrix( runif(N^2), N, N )) + 1; # exponential Y = (X + t(X) ) / ( 2 * sqrt( 2 * N )); # symmetrize and rescale E = t(eigen(Y)$values); @@ -35,6 +36,5 @@ ################################################################################################################## ### Plots dev.new(); -#bar(t_, h); -plot(t_, h, type = "h", lwd = 5); -lines(t, g, col = "red", lwd = 3); +plot( t_, h, type = "h" , lwd = 5 ); +lines( t, g, col = "red", lwd = 3 ); Modified: pkg/Meucci/demo/S_ShrinkageEstimators.R =================================================================== --- pkg/Meucci/demo/S_ShrinkageEstimators.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_ShrinkageEstimators.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -2,7 +2,9 @@ #' as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005, Chapter 4. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 166 - Shrinkage estimator of location". 
+#' #' See Meucci's script for "S_ShrinkageEstimators.m" #' Modified: pkg/Meucci/demo/S_TStatApprox.R =================================================================== --- pkg/Meucci/demo/S_TStatApprox.R 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/demo/S_TStatApprox.R 2013-09-18 11:40:27 UTC (rev 3136) @@ -13,7 +13,9 @@ #' @return F : [vector] ( J x 1 ) #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 193 - Simulation of the distribution of statistics of regression parameters". +#' #' See Meucci's script for "GenerateInvariants.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/man/CentralAndStandardizedStatistics.Rd =================================================================== --- pkg/Meucci/man/CentralAndStandardizedStatistics.Rd 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/man/CentralAndStandardizedStatistics.Rd 2013-09-18 11:40:27 UTC (rev 3136) @@ -5,16 +5,14 @@ CentralAndStandardizedStatistics(X, N) } \arguments{ - \item{X}{: [vector] (J x 1) draws from the distribution} + \item{X}{[vector] (J x 1) draws from the distribution} - \item{N}{: [scalar] highest degree for the central - moment} + \item{N}{[scalar] highest degree for the central moment} } \value{ - ga : [vector] (1 x N) standardized statistics up to order - N + ga [vector] (1 x N) standardized statistics up to order N - mu : [vector] (1 x N) central moments up to order N + mu [vector] (1 x N) central moments up to order N } \description{ Compute central and standardized statistics, as described @@ -38,8 +36,8 @@ summary statistics". See Meucci's script for "CentralAndStandardizedStatistics.m" - Kendall, M., Stuart, A., 1969. The Advanced Theory of - Statistics, Volume, 3rd Edition. Griffin. + Kendall, M., Stuart, A. - "The Advanced Theory of + Statistics", 1969. 
Volume, 3rd Edition. Griffin. A. Meucci - "Annualization and general projection of skweness, kurtosis, and all summary statistics", GARP Modified: pkg/Meucci/man/FitExpectationMaximization.Rd =================================================================== --- pkg/Meucci/man/FitExpectationMaximization.Rd 2013-09-18 09:35:43 UTC (rev 3135) +++ pkg/Meucci/man/FitExpectationMaximization.Rd 2013-09-18 11:40:27 UTC (rev 3136) @@ -1,7 +1,6 @@ \name{FitExpectationMaximization} \alias{FitExpectationMaximization} -\title{Expectation-Maximization (EM) algorithm to recover missing observations in a time series , -as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005.} +\title{Expectation-Maximization (EM) algorithm to recover missing observations in a time series.} \usage{ FitExpectationMaximization(X) } @@ -21,14 +20,26 @@ \description{ Expectation-Maximization (EM) algorithm to recover missing observations in a time series , as described in - A. Meucci, "Risk and Asset Allocation", Springer, 2005. + A. Meucci, "Risk and Asset Allocation", Springer, 2005, + section 4.6.2 "Missing data". } \author{ Xavier Valls \email{flamejat at gmail.com} } \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio - Management" \url{http://symmys.com/node/170}. See - Meucci's script for "FitExpectationMaximization.m" + Management" \url{http://symmys.com/node/170}, "E 177 - + Expectation-Maximization algorithm for missing data: + formulas" See Meucci's script for + "FitExpectationMaximization.m" + + Dempster, A. P. and Laird, M. N. and Rubin, D. B. - + "Maximum Likelihood from Incomplete Data Via the EM + Algorithm", Journal of the Royal Statistical Society, + 1977 vol 39 pag. 1-22. + + Bilmes, J. 
A.- "A Gentle Tutorial of the EM Algorithm and + its Application to Parameter Estimation for Gaussian [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3136 From noreply at r-forge.r-project.org Wed Sep 18 16:01:44 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 16:01:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3137 - pkg/PortfolioAnalytics/sandbox/symposium2013/docs Message-ID: <20130918140144.B63CD183DE7@r-forge.r-project.org> Author: peter_carl Date: 2013-09-18 16:01:43 +0200 (Wed, 18 Sep 2013) New Revision: 3137 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd Log: - saving a version before making considerable changes to the flow Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-18 11:40:27 UTC (rev 3136) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-18 14:01:43 UTC (rev 3137) @@ -4,8 +4,8 @@ # Process Insert process diagram here? Optional @@ -41,87 +46,57 @@ # Strategic allocation ...broadly described as periodically reallocating the portfolio to achieve a long-term goal -- Understanding the nature and sources of investment risk within the portfolio +- Understand the nature and sources of investment risk within the portfolio - Manage the resulting balance of risk and return of the portfolio -- Apply within the cotext of the current economic and market situation +- Apply within the context of the current economic and market situation - Think systematically about preferences and constraints -# Selected strategies -Daily data from the ... 
+# Selected hedge fund strategies +Monthly data of EDHEC hedge fund indexes from 1998 - +# Ex-post Performance +\includegraphics[width=1.0\textwidth]{../results/EDHEC-Distributions.png} -# Performance of strategies -Cumulative returns and drawdowns chart +# Ex-post Performance +Add table of relevant statistics here -# Performance of strategies -BarVaR charts +# Ex-post Correlations +\includegraphics[width=0.5\textwidth]{../results/EDHEC-cor-inception.png} +\includegraphics[width=0.5\textwidth]{../results/EDHEC-cor-tr36m.png} -# Performance of strategies -Rolling 36-month Performance - -# Performance of strategies -Pair of scatterplots since inception/last 36 months - -# Performance of strategies -Comparison of distributions - -# Correlation of strategies -Correlation charts, from inception and trailing 36-months - # Portfolio issues Markowitz (1952) described an investor's objectives as: * maximizing some measure of gain while * minimizing some measure of risk -Many approaches follow Markowitz by using mean return and standard devation of returns for "risk" +Many approaches follow Markowitz by using variance of returns for "risk" # Portfolio issues Most investors would prefer: @@ -136,79 +111,55 @@ Construct a portfolio that: * maximizes return, -* with per-asset conditional constraints, -* with a specific univariate risk limit, -* while minimizing component risk concentration, -* and limiting drawdowns to a threshhold value. +* with per-asset position limits, +* with a specific univariate portfolio risk limit, +* defining risk as losses, +* considering effects of skewness and kurtosis, +* and limiting contribution of risk for constituents +* or minimizing component risk contribution. Not a quadratic (or linear, or conical) problem any more. 
-# Risk rather than volatility +# Risk, not volatility * Expected Tail Loss (ETL) is also called Conditional Value-at-Risk (CVaR) and Expected Shortfall (ES) * ETL is the mean expected loss when the loss exceeds the VaR * ETL has all the properties a risk measure should have to be coherent and is a convex function of the portfolio weights * Returns are skewed and/or kurtotic, so we use Cornish-Fisher (or "modified") estimates of ETL instead + -# Use Random Portfolios -[Burns (2009)](http://www.portfolioprobe.com/blog/) describes Random Portfolios +# ETL sensitivity +Modified ETL demonstrates a better fit for historical CVaR at lower confidence levels, and breaks down at higher confidence levels +*Insert chart or charts* -* From a portfolio seed, generate random pemutations of weights that meet your constraints on each asset -* add more here - -Sampling can help provide insight into the goals and constraints of the optimization - -* Covers the 'edge case' (min/max) constraints well -* Covers the 'interior' portfolios -* Useful for finding the search space for an optimizer -* Allows arbitrary number o fsamples -* Allows massively parallel execution - # Add general constraints Constraints specified for each asset in the portfolio: -* Maximum position: -* Minimum position: +* Maximum position: 30% +* Minimum position: 5% * Weights sum to 100% -* Weights step size of 0.5% +* Group constraints +* Rebalancing quarterly -Other settings: +# Estimation -* Confidence for VaR/ETL set at -* Random portfolios with X000 permutations -* Rebalancing quarterly (or monthly?) 
+ -# Estimation - * Optimizer chooses portfolios based on forward looking estimates of risk and return based on the portfolio moments -* Estimates use the first four moments and co-moments +* Usually explicitly making trade-offs between correlation and volatility among members +* Modified ETL extends the tradeoffs to the first four moments and co-moments * Historical sample moments work fine as predictors in normal market regimes, but poorly when the market regime shifts -One of the largest challenges in optimization is improving the estimates of return and volatility +One of the largest challenges in optimization is improving the estimates of the moments # Forecasting ## Returns ## Volatility +## Correlation -# Forecasting correlation -# Equal-weight portfolio - -* Provides a benchmark to evaluate the performance of an optimized portfolio against -* Each asset in the portfolio is purchased in the same quantity at the beginning of the period -* The portfolio is rebalanced back to equal weight at the beginning of the next period -* Implies no information about return or risk -* Is the re-weighting adding or subtracting value? -* Do we have a useful view of return and risk? - -# Sampled portfolios -scatter chart with equal weight portfolio - -# Turnover from equal-weight -scatter chart colored by degree of turnover - # Multiple objectives Equal contribution to: @@ -261,6 +212,81 @@ Minimum risk portfolios generally suffer from the drawback of portfolio concentration. + + +# Equal-weight portfolio + +* Provides a benchmark to evaluate the performance of an optimized portfolio against +* Each asset in the portfolio is purchased in the same quantity at the beginning of the period +* The portfolio is rebalanced back to equal weight at the beginning of the next period +* Implies no information about return or risk +* Is the re-weighting adding or subtracting value? +* Do we have a useful view of return and risk? 
+ +# Equal Contribution to Risk + + + + + + + +# Closed form optimizers + +# Use Random Portfolios +[Burns (2009)](http://www.portfolioprobe.com/blog/) describes Random Portfolios + +* From a portfolio seed, generate random pemutations of weights that meet your constraints on each asset +* add more here +* Random portfolios with X000 permutations + +Sampling can help provide insight into the goals and constraints of the optimization + +* Covers the 'edge case' (min/max) constraints well +* Covers the 'interior' portfolios +* Useful for finding the search space for an optimizer +* Allows arbitrary number of samples +* Allows massively parallel execution + +# Sampled portfolios +scatter chart with equal weight portfolio + +# Turnover from equal-weight +scatter chart colored by degree of turnover + +# Sampled portfolios +add assets to scatter - overconstrained? + +# Constrain by contribution to mETL +Add a constraint + +# Differential Evolution +All numerical optimizations are a tradeoff between speed and accuracy + +This space may well be non-convex in real portfolios + +Differential evolution will get more directed with each generation, rather than the uniform search of random portfolios + +Allows more logical 'space' to be searched with the same number of trial portfolios for more complex objectives + +doesn't test many portfolios on the interior of the portfolio space + +Early generations search a wider space; later generations increasingly focus on the space that is near-optimal + +Random jumps are performed in every generation to avoid local minima + +*Insert Chart* + +# Other Heuristic Methods +GenSA, SOMA, +Such functions are very compute intensive ? so linear, quadradic or conical objectives are better addressed through closed-form optimizers + +However, many business objectives do not fall into those categories... 
+ +...and brute force solutions are often intractable + + + # Ex-ante results Unstacked bar chart comparing allocations across objectives @@ -283,32 +309,42 @@ * Rebalancing periodically and examining out of sample performance can help you refine objectives * Differential Optimization and parallelization are valuable as objectives get more complicated -# References -Figure out bibtex links in markup -# Appendix -Slides after this point are not likely to be included in the final presentation - # _PortfolioAnalytics_ -- Provides numerical solutions to portfolios with complex constraints and objectives -- Unifies the interface across different numerical and closed-form optimizers +- Provides numerical solutions to portfolios with complex constraints and objectives comprised of any function +- Unifies the interface across different numerical and closed-form optimizers, including ... *ADD LIST* - Implements a front-end to two analytical solvers: **Differential Evolution** and **Random Portfolios** - Preserves the flexibility to define any kind of objective and constraint - Work-in-progress, available on R-Forge in the _ReturnAnalytics_ project + # Other packages -* _PerformanceAnalytics_ - * Library of econometric functions for performance and risk analysis of financial instruments and portfolios +## _PerformanceAnalytics_ + * Returns-based analysis of performance and risk for financial instruments and portfolios -* _rugarch_ and _rmgarch_ - * By Alexios Ghalanos - * The univariate and multivariate GARCH parts of the rgarch project on R-Forge +## _ROI_ + * Infrastructure package for optimization that facilitates use of different solvers by K. Hornik, D. Meyer, and S. 
Theussl + +## _DEoptim_ + * Implements Differential Evolution, a very powerful, elegant, population based stochastic function minimizer + +## _xts_ + * Time series package specifically for finance by Jeff Ryan and Josh Ulrich -* _xts_ - * By Jeff Ryan and Jush Ulrich - * Time series package specifically for finance +# Thanks +* Brian Peterson +* Kris Boudt +* Doug Martin +* Ross Bennett + +# References +Figure out bibtex links in markup + +# Appendix +Slides after this point are not likely to be included in the final presentation + # Scratch Slides likely to be deleted after this point \ No newline at end of file From noreply at r-forge.r-project.org Wed Sep 18 18:20:01 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 18:20:01 +0200 (CEST) Subject: [Returnanalytics-commits] r3138 - in pkg/Meucci: R demo man Message-ID: <20130918162002.056BA185DD8@r-forge.r-project.org> Author: xavierv Date: 2013-09-18 18:20:01 +0200 (Wed, 18 Sep 2013) New Revision: 3138 Modified: pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R pkg/Meucci/demo/S_CornishFisher.R pkg/Meucci/demo/S_ESContributionFactors.R pkg/Meucci/demo/S_ESContributionsStudentT.R pkg/Meucci/demo/S_ExtremeValueTheory.R pkg/Meucci/demo/S_InvestorsObjective.R pkg/Meucci/demo/S_VaRContributionsUniform.R pkg/Meucci/man/GenerateUniformDrawsOnUnitSphere.Rd Log: - updated documentation for chapter 5 demo scripts and its functions Modified: pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R =================================================================== --- pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R 2013-09-18 14:01:43 UTC (rev 3137) +++ pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R 2013-09-18 16:20:01 UTC (rev 3138) @@ -1,4 +1,6 @@ -#' Generate a uniform sample on the unit hypersphere, as described in A. Meucci, +#' @title Generate a uniform sample on the unit hypersphere. +#' +#' @description Generate a uniform sample on the unit hypersphere, as described in A. 
Meucci, #' "Risk and Asset Allocation", Springer, 2005. #' #' @param J : [scalar] number of draws @@ -6,10 +8,11 @@ #' #' @return X : [matrix] (T x N) of draws #' -#'@note -#' Initial script by Xiaoyu Wang - Dec 2006 +#' @note +#' Initial MATLAB's script by Xiaoyu Wang - Dec 2006 +#' #' We decompose X=U*R, where U is a uniform distribution on unit sphere and -# R is a distribution on (0,1) proportional to r^(Dims-1), i.e. the area of surface of radius r +# R is a distribution on (0,1) proportional to r^(Dims-1), i.e. the area of surface of radius r. #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. Modified: pkg/Meucci/demo/S_CornishFisher.R =================================================================== --- pkg/Meucci/demo/S_CornishFisher.R 2013-09-18 14:01:43 UTC (rev 3137) +++ pkg/Meucci/demo/S_CornishFisher.R 2013-09-18 16:20:01 UTC (rev 3138) @@ -2,7 +2,9 @@ #'assumptions as described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 5. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 223 - Cornish-Fisher approximation of the Value-at-Risk". 
+#' #' See Meucci's script for "S_CornishFisher.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -21,7 +23,7 @@ c = seq(0.001, 0.999, 0.001 ); z = qnorm( c ); -Q_CF = E_X + Sd_X * ( z + Sk_X / 6 * ( z ^ 2 - 1 ) ); +Q_CF = E_X + Sd_X * ( z + Sk_X / 6 * ( z ^ 2 - 1 ) ); Q_true = qlnorm( c,mu,sig ); x = Q_true; Modified: pkg/Meucci/demo/S_ESContributionFactors.R =================================================================== --- pkg/Meucci/demo/S_ESContributionFactors.R 2013-09-18 14:01:43 UTC (rev 3137) +++ pkg/Meucci/demo/S_ESContributionFactors.R 2013-09-18 16:20:01 UTC (rev 3138) @@ -1,22 +1,23 @@ -library(MASS); -library(Matrix); #' This script computes the expected shortfall and the contributions to ES from each factor in simulations, using #' the conditional expectation definition of the contributions as described in A. Meucci,"Risk and Asset Allocation", #' Springer, 2005, Chapter 5. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 235 - Expected shortfall and linear factor models". 
+#' #' See Meucci's script for "S_ESContributionFactors.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################### ### Inputs +if( !require( "Matrix" ) ) stop( "Matrix package installation is required to run this demo script" ); -N = 30; # number of securities -K = 10; # number of factors +N = 30; # number of securities +K = 10; # number of factors a = runif(N); # allocation -c = 0.95; # ES confidence +c = 0.95; # ES confidence ################################################################################################################### ### Generate market simulations @@ -41,6 +42,7 @@ sigma = as.matrix(.bdiag(list( eps * sigma_f, eps^2 * sigma_u))) #block diagonal matrix corr = cov2cor( sigma ); diag_sigma = sqrt( diag( sigma ) ); + # scenarios nSim = 10000; l = matrix( 1, nSim); @@ -55,17 +57,18 @@ ################################################################################################################### ### Risk management + # compute the objective Psi = M %*% a; # compute ES -th = ceiling((1-c) * nSim); # threshold +th = ceiling((1-c) * nSim); # threshold spc = matrix( 0, nSim, 1 ); spc[ 1 : th ] = 1; spc = spc / sum( spc ); Sort_Psi = sort( Psi ); -Index = order( Psi ); +Index = order( Psi ); ES_simul = t(Sort_Psi) %*% spc; # augment factor set to include residual @@ -74,8 +77,7 @@ b_ = cbind( t(a)%*%B, 1 ); # sort factors according to order induced by objective's realizations - -Sort_F_ = F_[Index, ]; +Sort_F_ = F_[Index, ]; DES_simul = matrix( NaN, 1, K+1 ); for( k in 1 : (K+1) ) { Modified: pkg/Meucci/demo/S_ESContributionsStudentT.R =================================================================== --- pkg/Meucci/demo/S_ESContributionsStudentT.R 2013-09-18 14:01:43 UTC (rev 3137) +++ pkg/Meucci/demo/S_ESContributionsStudentT.R 2013-09-18 16:20:01 UTC (rev 3138) @@ -1,12 +1,12 @@ -library(MASS); -library(Matrix); #' This script 
computes the expected shortfall and the contributions to ES from each security: #' - analytically, under the Student t assumption for the market #' - in simulations, using the conditional expectation definition of the contributions #' Described in A. Meucci,"Risk and Asset Allocation",Springer, 2005, Chapter 5. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 234 - Expected shortfall in elliptical markets III". +#' #' See Meucci's script for "S_ESContributionsStudentT.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_ExtremeValueTheory.R =================================================================== --- pkg/Meucci/demo/S_ExtremeValueTheory.R 2013-09-18 14:01:43 UTC (rev 3137) +++ pkg/Meucci/demo/S_ExtremeValueTheory.R 2013-09-18 16:20:01 UTC (rev 3138) @@ -5,7 +5,9 @@ #' Described in A. Meucci,"Risk and Asset Allocation",Springer, 2005, Chapter 5. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 229 - Extreme value theory approximation of Value-at-Risk". +#' #' See Meucci's script for "S_ExtremeValueTheory.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_InvestorsObjective.R =================================================================== --- pkg/Meucci/demo/S_InvestorsObjective.R 2013-09-18 14:01:43 UTC (rev 3137) +++ pkg/Meucci/demo/S_InvestorsObjective.R 2013-09-18 16:20:01 UTC (rev 3138) @@ -3,7 +3,9 @@ #' Allocation",Springer, 2005, Chapter 5. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 236 - Simulation of the investor?s objectives". +#' #' See Meucci's script for "S_InvestorsObjective.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -33,19 +35,19 @@ ### Compute current prices p_1 = nu_1 * s2_1; p_2 = exp( mu_2 + 0.5 * s2_2 ^ 2 ); -p = matrix( c( p_1, p_2 )); +p = matrix( c( p_1, p_2 )); ################################################################################################################## ### Generate samnple of prices at the investment horizon -N = rmvnorm(J, cbind( 0, 0 ), rbind( c(1, r), c(r, 1))); +N = rmvnorm(J, cbind( 0, 0 ), rbind( c(1, r), c(r, 1))); N_1 = N[ , 1 ]; N_2 = N[ , 2 ]; U_1 = pnorm( N_1 ); U_2 = pnorm( N_2 ); -aa = nu_1 / 2; -bb = 2 * s2_1; +aa = nu_1 / 2; +bb = 2 * s2_1; P_1 = qgamma( U_1, aa, scale = bb); P_2 = qlnorm( U_2, mu_2, sqrt(s2_2)); @@ -58,7 +60,7 @@ PnL = (P - matrix( 1, J, 1) %*% t( p )) %*% a; # generate sample of benchmark-relative wealth -K = diag(1, 2) - p %*% t(b) / (t(b) %*% p)[1]; +K = diag(1, 2) - p %*% t(b) / (t(b) %*% p)[1]; WRel = P %*% t(K) %*% a; ################################################################################################################## Modified: pkg/Meucci/demo/S_VaRContributionsUniform.R =================================================================== --- pkg/Meucci/demo/S_VaRContributionsUniform.R 2013-09-18 14:01:43 UTC (rev 3137) +++ pkg/Meucci/demo/S_VaRContributionsUniform.R 2013-09-18 16:20:01 UTC (rev 3138) @@ -1,4 +1,3 @@ - #' This script computes the VaR and the contributions to VaR from each security #' - analytically, under the elliptical-uniform assumption for the market #' - in simulations, using the conditional expectation definition of the contributions @@ -6,8 +5,10 @@ #' Allocation",Springer, 2005, Chapter 5. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
-#' See Meucci's script for "S_VaRContributionsUniform.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 222 - Value-at-Risk in elliptical markets III". +#' +#' See Meucci's script for "S_VaRContributionsUniform.m" and E 220 from the book. # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -17,8 +18,8 @@ N = 10; # market parameters (uniform on ellipsoid) -Mu = matrix(runif(N)); -A = matrix( runif(N*N), N, N) - 0.5; +Mu = matrix(runif(N)); +A = matrix( runif(N*N), N, N) - 0.5; Sigma = A * t(A); # allocation @@ -37,10 +38,11 @@ ################################################################################################################### ### Compute contributions by simulations (brute-force approach) # compute and sort the objective -Psi = M %*% a; +Psi = M %*% a; Q_sim = quantile( Psi, (1 - c) ); e = mean( abs( a )) / 100; # perturbation + DQ_simul = matrix( NaN, 1, N) ; for( n in 1 : N ) { @@ -48,10 +50,11 @@ a_e = a; a_e[ n ] = a[ n ] + e; - Psi_e = M %*% a_e; + Psi_e = M %*% a_e; Q_sim_e = quantile(Psi_e, (1 - c) ); DQ_simul[ n ] = ( Q_sim_e - Q_sim )/e; } + # compute contributions ContrQ_simul = a * t( DQ_simul ); @@ -61,8 +64,8 @@ gc = quantile(X[ ,1 ], (1 - c)); # ...the dependence on the allocation is analytical -Q_an = t(Mu) %*% a + gc * sqrt( t(a) %*% Sigma %*% a ); -DQ_an = Mu + gc * Sigma %*% a / sqrt( t(a) %*% Sigma %*% a )[1]; +Q_an = t(Mu) %*% a + gc * sqrt( t(a) %*% Sigma %*% a ); +DQ_an = Mu + gc * Sigma %*% a / sqrt( t(a) %*% Sigma %*% a )[1]; ContrQ_an = a * DQ_an; ################################################################################################################### Modified: pkg/Meucci/man/GenerateUniformDrawsOnUnitSphere.Rd =================================================================== --- pkg/Meucci/man/GenerateUniformDrawsOnUnitSphere.Rd 2013-09-18 14:01:43 UTC (rev 3137) +++ pkg/Meucci/man/GenerateUniformDrawsOnUnitSphere.Rd 2013-09-18 16:20:01 UTC (rev 
3138) @@ -1,7 +1,6 @@ \name{GenerateUniformDrawsOnUnitSphere} \alias{GenerateUniformDrawsOnUnitSphere} -\title{Generate a uniform sample on the unit hypersphere, as described in A. Meucci, - "Risk and Asset Allocation", Springer, 2005.} +\title{Generate a uniform sample on the unit hypersphere.} \usage{ GenerateUniformDrawsOnUnitSphere(J, N) } @@ -19,9 +18,10 @@ Springer, 2005. } \note{ - Initial script by Xiaoyu Wang - Dec 2006 We decompose - X=U*R, where U is a uniform distribution on unit sphere - and + Initial MATLAB's script by Xiaoyu Wang - Dec 2006 + + We decompose X=U*R, where U is a uniform distribution on + unit sphere and } \author{ Xavier Valls \email{flamejat at gmail.com} From noreply at r-forge.r-project.org Wed Sep 18 23:03:40 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 23:03:40 +0200 (CEST) Subject: [Returnanalytics-commits] r3139 - pkg/PortfolioAnalytics Message-ID: <20130918210340.36E921801C7@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-18 23:03:33 +0200 (Wed, 18 Sep 2013) New Revision: 3139 Modified: pkg/PortfolioAnalytics/DESCRIPTION Log: Updating suggests for ROI.plugin.quadprog. 
Modified: pkg/PortfolioAnalytics/DESCRIPTION =================================================================== --- pkg/PortfolioAnalytics/DESCRIPTION 2013-09-18 16:20:01 UTC (rev 3138) +++ pkg/PortfolioAnalytics/DESCRIPTION 2013-09-18 21:03:33 UTC (rev 3139) @@ -23,7 +23,7 @@ quadprog, ROI, ROI.plugin.glpk, - ROI.plugin.quadprog, + ROI.plugin.quadprog (>= 0.0.2), pso, GenSA, corpcor From noreply at r-forge.r-project.org Wed Sep 18 23:33:19 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 18 Sep 2013 23:33:19 +0200 (CEST) Subject: [Returnanalytics-commits] r3140 - pkg/PortfolioAnalytics/sandbox/symposium2013/docs Message-ID: <20130918213319.88E41185D97@r-forge.r-project.org> Author: peter_carl Date: 2013-09-18 23:33:19 +0200 (Wed, 18 Sep 2013) New Revision: 3140 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd Log: - revised flow Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-18 21:03:33 UTC (rev 3139) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-18 21:33:19 UTC (rev 3140) @@ -40,8 +40,10 @@ - Rebalancing periodically and examining out of sample performance will help refine objectives --> + # Strategic allocation ...broadly described as periodically reallocating the portfolio to achieve a long-term goal @@ -51,6 +53,96 @@ - Apply within the context of the current economic and market situation - Think systematically about preferences and constraints + + +# Portfolio preferences +Construct a portfolio that: + +* maximizes return, +* with per-asset position limits, +* with a specific univariate portfolio risk limit, +* defining risk as losses, +* considering effects of skewness and kurtosis, +* and limiting contribution of risk for constituents +* or 
equalizing component risk contribution. + + + +# Optimization frustration +Most investors would prefer: + +* to be approximately correct rather than precisely wrong +* to define risk as potential loss rather than volatility +* the flexibility to define any kind of objective and combine the constraints +* a framework for considering different sets of portfolio constraints for comparison through time +* to intuitively understand optimization through visualization + +# Risk budgeting +* Used to allocate the "risk" of a portfolio +* Decomposes the total portfolio risk into the risk contribution of each component position +* Literature on risk contribution has focused on volatility rather than downside risk +* Most financial returns series seem non-normal + +<--! Two-column slide with a facing histogram and qqplot --> + +# Measuring risk, not volatility +Measured with portfolio Conditional Value-at-Risk (CVaR) + +* Also called Expected Tail Loss (ETL) and Expected Shortfall (ES) +* ETL is the mean expected loss when the loss exceeds the VaR +* ETL has all the properties a risk measure should have to be coherent and is a convex function of the portfolio weights +* To account for skew and/or kurtosis, use Cornish-Fisher (or "modified") estimates of ETL instead (mETL) + + + +# ETL sensitivity +Modified ETL demonstrates a better fit for historical CVaR at lower confidence levels, and can break down at higher confidence levels +*Insert chart or charts* + + + +# _Ex ante_, not _ex post_ +The use of _ex ante_ risk budgets is more recent + +* Qian (2005): "risk parity portfolio" allocates portfolio variance equally +* Maillard _et al_ (2010): "equally-weighted risk contribution portfolio" or (ERC) +* Zhu _et al_ (2010): optimal mean-variance portfolio selection under constrained contributions + +We want to look at the allocation of risk through _ex ante_ downside risk contribution + +# Contribution to downside risk, not volatility +Use the modified CVaR contribution estimator 
from Boudt, _et al_ (2008) + +* CVaR contributions correspond to the conditional expectation of the return of the portfolio component when the portfolio loss is larger than its VaR loss. +* %CmETL is the ratio of the expected return on the position when the portfolio experiences a beyond-VaR loss to the expected value of the portfolio loss +* A high positive %CmETL indicates the position has a large loss when the portfolio also has a large loss +* The higher the percentage mETL, the more the portfolio downside risk is concentrated on that asset +* Allows us to directly optimize downside risk diversification +* Lends itself to a simple algorithm that computes both CVaR and component CVaR in less than a second, even for large portfolios + +We can use CVaR contributions as an objective or constraint in portfolio optimization + +# Two strategies for using downside contribution in allocation +## Equalize downside risk contribution + +* Define downside risk diversification as an objective + +## Downside risk budget + +* Impose bound constraints on the percentage mETL contributions + + +# An example +describe the example as a case study + # Selected hedge fund strategies Monthly data of EDHEC hedge fund indexes from 1998 @@ -90,48 +182,6 @@ \includegraphics[width=0.5\textwidth]{../results/EDHEC-cor-inception.png} \includegraphics[width=0.5\textwidth]{../results/EDHEC-cor-tr36m.png} -# Portfolio issues -Markowitz (1952) described an investor's objectives as: - -* maximizing some measure of gain while -* minimizing some measure of risk - -Many approaches follow Markowitz by using variance of returns for "risk" - -# Portfolio issues -Most investors would prefer: - -* to be approximately correct rather than precisely wrong -* to define risk as potential loss rather than volatility -* the flexibility to define any kind of objective and combine the constraints -* a framework for considering different sets of portfolio constraints for comparison through time -* to intuitively 
understand optimization through visualization - -# Portfolio issues -Construct a portfolio that: - -* maximizes return, -* with per-asset position limits, -* with a specific univariate portfolio risk limit, -* defining risk as losses, -* considering effects of skewness and kurtosis, -* and limiting contribution of risk for constituents -* or minimizing component risk contribution. - -Not a quadratic (or linear, or conical) problem any more. - -# Risk, not volatility - -* Expected Tail Loss (ETL) is also called Conditional Value-at-Risk (CVaR) and Expected Shortfall (ES) -* ETL is the mean expected loss when the loss exceeds the VaR -* ETL has all the properties a risk measure should have to be coherent and is a convex function of the portfolio weights -* Returns are skewed and/or kurtotic, so we use Cornish-Fisher (or "modified") estimates of ETL instead - - -# ETL sensitivity -Modified ETL demonstrates a better fit for historical CVaR at lower confidence levels, and breaks down at higher confidence levels -*Insert chart or charts* - # Add general constraints Constraints specified for each asset in the portfolio: @@ -178,6 +228,7 @@ * variance * modified ETL + + - - # Equal-weight portfolio * Provides a benchmark to evaluate the performance of an optimized portfolio against @@ -223,17 +274,39 @@ * Is the re-weighting adding or subtracting value? * Do we have a useful view of return and risk? +# Contribution of Risk in Equal Weight Portfolio +insert table + # Equal Contribution to Risk +The risk parity constraint that requires all assets to contribute to risk equally is usually too restrictive. 
+* Use the Minimum Concentration Component (MCC) risk contribution portfolio as an objective +* Minimize the largest ETL risk contribution in the portfolio +* Unconstrained, the MCC generates similar portfolios to the risk parity portfolio +* The MCC can, however, be more easily combined with other objectives and constraints + +# Constrained Risk Contribution +Risk Budget as an eighth objective? + -# Closed form optimizers +# Optimizers +## Closed-form -# Use Random Portfolios +* add list from PortfA +* discuss stress testing briefly + +## Heuristic + +* Random portfolios +* Differential evolution +* Others + +# Random Portfolios [Burns (2009)](http://www.portfolioprobe.com/blog/) describes Random Portfolios * From a portfolio seed, generate random permutations of weights that meet your constraints on each asset @@ -288,10 +361,10 @@ # Ex-ante results -Unstacked bar chart comparing allocations across objectives +scatter plot with multiple objectives # Ex-ante results -scatter plot with objectives +Unstacked bar chart comparing allocations across objectives # Ex-ante vs. 
ex-post results scatter plot with both overlaid From noreply at r-forge.r-project.org Thu Sep 19 10:29:51 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 19 Sep 2013 10:29:51 +0200 (CEST) Subject: [Returnanalytics-commits] r3141 - in pkg/Meucci: R demo man Message-ID: <20130919082951.2F4D2184D20@r-forge.r-project.org> Author: xavierv Date: 2013-09-19 10:29:50 +0200 (Thu, 19 Sep 2013) New Revision: 3141 Modified: pkg/Meucci/R/BlackLittermanFormula.R pkg/Meucci/R/ButterflyTradingFunctions.R pkg/Meucci/R/ConvertChangeInYield2Price.R pkg/Meucci/R/CovertCompoundedReturns2Price.R pkg/Meucci/R/EfficientFrontierPrices.R pkg/Meucci/R/EfficientFrontierReturns.R pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R pkg/Meucci/R/Log2Lin.R pkg/Meucci/R/PlotCompositionEfficientFrontier.R pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R pkg/Meucci/demo/S_BuyNHold.R pkg/Meucci/demo/S_CPPI.R pkg/Meucci/demo/S_MeanVarianceBenchmark.R pkg/Meucci/demo/S_MeanVarianceCalls.R pkg/Meucci/demo/S_MeanVarianceHorizon.R pkg/Meucci/demo/S_MeanVarianceOptimization.R pkg/Meucci/demo/S_UtilityMax.R pkg/Meucci/man/BlackLittermanFormula.Rd pkg/Meucci/man/ConvertChangeInYield2Price.Rd pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd pkg/Meucci/man/EfficientFrontierPrices.Rd pkg/Meucci/man/EfficientFrontierReturns.Rd pkg/Meucci/man/EfficientFrontierReturnsBenchmark.Rd pkg/Meucci/man/HorizonPricing.Rd pkg/Meucci/man/Log2Lin.Rd pkg/Meucci/man/PlotCompositionEfficientFrontier.Rd pkg/Meucci/man/PlotMarginalsNormalInverseWishart.Rd Log: - updated documentation for chapter 6 demo scripts and its functions Modified: pkg/Meucci/R/BlackLittermanFormula.R =================================================================== --- pkg/Meucci/R/BlackLittermanFormula.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/BlackLittermanFormula.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,17 +1,20 @@ -#' This function computes the 
Black-Litterman formula for the moments of the posterior normal, as described in +#' @title Computes the Black-Litterman formula for the moments of the posterior normal. +#' +#' @description This function computes the Black-Litterman formula for the moments of the posterior normal, as described in #' A. Meucci, "Risk and Asset Allocation", Springer, 2005. #' -#' @param Mu : [vector] (N x 1) prior expected values. -#' @param Sigma : [matrix] (N x N) prior covariance matrix. -#' @param P : [matrix] (K x N) pick matrix. -#' @param v : [vector] (K x 1) vector of views. -#' @param Omega : [matrix] (K x K) matrix of confidence. +#' @param Mu [vector] (N x 1) prior expected values. +#' @param Sigma [matrix] (N x N) prior covariance matrix. +#' @param P [matrix] (K x N) pick matrix. +#' @param v [vector] (K x 1) vector of views. +#' @param Omega [matrix] (K x K) matrix of confidence. #' -#' @return BLMu : [vector] (N x 1) posterior expected values. -#' @return BLSigma : [matrix] (N x N) posterior covariance matrix. +#' @return BLMu [vector] (N x 1) posterior expected values. +#' @return BLSigma [matrix] (N x N) posterior covariance matrix. #' #' @references -#' \url{http://} +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' #' See Meucci's script for "BlackLittermanFormula.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/ButterflyTradingFunctions.R =================================================================== --- pkg/Meucci/R/ButterflyTradingFunctions.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/ButterflyTradingFunctions.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -12,7 +12,7 @@ return( s ) } -#' Compute the pricing in the horizon, as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", +#' Compute the pricing in the horizon as it appears in A. Meucci, "Fully Flexible Views: Theory and Practice", #' The Risk Magazine, October 2008, p 100-106. 
#' #' @param Butterflies List of securities with some analytics computed. Modified: pkg/Meucci/R/ConvertChangeInYield2Price.R =================================================================== --- pkg/Meucci/R/ConvertChangeInYield2Price.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/ConvertChangeInYield2Price.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,7 +1,7 @@ #' @title Convert change in yield-to-maturity to price for fixed-income securities #' #' @description Convert change in yield-to-maturity to price for fixed-income securities, as described in -#' A. Meucci "Risk and Asset Allocation", Springer, 2005 +#' A. Meucci "Risk and Asset Allocation", Springer, 2005. #' #' @param Exp_DY [vector] (N x 1) expected value of change in yield to maturity #' @param Cov_DY [matrix] (N x N) covariance of change in yield to maturity @@ -13,15 +13,13 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' See Meucci's script for "ConvertChangeInYield2Price.m". #' -#' See (6.77)-(6.79) in "Risk and Asset Allocation"-Springer (2005), by A. Meucci +#' A. Meucci - "Risk and Asset Allocation"-Springer (2005). See (6.77)-(6.79). #' -#' See Meucci's script for "ConvertChangeInYield2Price.m" -#' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export - ConvertChangeInYield2Price = function( Exp_DY, Cov_DY, Times2Mat, CurrentPrices ) { Mu = log( CurrentPrices ) - Times2Mat * Exp_DY; Modified: pkg/Meucci/R/CovertCompoundedReturns2Price.R =================================================================== --- pkg/Meucci/R/CovertCompoundedReturns2Price.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/CovertCompoundedReturns2Price.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,25 +1,27 @@ -#' Convert compounded returns to prices for equity-like securities, as described in -#' A. Meucci "Risk and Asset Allocation", Springer, 2005 +#' @title Convert compounded returns to prices for equity-like securities. 
#' -#' @param Exp_Comp_Rets : [vector] (N x 1) expected values of compounded returns -#' @param Cov_Comp_Rets : [matrix] (N x N) covariance matrix of compounded returns -#' @param Starting_Prices : [vector] (N x 1) +#' @description Convert compounded returns to prices for equity-like securities, as described in +#' A. Meucci "Risk and Asset Allocation", Springer, 2005. +#' +#' @param Exp_Comp_Rets [vector] (N x 1) expected values of compounded returns +#' @param Cov_Comp_Rets [matrix] (N x N) covariance matrix of compounded returns +#' @param Starting_Prices [vector] (N x 1) #' -#' @return Exp_Prices : [vector] (N x 1) expected values of prices -#' @return Cov_Prices : [matrix] (N x N) covariance matrix of prices +#' @return Exp_Prices [vector] (N x 1) expected values of prices +#' @return Cov_Prices [matrix] (N x N) covariance matrix of prices #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See (6.77)-(6.79) in "Risk and Asset Allocation"-Springer (2005), by A. Meucci -#' See Meucci's script for "ConvertCompoundedReturns2Price.m" +#' See Meucci's script for "ConvertCompoundedReturns2Price.m". #' +#' A. Meucci - "Risk and Asset Allocation"-Springer (2005). See (6.77)-(6.79). 
+#' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export - ConvertCompoundedReturns2Price = function(Exp_Comp_Rets, Cov_Comp_Rets, Starting_Prices) { - Mu = log(Starting_Prices) + Exp_Comp_Rets; + Mu = log(Starting_Prices) + Exp_Comp_Rets; Sigma = Cov_Comp_Rets; Exp_Prices = exp( Mu + 0.5 * diag( Sigma ) ); Modified: pkg/Meucci/R/EfficientFrontierPrices.R =================================================================== --- pkg/Meucci/R/EfficientFrontierPrices.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/EfficientFrontierPrices.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,20 +1,23 @@ -#' Compute the mean-variance efficient frontier (on prices) by quadratic programming, as described in +#' @title Computes the mean-variance efficient frontier (on prices) by quadratic programming +#' +#' @description Compute the mean-variance efficient frontier (on prices) by quadratic programming, as described in #' A. Meucci "Risk and Asset Allocation", Springer, 2005 #' -#' @param NumPortf : [scalar] number of portfolio in the efficient frontier -#' @param Covariance : [matrix] (N x N) covariance matrix -#' @param ExpectedValues : [vector] (N x 1) expected returns -#' @param Current_Prices : [vector] (N x 1) current prices -#' @param Budget : [scalar] budget constraint +#' @param NumPortf [scalar] number of portfolio in the efficient frontier +#' @param Covariance [matrix] (N x N) covariance matrix +#' @param ExpectedValues [vector] (N x 1) expected returns +#' @param Current_Prices [vector] (N x 1) current prices +#' @param Budget [scalar] budget constraint #' -#' @return ExpectedValue : [vector] (NumPortf x 1) expected values of the portfolios -#' @return Std_Deviation : [vector] (NumPortf x 1) standard deviations of the portfolios -#' @return Composition : [matrix] (NumPortf x N) optimal portfolios +#' @return ExpectedValue [vector] (NumPortf x 1) expected values of the portfolios +#' @return Std_Deviation [vector] (NumPortf x 1) standard deviations of the 
portfolios +#' @return Composition [matrix] (NumPortf x N) optimal portfolios #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "EfficientFrontierReturns.m" #' +#' See Meucci's script for "EfficientFrontierReturns.m". +#' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export Modified: pkg/Meucci/R/EfficientFrontierReturns.R =================================================================== --- pkg/Meucci/R/EfficientFrontierReturns.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/EfficientFrontierReturns.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,19 +1,22 @@ -#' Compute the mean-variance efficient frontier (on returns) by quadratic programming, as described in +#' @title Compute the mean-variance efficient frontier (on returns) by quadratic programming. +#' +#' @description Compute the mean-variance efficient frontier (on returns) by quadratic programming, as described in #' A. Meucci "Risk and Asset Allocation", Springer, 2005 #' -#' @param NumPortf : [scalar] number of portfolio in the efficient frontier -#' @param Covariance : [matrix] (N x N) covariance matrix -#' @param ExpectedValues : [vector] (N x 1) expected returns -#' @param Constraints : [struct] set of constraints. Default: weights sum to one, and no-short positions +#' @param NumPortf [scalar] number of portfolio in the efficient frontier +#' @param Covariance [matrix] (N x N) covariance matrix +#' @param ExpectedValues [vector] (N x 1) expected returns +#' @param Constraints [struct] set of constraints. 
Default: weights sum to one, and no-short positions #' -#' @return ExpectedValue : [vector] (NumPortf x 1) expected values of the portfolios -#' @return Volatility : [vector] (NumPortf x 1) standard deviations of the portfolios -#' @return Composition : [matrix] (NumPortf x N) optimal portfolios +#' @return ExpectedValue [vector] (NumPortf x 1) expected values of the portfolios +#' @return Volatility [vector] (NumPortf x 1) standard deviations of the portfolios +#' @return Composition [matrix] (NumPortf x N) optimal portfolios #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "EfficientFrontierReturns.m" #' +#' See Meucci's script for "EfficientFrontierReturns.m". +#' #' @author Xavier Valls \email{flamejat@@gmail.com} #' @export Modified: pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R =================================================================== --- pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/EfficientFrontierReturnsBenchmark.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,18 +1,21 @@ -#' Compute the mean-variance efficient frontier (on returns) by quadratic programming, as described in +#' @title Computes the mean-variance efficient frontier (on returns) by quadratic programming. +#' +#' @description Compute the mean-variance efficient frontier (on returns) by quadratic programming, as described in #' A. Meucci "Risk and Asset Allocation", Springer, 2005 #' -#' @param NumPortf : [scalar] number of portfolio in the efficient frontier -#' @param Covariance : [matrix] (N x N) covariance matrix -#' @param ExpectedValues : [vector] (N x 1) expected returns -#' @param Benchmark : [vector] (N x 1) of benchmark weights -#' @param Constraints : [struct] set of constraints. 
Default: weights sum to one, and no-short positions +#' @param NumPortf [scalar] number of portfolio in the efficient frontier +#' @param Covariance [matrix] (N x N) covariance matrix +#' @param ExpectedValues [vector] (N x 1) expected returns +#' @param Benchmark [vector] (N x 1) of benchmark weights +#' @param Constraints [struct] set of constraints. Default: weights sum to one, and no-short positions #' -#' @return ExpectedValue : [vector] (NumPortf x 1) expected values of the portfolios -#' @return Volatility : [vector] (NumPortf x 1) standard deviations of the portfolios -#' @return Composition : [matrix] (NumPortf x N) optimal portfolios +#' @return ExpectedValue [vector] (NumPortf x 1) expected values of the portfolios +#' @return Volatility [vector] (NumPortf x 1) standard deviations of the portfolios +#' @return Composition [matrix] (NumPortf x N) optimal portfolios #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' #' See Meucci's script for "EfficientFrontierReturnsBenchmark.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R =================================================================== --- pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/GenerateUniformDrawsOnUnitSphere.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -15,7 +15,7 @@ # R is a distribution on (0,1) proportional to r^(Dims-1), i.e. the area of surface of radius r. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
#' See Meucci's script for "GenerateUniformDrawsOnUnitSphere.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/Log2Lin.R =================================================================== --- pkg/Meucci/R/Log2Lin.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/Log2Lin.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,14 +1,17 @@ -#' Map moments of log-returns to linear returns, as described in A. Meucci, +#' @title Maps moments of log-returns to linear returns . +#' +#' @description Map moments of log-returns to linear returns, as described in A. Meucci, #' "Risk and Asset Allocation", Springer, 2005. #' -#' @param Mu : [vector] (N x 1) -#' @param Sigma : [matrix] (N x N) +#' @param Mu [vector] (N x 1) +#' @param Sigma [matrix] (N x N) #' -#' @return M : [vector] (N x 1) -#' @return S : [matrix] (N x N) +#' @return M [vector] (N x 1) +#' @return S [matrix] (N x N) #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' #' See Meucci's script for "Log2Lin.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/PlotCompositionEfficientFrontier.R =================================================================== --- pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/PlotCompositionEfficientFrontier.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,10 +1,13 @@ -#' Plot the efficient frontier, as described in A. Meucci, +#' @title Plots the efficient frontier +#' +#' @description Plot the efficient frontier, as described in A. Meucci, #' "Risk and Asset Allocation", Springer, 2005. #' #' @param Portfolios : [matrix] (M x N) M portfolios of size N (weights) #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
+#' #' See Meucci's script for "PlotCompositionEfficientFrontier.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R =================================================================== --- pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/R/PlotMarginalsNormalInverseWishart.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,13 +1,13 @@ #' Plot the marginals of the normal-inverse-Whishart model. #' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005 #' -#' @param Mu_Simul : [] -#' @param InvSigma_Simul : [] -#' @param Mu_0 : [] -#' @param T_0 : [] -#' @param Sigma_0 : [] -#' @param Nu_0 : [] -#' @param Legend : [] +#' @param Mu_Simul [] +#' @param InvSigma_Simul [] +#' @param Mu_0 [] +#' @param T_0 [] +#' @param Sigma_0 [] +#' @param Nu_0 [] +#' @param Legend [] #' #' @note Numerically and analytically the marginal pdf of #' - the first entry of the random vector Mu @@ -17,6 +17,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' #' See Meucci's script for "PlotMarginalsNormalInverseWishart.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_BuyNHold.R =================================================================== --- pkg/Meucci/demo/S_BuyNHold.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/demo/S_BuyNHold.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -2,7 +2,9 @@ #' Springer, 2005, Chapter 6. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 261 - Buy and hold". 
+#' #' See Meucci's script for "S_BuyNHold.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CPPI.R =================================================================== --- pkg/Meucci/demo/S_CPPI.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/demo/S_CPPI.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -2,8 +2,10 @@ #' A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 6. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_CPPI.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 264 - Constant proportion portfolio insurance". +#' +#' See Meucci's script for "S_CPPI.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_MeanVarianceBenchmark.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceBenchmark.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/demo/S_MeanVarianceBenchmark.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -6,8 +6,10 @@ #' Described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 6. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_MeanVarianceBenchmark.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 257 - Benchmark driven allocation I" and "E 258 - Benchmark driven allocation II". +#' +#' See Meucci's script for "S_MeanVarianceBenchmark.m" and "E 255 - Mean-variance pitfalls: two-step approach II" from the book. 
# #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_MeanVarianceCalls.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceCalls.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/demo/S_MeanVarianceCalls.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -2,7 +2,9 @@ #' Described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 6. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 259 - Mean-variance for derivatives". +#' +#' See Meucci's script for "S_MeanVarianceCalls.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -66,9 +68,9 @@ ExpectedValues = matrix( apply( L, 2, mean) ); Covariance = cov( L ); NumPortf = 40; -#[e, vol, w] = -EFR = EfficientFrontierReturns( NumPortf, Covariance, ExpectedValues, Constr ); +EFR = PlotVolVsCompositionEfficientFrontiericientFrontierReturns( NumPortf, Covariance, ExpectedValues, Constr ); + ################################################################################################################## ### Plots PlotVolVsCompositionEfficientFrontier( EFR$Composition, EFR$Volatility ); Modified: pkg/Meucci/demo/S_MeanVarianceHorizon.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceHorizon.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/demo/S_MeanVarianceHorizon.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -5,8 +5,10 @@ #' Described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 6. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_MeanVarianceHorizon.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 256 - 
Mean-variance pitfalls: horizon effect". +#' +#' See Meucci's script for "S_MeanVarianceHorizon.m" and "E 255 - Mean-variance pitfalls: two-step approach II" from the book. # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_MeanVarianceOptimization.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceOptimization.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/demo/S_MeanVarianceOptimization.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,13 +1,15 @@ #' This script projects the distribution of the market invariants for the bond and stock markets #' (i.e. the changes in yield to maturity and compounded returns) from the estimation interval to the investment -#' horizon +#' horizon. #' Then it computes the distribution of prices at the investment horizon and performs the two-step mean-variance #' optimization. #' Described in A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 6. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_MeanVarianceHorizon.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 254 - Mean-variance pitfalls: two-step approach I" and "E 255 - Mean-variance pitfalls: two-step approach II". 
+#' +#' See Meucci's script for "S_MeanVarianceOptimization.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -100,7 +102,7 @@ SDev_Hrzn_DY_Hat = sigmas * sqrt(tau / tau_tilde); Corr_Hrzn_DY_Hat = matrix( 1, N, N ); # full co-dependence Cov_Hrzn_DY_Hat = diag(SDev_Hrzn_DY_Hat, length( SDev_Hrzn_DY_Hat)) %*% Corr_Hrzn_DY_Hat %*% diag(SDev_Hrzn_DY_Hat, length( SDev_Hrzn_DY_Hat)); -#[BondExp_Prices, BondCov_Prices] + CCY2P = ConvertChangeInYield2Price(Exp_Hrzn_DY_Hat, Cov_Hrzn_DY_Hat, u_minus_tau, BondCurrent_Prices_Shifted); print( CCY2P$Exp_Prices ); print( CCY2P$Cov_Prices ); @@ -121,8 +123,6 @@ NumPortf = 40; # frontier with QP (no short-sales) -#[ExpectedValue, EFP$Std_Deviation, EFP$Composition] - EFP = EfficientFrontierPrices( NumPortf, S, E,Current_Prices, Budget ); # step 2: ...evaluate satisfaction for all EFP$Composition on the frontier ... Modified: pkg/Meucci/demo/S_UtilityMax.R =================================================================== --- pkg/Meucci/demo/S_UtilityMax.R 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/demo/S_UtilityMax.R 2013-09-19 08:29:50 UTC (rev 3141) @@ -2,8 +2,10 @@ #' A. Meucci,"Risk and Asset Allocation", Springer, 2005, Chapter 6. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_UtilityMax.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 263 - Utility maximization II". +#' +#' See Meucci's script for "S_UtilityMax.m" and "E 262 - Utility maximization I" from the book. 
# #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/man/BlackLittermanFormula.Rd =================================================================== --- pkg/Meucci/man/BlackLittermanFormula.Rd 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/man/BlackLittermanFormula.Rd 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,25 +1,24 @@ \name{BlackLittermanFormula} \alias{BlackLittermanFormula} -\title{This function computes the Black-Litterman formula for the moments of the posterior normal, as described in -A. Meucci, "Risk and Asset Allocation", Springer, 2005.} +\title{Computes the Black-Litterman formula for the moments of the posterior normal.} \usage{ BlackLittermanFormula(Mu, Sigma, P, v, Omega) } \arguments{ - \item{Mu}{: [vector] (N x 1) prior expected values.} + \item{Mu}{[vector] (N x 1) prior expected values.} - \item{Sigma}{: [matrix] (N x N) prior covariance matrix.} + \item{Sigma}{[matrix] (N x N) prior covariance matrix.} - \item{P}{: [matrix] (K x N) pick matrix.} + \item{P}{[matrix] (K x N) pick matrix.} - \item{v}{: [vector] (K x 1) vector of views.} + \item{v}{[vector] (K x 1) vector of views.} - \item{Omega}{: [matrix] (K x K) matrix of confidence.} + \item{Omega}{[matrix] (K x K) matrix of confidence.} } \value{ - BLMu : [vector] (N x 1) posterior expected values. + BLMu [vector] (N x 1) posterior expected values. - BLSigma : [matrix] (N x N) posterior covariance matrix. + BLSigma [matrix] (N x N) posterior covariance matrix. } \description{ This function computes the Black-Litterman formula for @@ -30,7 +29,9 @@ Xavier Valls \email{flamejat at gmail.com} } \references{ - \url{http://} See Meucci's script for - "BlackLittermanFormula.m" + A. Meucci - "Exercises in Advanced Risk and Portfolio + Management" \url{http://symmys.com/node/170}. 
+ + See Meucci's script for "BlackLittermanFormula.m" } Modified: pkg/Meucci/man/ConvertChangeInYield2Price.Rd =================================================================== --- pkg/Meucci/man/ConvertChangeInYield2Price.Rd 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/man/ConvertChangeInYield2Price.Rd 2013-09-19 08:29:50 UTC (rev 3141) @@ -24,18 +24,17 @@ \description{ Convert change in yield-to-maturity to price for fixed-income securities, as described in A. Meucci "Risk - and Asset Allocation", Springer, 2005 + and Asset Allocation", Springer, 2005. } \author{ Xavier Valls \email{flamejat at gmail.com} } \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio - Management" \url{http://symmys.com/node/170}, + Management" \url{http://symmys.com/node/170}, See + Meucci's script for "ConvertChangeInYield2Price.m". - See (6.77)-(6.79) in "Risk and Asset Allocation"-Springer - (2005), by A. Meucci - - See Meucci's script for "ConvertChangeInYield2Price.m" + A. Meucci - "Risk and Asset Allocation"-Springer (2005). + See (6.77)-(6.79). } Modified: pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd =================================================================== --- pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/man/ConvertCompoundedReturns2Price.Rd 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,29 +1,28 @@ \name{ConvertCompoundedReturns2Price} \alias{ConvertCompoundedReturns2Price} -\title{Convert compounded returns to prices for equity-like securities, as described in -A. 
Meucci "Risk and Asset Allocation", Springer, 2005} +\title{Convert compounded returns to prices for equity-like securities.} \usage{ ConvertCompoundedReturns2Price(Exp_Comp_Rets, Cov_Comp_Rets, Starting_Prices) } \arguments{ - \item{Exp_Comp_Rets}{: [vector] (N x 1) expected values - of compounded returns} + \item{Exp_Comp_Rets}{[vector] (N x 1) expected values of + compounded returns} - \item{Cov_Comp_Rets}{: [matrix] (N x N) covariance matrix + \item{Cov_Comp_Rets}{[matrix] (N x N) covariance matrix of compounded returns} - \item{Starting_Prices}{: [vector] (N x 1)} + \item{Starting_Prices}{[vector] (N x 1)} } \value{ - Exp_Prices : [vector] (N x 1) expected values of prices + Exp_Prices [vector] (N x 1) expected values of prices - Cov_Prices : [matrix] (N x N) covariance matrix of prices + Cov_Prices [matrix] (N x N) covariance matrix of prices } \description{ Convert compounded returns to prices for equity-like securities, as described in A. Meucci "Risk and Asset - Allocation", Springer, 2005 + Allocation", Springer, 2005. } \author{ Xavier Valls \email{flamejat at gmail.com} @@ -31,8 +30,9 @@ \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. See - (6.77)-(6.79) in "Risk and Asset Allocation"-Springer - (2005), by A. Meucci See Meucci's script for - "ConvertCompoundedReturns2Price.m" + Meucci's script for "ConvertCompoundedReturns2Price.m". + + A. Meucci - "Risk and Asset Allocation"-Springer (2005). + See (6.77)-(6.79). } Modified: pkg/Meucci/man/EfficientFrontierPrices.Rd =================================================================== --- pkg/Meucci/man/EfficientFrontierPrices.Rd 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/man/EfficientFrontierPrices.Rd 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,32 +1,30 @@ \name{EfficientFrontierPrices} \alias{EfficientFrontierPrices} -\title{Compute the mean-variance efficient frontier (on prices) by quadratic programming, as described in -A. 
Meucci "Risk and Asset Allocation", Springer, 2005} +\title{Computes the mean-variance efficient frontier (on prices) by quadratic programming} \usage{ EfficientFrontierPrices(NumPortf, Covariance, ExpectedValues, Current_Prices, Budget) } \arguments{ - \item{NumPortf}{: [scalar] number of portfolio in the + \item{NumPortf}{[scalar] number of portfolio in the efficient frontier} - \item{Covariance}{: [matrix] (N x N) covariance matrix} + \item{Covariance}{[matrix] (N x N) covariance matrix} - \item{ExpectedValues}{: [vector] (N x 1) expected - returns} + \item{ExpectedValues}{[vector] (N x 1) expected returns} - \item{Current_Prices}{: [vector] (N x 1) current prices} + \item{Current_Prices}{[vector] (N x 1) current prices} - \item{Budget}{: [scalar] budget constraint} + \item{Budget}{[scalar] budget constraint} } \value{ - ExpectedValue : [vector] (NumPortf x 1) expected values + ExpectedValue [vector] (NumPortf x 1) expected values of + the portfolios + + Std_Deviation [vector] (NumPortf x 1) standard deviations of the portfolios - Std_Deviation : [vector] (NumPortf x 1) standard - deviations of the portfolios - - Composition : [matrix] (NumPortf x N) optimal portfolios + Composition [matrix] (NumPortf x N) optimal portfolios } \description{ Compute the mean-variance efficient frontier (on prices) @@ -38,7 +36,8 @@ } \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio - Management" \url{http://symmys.com/node/170}. See - Meucci's script for "EfficientFrontierReturns.m" + Management" \url{http://symmys.com/node/170}. + + See Meucci's script for "EfficientFrontierReturns.m". 
} Modified: pkg/Meucci/man/EfficientFrontierReturns.Rd =================================================================== --- pkg/Meucci/man/EfficientFrontierReturns.Rd 2013-09-18 21:33:19 UTC (rev 3140) +++ pkg/Meucci/man/EfficientFrontierReturns.Rd 2013-09-19 08:29:50 UTC (rev 3141) @@ -1,31 +1,29 @@ \name{EfficientFrontierReturns} \alias{EfficientFrontierReturns} -\title{Compute the mean-variance efficient frontier (on returns) by quadratic programming, as described in -A. Meucci "Risk and Asset Allocation", Springer, 2005} +\title{Compute the mean-variance efficient frontier (on returns) by quadratic programming.} \usage{ EfficientFrontierReturns(NumPortf, Covariance, ExpectedValues, Constraints = NULL) } \arguments{ - \item{NumPortf}{: [scalar] number of portfolio in the + \item{NumPortf}{[scalar] number of portfolio in the efficient frontier} - \item{Covariance}{: [matrix] (N x N) covariance matrix} + \item{Covariance}{[matrix] (N x N) covariance matrix} - \item{ExpectedValues}{: [vector] (N x 1) expected - returns} + \item{ExpectedValues}{[vector] (N x 1) expected returns} - \item{Constraints}{: [struct] set of constraints. - Default: weights sum to one, and no-short positions} + \item{Constraints}{[struct] set of constraints. Default: + weights sum to one, and no-short positions} } \value{ - ExpectedValue : [vector] (NumPortf x 1) expected values - of the portfolios + ExpectedValue [vector] (NumPortf x 1) expected values of + the portfolios - Volatility : [vector] (NumPortf x 1) standard deviations - of the portfolios + Volatility [vector] (NumPortf x 1) standard deviations of + the portfolios - Composition : [matrix] (NumPortf x N) optimal portfolios + Composition [matrix] (NumPortf x N) optimal portfolios } \description{ Compute the mean-variance efficient frontier (on returns) @@ -37,7 +35,8 @@ } \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio - Management" \url{http://symmys.com/node/170}. 
See - Meucci's script for "EfficientFrontierReturns.m" + Management" \url{http://symmys.com/node/170}. + [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3141 From noreply at r-forge.r-project.org Thu Sep 19 11:43:24 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 19 Sep 2013 11:43:24 +0200 (CEST) Subject: [Returnanalytics-commits] r3142 - in pkg/Meucci: R demo man Message-ID: <20130919094324.973AF185E4B@r-forge.r-project.org> Author: xavierv Date: 2013-09-19 11:43:24 +0200 (Thu, 19 Sep 2013) New Revision: 3142 Modified: pkg/Meucci/R/RandNormalInverseWishart.R pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R pkg/Meucci/demo/S_CorrelationPriorUniform.R pkg/Meucci/demo/S_MarkovChainMonteCarlo.R pkg/Meucci/man/RandNormalInverseWishart.Rd Log: - updated documentation for chapter 7 demo scripts and its functions Modified: pkg/Meucci/R/RandNormalInverseWishart.R =================================================================== --- pkg/Meucci/R/RandNormalInverseWishart.R 2013-09-19 08:29:50 UTC (rev 3141) +++ pkg/Meucci/R/RandNormalInverseWishart.R 2013-09-19 09:43:24 UTC (rev 3142) @@ -1,20 +1,21 @@ - -#' Generates a multivariate i.i.d. sample of lenght J from the normal-inverse-Wishart distribution, as described in +#' @title Generates a multivariate i.i.d. sample of lenght J from the normal-inverse-Wishart distribution. +#' +#' @description Generates a multivariate i.i.d. sample of lenght J from the normal-inverse-Wishart distribution, as described in #' A. Meucci "Risk and Asset Allocation", Springer, 2005. #' -#' @param Mu_0 : [vector] -#' @param T_0 : [scalar] -#' @param Sigma_0 : [matrix] -#' @param nu_0 : [scalar] -#' @param J : [scalar] +#' @param Mu_0 [vector] location parameter. +#' @param T_0 [scalar] number of observations. +#' @param Sigma_0 [matrix] scatter parameter. +#' @param nu_0 [scalar] degrees of freedom. +#' @param J [scalar] number of simulations to compute. 
#' -#' @return Mu : [vector] -#' @return Sigma : [matrix] -#' @return InvSigma : [matrix] +#' @return Mu [vector] location parameter from the normal-inverse-Wishart distribution. +#' @return Sigma [matrix] dispersion parameter from the normal-inverse-Wishart distribution. +#' @return InvSigma [matrix] inverse of the dispersion parameter from the normal-inverse-Wishart distribution. #' -#' @note -#' Mu|Sigma ~ N(Mu_0,Sigma/T_0) -#' inv(Sigma) ~ W(Nu_0,inv(Sigma_0)/Nu_0) +#' @note +#' \deqn{\mu\| \Sigma \sim N(\mu_{0}, \frac{\Sigma}{T_{0}}) }{Mu|Sigma ~ N(Mu_0,Sigma/T_0)} +#' \deqn{\Sigma^{-1} \sim W(\nu_{0},\frac{\Sigma_{0}^{-1}}{\nu_{0}})}{inv(Sigma) ~ W(Nu_0,inv(Sigma_0)/Nu_0)} #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. Modified: pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R =================================================================== --- pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R 2013-09-19 08:29:50 UTC (rev 3141) +++ pkg/Meucci/demo/S_AnalyzeNormalInverseWishart.R 2013-09-19 09:43:24 UTC (rev 3142) @@ -4,7 +4,9 @@ #' Described in A. Meucci,"Risk and Asset Allocation",Springer, 2005, Chapter 7. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 282 - Bayesian: normal-inverse-Wishart posterior". +#' #' See Meucci's script for "S_AnalyzeNormalInverseWishart.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_CorrelationPriorUniform.R =================================================================== --- pkg/Meucci/demo/S_CorrelationPriorUniform.R 2013-09-19 08:29:50 UTC (rev 3141) +++ pkg/Meucci/demo/S_CorrelationPriorUniform.R 2013-09-19 09:43:24 UTC (rev 3142) @@ -3,7 +3,9 @@ #' Chapter 7. #' #' @references -#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 281 - Bayesian: prior on correlation". +#' #' See Meucci's script for "S_CorrelationPriorUniform.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -18,13 +20,13 @@ ### Compute correlations in all scenarios CorrsAsTensor = array(0, dim = c(J,N,N) ); Eigs = NULL; -j = 1; +j = 1; while( j < J ) { - C = 2 * matrix( runif(K), 1, K ) - 1; + C = 2 * matrix( runif(K), 1, K ) - 1; Corr = diag( 1, N ); - k = 0; + k = 0; for( n in 1 : ( N - 1 ) ) { for( m in ( n + 1 ) : N ) @@ -58,7 +60,7 @@ ##################################################################################################################### ### Plots # univariate marginals -K = nrow( CorrsAsEntries ); +K = nrow( CorrsAsEntries ); Nbins = round( 5 * log( J ) ); for( k in 1 : K ) { Modified: pkg/Meucci/demo/S_MarkovChainMonteCarlo.R =================================================================== --- pkg/Meucci/demo/S_MarkovChainMonteCarlo.R 2013-09-19 08:29:50 UTC (rev 3141) +++ pkg/Meucci/demo/S_MarkovChainMonteCarlo.R 2013-09-19 09:43:24 UTC (rev 3142) @@ -2,7 +2,9 @@ #' Springer, 2005, Chapter 7. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 280 - Markov chain Monte Carlo". 
+#' #' See Meucci's script for "S_MarkovChainMonteCarlo.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -19,9 +21,10 @@ ################################################################################################################## ### Set up MH algorithm nSim = 10000; -xt = matrix( NaN, nSim, 1); +xt = matrix( NaN, nSim, 1); +nacc = 0; xt[ 1 ] = 0; -nacc = 0; + for( i in 2 : nSim ) { # normal candidate @@ -30,12 +33,13 @@ f1 = kernel( r ); # kernel at past f2 = kernel( xt[ i-1 ] ); - prob = f1 / f2; + + prob = f1 / f2; xt[ i ] = xt[ i-1 ]; if( prob > 1 || runif(1) > (1 - prob) ) { xt[ i ] = r; - nacc = nacc + 1; + nacc = nacc + 1; } } ################################################################################################################## Modified: pkg/Meucci/man/RandNormalInverseWishart.Rd =================================================================== --- pkg/Meucci/man/RandNormalInverseWishart.Rd 2013-09-19 08:29:50 UTC (rev 3141) +++ pkg/Meucci/man/RandNormalInverseWishart.Rd 2013-09-19 09:43:24 UTC (rev 3142) @@ -1,27 +1,29 @@ \name{RandNormalInverseWishart} \alias{RandNormalInverseWishart} -\title{Generates a multivariate i.i.d. sample of lenght J from the normal-inverse-Wishart distribution, as described in -A. Meucci "Risk and Asset Allocation", Springer, 2005.} +\title{Generates a multivariate i.i.d. sample of lenght J from the normal-inverse-Wishart distribution.} \usage{ RandNormalInverseWishart(Mu_0, T_0, Sigma_0, nu_0, J) } \arguments{ - \item{Mu_0}{: [vector]} + \item{Mu_0}{[vector] location parameter.} - \item{T_0}{: [scalar]} + \item{T_0}{[scalar] number of observations.} - \item{Sigma_0}{: [matrix]} + \item{Sigma_0}{[matrix] scatter parameter.} - \item{nu_0}{: [scalar]} + \item{nu_0}{[scalar] degrees of freedom.} - \item{J}{: [scalar]} + \item{J}{[scalar] number of simulations to compute.} } \value{ - Mu : [vector] + Mu [vector] location parameter from the + normal-inverse-Wishart distribution. 
- Sigma : [matrix] + Sigma [matrix] dispersion parameter from the + normal-inverse-Wishart distribution. - InvSigma : [matrix] + InvSigma [matrix] inverse of the dispersion parameter + from the normal-inverse-Wishart distribution. } \description{ Generates a multivariate i.i.d. sample of lenght J from @@ -29,8 +31,10 @@ A. Meucci "Risk and Asset Allocation", Springer, 2005. } \note{ - Mu|Sigma ~ N(Mu_0,Sigma/T_0) inv(Sigma) ~ - W(Nu_0,inv(Sigma_0)/Nu_0) + \deqn{\mu\| \Sigma \sim N(\mu_{0}, \frac{\Sigma}{T_{0}}) + }{Mu|Sigma ~ N(Mu_0,Sigma/T_0)} \deqn{\Sigma^{-1} \sim + W(\nu_{0},\frac{\Sigma_{0}^{-1}}{\nu_{0}})}{inv(Sigma) ~ + W(Nu_0,inv(Sigma_0)/Nu_0)} } \author{ Xavier Valls \email{flamejat at gmail.com} From noreply at r-forge.r-project.org Thu Sep 19 19:31:52 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 19 Sep 2013 19:31:52 +0200 (CEST) Subject: [Returnanalytics-commits] r3143 - pkg/PortfolioAnalytics/vignettes Message-ID: <20130919173152.B4662185AC9@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-19 19:31:52 +0200 (Thu, 19 Sep 2013) New Revision: 3143 Modified: pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw pkg/PortfolioAnalytics/vignettes/portfolio_vignette.pdf Log: Making some modifications portfolio_vignette to improve charts. Modified: pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw =================================================================== --- pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw 2013-09-19 09:43:24 UTC (rev 3142) +++ pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw 2013-09-19 17:31:52 UTC (rev 3143) @@ -322,13 +322,13 @@ PortfolioAnalytics has three methods to generate random portfolios. \begin{enumerate} \item The 'sample' method to generate random portfolios is based on an idea by Pat Burns. This is the most flexible method, but also the slowest, and can generate portfolios to satisfy leverage, box, group, and position limit constraints. 
-\item The 'simplex' method to generate random portfolios is based on a paper by W. T. Shaw. The simplex method is useful to generate random portfolios with the full investment constraint, where the sum of the weights is equal to 1, and min box constraints. Values for \code{min\_sum} and \code{max\_sum} of the leverage constraint will be ignored, the sum of weights will equal 1. All other constraints such as the box constraint max, group and position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. -\item The 'grid' method to generate random portfolios is based on the \code{gridSearch} function in package \verb"NMOF". The grid search method only satisfies the \code{min} and \code{max} box constraints. The \code{min\_sum} and \code{max\_sum} leverage constraints will likely be violated and the weights in the random portfolios should be normalized. Normalization may cause the box constraints to be violated and will be penalized in \code{constrained\_objective}. +\item The 'simplex' method to generate random portfolios is based on a paper by W. T. Shaw. The simplex method is useful to generate random portfolios with the full investment constraint, where the sum of the weights is equal to 1, and min box constraints. Values for \code{min\_sum} and \code{max\_sum} of the leverage constraint will be ignored, the sum of weights will equal 1. All other constraints such as the box constraint max, group and position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. Another key point to note is that the solution may not be along the vertexes depending on the objective. For example, a risk budget objective will likely place the portfolio somewhere on the interior. 
+\item The 'grid' method to generate random portfolios is based on the \code{gridSearch} function in package \verb"NMOF". The grid search method only satisfies the \code{min} and \code{max} box constraints. The \code{min\_sum} and \code{max\_sum} leverage constraint will likely be violated and the weights in the random portfolios should be normalized. Normalization may cause the box constraints to be violated and will be penalized in \code{constrained\_objective}. \end{enumerate} The following plots illustrate the various methods to generate random portfolios. -<>= +<>= R <- edhec[, 1:4] # set up simple portfolio with leverage and box constraints @@ -354,7 +354,8 @@ tmp3.StdDev <- apply(rp3, 1, function(x) StdDev(R=R, weights=x)) # plot feasible portfolios -plot(x=tmp1.StdDev, y=tmp1.mean, col="gray", main="Random Portfolio Methods") +plot(x=tmp1.StdDev, y=tmp1.mean, col="gray", main="Random Portfolio Methods", + ylab="mean", xlab="StdDev") points(x=tmp2.StdDev, y=tmp2.mean, col="red", pch=2) points(x=tmp3.StdDev, y=tmp3.mean, col="lightgreen", pch=5) legend("bottomright", legend=c("sample", "simplex", "grid"), @@ -381,7 +382,7 @@ Figure 2 shows the feasible space varying the fev values. -The \code{fev} argument can be passed in as a vector for more even coverage of the feasible space. The default value is \code{fev=0:5}. +The \code{fev} argument can be passed in as a vector for more control over the coverage of the feasible space. The default value is \code{fev=0:5}. <>= par(mfrow=c(1, 2)) # simplex @@ -401,8 +402,6 @@ par(mfrow=c(1,1)) @ -Figure 3 shows the feasible space comparing the 'sample' and 'simplex' methods to generate random portfolios. - \subsection{pso} PortfolioAnalytics uses the \code{psoptim} function from the R package \verb"pso". Particle swarm optimization is a heuristic optimization algorithm. See \code{?psoptim} and the references contained therein for more information. 
@@ -458,12 +457,11 @@ print(opt_maxret) @ -Chart the weights and optimal portfolio in risk-return space. -<>= -chart.Weights(opt_maxret) -chart.RiskReward(opt_maxret, risk.col="StdDev", return.col="mean", - main="Maximum Return Optimization", chart.assets=TRUE, - xlim=c(0, 0.05)) +Chart the weights and optimal portfolio in risk-return space. The weights and a risk-reward scatter plot can be plotted separately as shown below with the \code{chart.Weights} and \code{chart.RiskReward} functions. The \code{plot} function will plot the weights and risk-reward scatter together. +<>= +plot(opt_maxret, risk.col="StdDev", return.col="mean", + main="Maximum Return Optimization", chart.assets=TRUE, + xlim=c(0, 0.05), ylim=c(0,0.0085)) @ \subsection{Minimize variance with ROI} @@ -480,11 +478,10 @@ @ Chart the weights and optimal portfolio in risk-return space. -<>= -chart.Weights(opt_minvar) -chart.RiskReward(opt_minvar, risk.col="StdDev", return.col="mean", - main="Minimum Variance Optimization", chart.assets=TRUE, - xlim=c(0, 0.05)) +<>= +plot(opt_minvar, risk.col="StdDev", return.col="mean", + main="Minimum Variance Optimization", chart.assets=TRUE, + xlim=c(0, 0.05), ylim=c(0,0.0085)) @ \subsection{Maximize quadratic utility with ROI} @@ -501,11 +498,10 @@ print(opt_qu) @ -<>= -chart.Weights(opt_qu) -chart.RiskReward(opt_qu, risk.col="StdDev", return.col="mean", - main="Quadratic Utility Optimization", chart.assets=TRUE, - xlim=c(0, 0.05)) +<>= +plot(opt_qu, risk.col="StdDev", return.col="mean", + main="Quadratic Utility Optimization", chart.assets=TRUE, + xlim=c(0, 0.05), ylim=c(0, 0.0085)) @ \subsection{Minimize expected tail loss with ROI} @@ -521,11 +517,10 @@ print(opt_etl) @ -<>= -chart.Weights(opt_etl) -chart.RiskReward(opt_etl, risk.col="ES", return.col="mean", - main="ETL Optimization", chart.assets=TRUE, - xlim=c(0, 0.14)) +<>= +plot(opt_etl, risk.col="ES", return.col="mean", + main="ETL Optimization", chart.assets=TRUE, + xlim=c(0, 0.14), ylim=c(0,0.0085)) @ 
\subsection{Maximize mean return per unit ETL with random portfolios} @@ -552,23 +547,22 @@ @ Chart the optimal weights and optimal portfolio in risk-return space. Because the optimization was run with \code{trace=TRUE}, the chart of the optimal portfolio also includes the trace portfolios of the optimization. This is usefule to visualize the feasible space of the portfolios. The 'neighbor' portfolios relative to the optimal portfolio weights can be included the chart of the optimal weights. -<>= -chart.Weights(opt_meanETL, neighbors=25) -chart.RiskReward(opt_meanETL, risk.col="ETL", return.col="mean", - main="mean-ETL Optimization") +<>= +plot(opt_meanETL, risk.col="ETL", return.col="mean", + main="mean-ETL Optimization", neighbors=25) @ Calculate and plot the portfolio component ETL contribution. -<>= +<>= pct_contrib <- ES(R=R, p=0.95, portfolio_method="component", weights=extractWeights(opt_meanETL)) -barplot(pct_contrib$pct_contrib_MES, cex.names=0.8, las=3) +barplot(pct_contrib$pct_contrib_MES, cex.names=0.8, las=3, col="lightblue") @ This figure shows that the Equity Market Nuetral strategy has greater than 50\% risk contribution. A risk budget objective can be added to limit risk contribution percentage to 40\%. \subsection{Maximize mean return per unit ETL with ETL risk budgets} -Add objectives. +Add objectives to maximize mean return per unit ETL with 40\% limit ETL risk budgets. <<>>= # change the box constraints to long only init$constraints[[2]]$min <- rep(0, 6) @@ -590,21 +584,20 @@ print(opt_rb_meanETL) @ -<>= -chart.Weights(opt_rb_meanETL) -chart.RiskReward(opt_rb_meanETL, risk.col="ETL", return.col="mean", - main="Risk Budget mean-ETL Optimization", - xlim=c(0,0.12), ylim=c(0.005,0.009)) +<>= +plot(opt_rb_meanETL, risk.col="ETL", return.col="mean", + main="Risk Budget mean-ETL Optimization", + xlim=c(0,0.12), ylim=c(0.005,0.009)) @ Chart the contribution to risk in percentage terms. 
-<>= +<>= chart.RiskBudget(opt_rb_meanETL, risk.type="percentage", neighbors=25) @ \subsection{Maximize mean return per unit ETL with ETL equal contribution to risk} -Add objectives. +Add objective to maximize mean return per unit ETL with ETL equal contribution to risk. <<>>= eq_meanETL <- add.objective(portfolio=init, type="return", name="mean") eq_meanETL <- add.objective(portfolio=eq_meanETL, type="risk", name="ETL", @@ -624,15 +617,14 @@ @ Chart the optimal weights and optimal portfolio in risk-return space. -<>= -chart.Weights(opt_eq_meanETL) -chart.RiskReward(opt_eq_meanETL, risk.col="ETL", return.col="mean", - main="Risk Budget mean-ETL Optimization", - xlim=c(0,0.12), ylim=c(0.005,0.009)) +<>= +plot(opt_eq_meanETL, risk.col="ETL", return.col="mean", + main="Risk Budget mean-ETL Optimization", + xlim=c(0,0.12), ylim=c(0.005,0.009)) @ Chart the contribution to risk in percentage terms. It is clear in this chart that the optimization results in a near equal risk contribution portfolio. -<>= +<>= chart.RiskBudget(opt_eq_meanETL, risk.type="percentage", neighbors=25) @ @@ -655,25 +647,25 @@ obj_combine <- extractObjectiveMeasures(opt_combine) @ -<>= +<>= chart.Weights(opt_combine, plot.type="bar", legend.loc="topleft", ylim=c(0, 1)) @ Chart the optimal portfolios of each optimization in risk-return space. 
-<>= +<>= chart.RiskReward(opt_combine, risk.col="ETL", return.col="mean", - main="ETL Optimization Comparison", xlim=c(0.018, 0.025), + main="ETL Optimization Comparison", xlim=c(0.018, 0.024), ylim=c(0.005, 0.008)) @ Calculate the STARR of each optimization -<>= +<>= STARR <- obj_combine[, "mean"] / obj_combine[, "ETL"] barplot(STARR, col="blue", cex.names=0.8, cex.axis=0.8, las=3, main="STARR", ylim=c(0,1)) @ -<>= +<>= chart.RiskBudget(opt_combine, match.col="ETL", risk.type="percent", ylim=c(0,1), legend.loc="topright") @ Modified: pkg/PortfolioAnalytics/vignettes/portfolio_vignette.pdf =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Fri Sep 20 06:34:38 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 20 Sep 2013 06:34:38 +0200 (CEST) Subject: [Returnanalytics-commits] r3144 - pkg/PortfolioAnalytics/sandbox/symposium2013/docs Message-ID: <20130920043438.E1F09183E9B@r-forge.r-project.org> Author: peter_carl Date: 2013-09-20 06:34:38 +0200 (Fri, 20 Sep 2013) New Revision: 3144 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd Log: - revised flow Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-19 17:31:52 UTC (rev 3143) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-20 04:34:38 UTC (rev 3144) @@ -53,6 +53,37 @@ - Apply within the context of the current economic and market situation - Think systematically about preferences and constraints +Here we'll consider a strategic allocation to hedge funds + +# Selected hedge fund strategies +Monthly data of EDHEC hedge fund indexes from 1998 + +## Relative Value + +* Fixed Income Arb +* Convertible Arb +* Equity Market Neutral +* Event 
Driven + +## Directional + +* Equity Long/Short +* Global Macro +* CTA + +# Index Performance +\includegraphics[width=1.0\textwidth]{../results/EDHEC-Cumulative-Returns.png} + +# Index Performance +\includegraphics[width=1.0\textwidth]{../results/EDHEC-RollPerf.png} + +# Index Performance +Add table of relevant statistics here + +# Ex-post Correlations +\includegraphics[width=0.5\textwidth]{../results/EDHEC-cor-inception.png} +\includegraphics[width=0.5\textwidth]{../results/EDHEC-cor-tr36m.png} + -# Portfolio preferences +# Investor preferences... +In constructing a portfolio, most investors would prefer: + +* to be approximately correct rather than precisely wrong +* the flexibility to define any kind of objective and combine the constraints +* to define risk as potential loss rather than volatility +* a framework for considering different sets of portfolio constraints for comparison through time +* to intuitively understand optimization through visualization + + + +# ... Lead to portfolio preferences Construct a portfolio that: * maximizes return, * with per-asset position limits, * with a specific univariate portfolio risk limit, -* defining risk as losses, -* considering effects of skewness and kurtosis, -* and limiting contribution of risk for constituents -* or equalizing component risk contribution. +* defines risk as losses, +* considers the effects of skewness and kurtosis, and +* either limits contribution of risk for constituents or +* equalizes component risk contribution. 
-# Optimization frustration -Most investors would prefer: - -* to be approximately correct rather than precisely wrong -* to define risk as potential loss rather than volatility -* the flexibility to define any kind of objective and combine the constraints -* a framework for considering different sets of portfolio constraints for comparison through time -* to intuitively understand optimization through visualization - # Risk budgeting * Used to allocate the "risk" of a portfolio * Decomposes the total portfolio risk into the risk contribution of each component position * Literature on risk contribution has focused on volatility rather than downside risk -* Most financial returns series seem non-normal +* Most financial returns series seem non-normal, so we want to consider the effects of higher moments -<--! Two-column slide with a facing histogram and qqplot --> +# Return distributions +\includegraphics[width=1.0\textwidth]{../results/EDHEC-Distributions.png} # Measuring risk, not volatility -Measured with portfolio Conditional Value-at-Risk (CVaR) +Measure risk with Conditional Value-at-Risk (CVaR) * Also called Expected Tail Loss (ETL) and Expected Shortfall (ES) * ETL is the mean expected loss when the loss exceeds the VaR * ETL has all the properties a risk measure should have to be coherent and is a convex function of the portfolio weights * To account for skew and/or kurtosis, use Cornish-Fisher (or "modified") estimates of ETL instead (mETL) - +# Measuring risk +\includegraphics[width=1.0\textwidth]{../results/EDHEC-BarVaR.png} +# Measuring risk +Split graphic into two pages so it's readable + # ETL sensitivity Modified ETL demonstrates a better fit for historical CVaR at lower confidence levels, and can break down at higher confidence levels *Insert chart or charts* @@ -110,6 +155,10 @@ # _Ex ante_, not _ex post_ +_Ex post_ analysis of risk contribution has been around for a while + +* Litterman () + The use of _ex ante_ risk budgets is more recent * Qian 
(2005): "risk parity portfolio" allocates portfolio variance equally @@ -118,12 +167,18 @@ We want to look at the allocation of risk through _ex ante_ downside risk contribution -# Contribution to downside risk, not volatility +# Contribution to downside risk Use the modified CVaR contribution estimator from Boudt, _et al_ (2008) * CVaR contributions correspond to the conditional expectation of the return of the portfolio component when the portfolio loss is larger than its VaR loss. * %CmETL is the ratio of the expected return on the position when the portfolio experiences a beyond-VaR loss to the expected value of the portfolio loss * A high positive %CmETL indicates the position has a large loss when the portfolio also has a large loss + + + +# Contribution to downside risk * The higher the percentage mETL, the more the portfolio downside risk is concentrated on that asset * Allows us to directly optimize downside risk diversification * Lends itself to a simple algorithm that computes both CVaR and component CVaR in less than a second, even for large portfolios @@ -139,50 +194,7 @@ * Impose bound constraints on the percentage mETL contributions - -# An example -describe the example as a case study - -# Selected hedge fund strategies -Monthly data of EDHEC hedge fund indexes from 1998 - -## Relative Value - -* Fixed Income Arb -* Convertible Arb -* Equity Market Neutral -* Event Driven - -## Directional - -* Equity Long/Short -* Global Macro -* CTA - -# Ex-post Performance -\includegraphics[width=1.0\textwidth]{../results/EDHEC-Cumulative-Returns.png} - -# Ex-post Performance -\includegraphics[width=1.0\textwidth]{../results/EDHEC-BarVaR.png} - -# Ex-post Performance -\includegraphics[width=1.0\textwidth]{../results/EDHEC-RollPerf.png} - -# Ex-post Performance -\includegraphics[width=0.5\textwidth]{../results/EDHEC-ScatterSinceIncept.png} -\includegraphics[width=0.5\textwidth]{../results/EDHEC-Scatter36m.png} - -# Ex-post Performance 
-\includegraphics[width=1.0\textwidth]{../results/EDHEC-Distributions.png} - -# Ex-post Performance -Add table of relevant statistics here - -# Ex-post Correlations -\includegraphics[width=0.5\textwidth]{../results/EDHEC-cor-inception.png} -\includegraphics[width=0.5\textwidth]{../results/EDHEC-cor-tr36m.png} - -# Add general constraints +# Start with some general constraints Constraints specified for each asset in the portfolio: * Maximum position: 30% @@ -191,27 +203,22 @@ * Group constraints * Rebalancing quarterly -# Estimation +# Estimate +One of the largest challenges in optimization is improving the estimates of the moments - - -* Optimizer chooses portfolios based on forward looking estimates of risk and return based on the portfolio moments +* Optimizer chooses portfolios based on forward looking estimates of risk and return based on the constituent moments * Usually explicitly making trade-offs between correlation and volatility among members * Modified ETL extends the tradeoffs to the first four moments and co-moments -* Historical sample moments work fine as predictors in normal market regimes, but poorly when the market regime shifts +* Historical sample moments are used here as predictors -One of the largest challenges in optimization is improving the estimates of the moments + -# Forecasting -## Returns +# Define multiple objectives -## Volatility - -## Correlation - - -# Multiple objectives - Equal contribution to: * weight @@ -228,7 +235,7 @@ * variance * modified ETL - # Constrained Risk Contribution -Risk Budget as an eighth objective? +Risk Budget as an eighth objective set +* Drop the position constraints altogether +* No non-directional constituent may contribute more than 40% to portfolio risk +* No directional constituent may contribute more than 30% to portfolio risk, except for... +* ... 
Distressed, which cannot contribute more than 15% +* Directional, as a group, may not contribute more than 60% of the risk to the portfolio + # Optimizers ## Closed-form -* add list from PortfA -* discuss stress testing briefly +* Linear programming (LP) and mixed integer linear programming (MILP) +* Quadratic programming -## Heuristic +## General Purpose Continuous Solvers * Random portfolios * Differential evolution -* Others +* Partical swarm +* Simulated annealing + + # Random Portfolios -[Burns (2009)](http://www.portfolioprobe.com/blog/) describes Random Portfolios +It is what it sounds like -* From a portfolio seed, generate random pemutations of weights that meet your constraints on each asset -* add more here -* Random portfolios with X000 permutations +* From a portfolio seed, generate random permutations of weights that meet your constraints +* Several methods: [Burns (2009)](http://www.portfolioprobe.com/blog/), Shaw (2010), and Gilli, _et al_ (2011) Sampling can help provide insight into the goals and constraints of the optimization @@ -321,6 +340,17 @@ * Allows arbitrary number of samples * Allows massively parallel execution + + + + + # Sampled portfolios scatter chart with equal weight portfolio @@ -333,42 +363,15 @@ # Constrain by contribution to mETL Add a constraint -# Differential Evolution -All numerical optimizations are a tradeoff between speed and accuracy - -This space may well be non-convex in real portfolios - -Differential evolution will get more directed with each generation, rather than the uniform search of random portfolios - -Allows more logical 'space' to be searched with the same number of trial portfolios for more complex objectives - -doesn't test many portfolios on the interior of the portfolio space - -Early generations search a wider space; later generations increasingly focus on the space that is near-optimal - -Random jumps are performed in every generation to avoid local minima - -*Insert Chart* - -# Other Heuristic 
Methods -GenSA, SOMA, -Such functions are very compute intensive ? so linear, quadradic or conical objectives are better addressed through closed-form optimizers - -However, many business objectives do not fall into those categories... - -...and brute force solutions are often intractable - - - # Ex-ante results scatter plot with multiple objectives # Ex-ante results +scatter plot with multiple objectives, but in ETL space rather than variance + +# Ex-ante results Unstacked bar chart comparing allocations across objectives -# Ex-ante vs. ex-post results -scatter plot with both overlaid - # Out-of-sample results timeseries charts for cumulative return and drawdown @@ -378,31 +381,54 @@ # Conclusions As a framework for strategic allocation: +* Component contribution to risk is a useful tool * Random Portfolios can help you build intuition about your objectives and constraints * Rebalancing periodically and examining out of sample performance can help you refine objectives * Differential Optimization and parallelization are valuable as objectives get more complicated +# R Packages used -# _PortfolioAnalytics_ +## _PortfolioAnalytics_ - Provides numerical solutions to portfolios with complex constraints and objectives comprised of any function -- Unifies the interface across different numerical and closed-form optimizers, including ... 
*ADD LIST* -- Implements a front-end to two analytical solvers: **Differential Evolution** and **Random Portfolios** +- Unifies the interface across different closed-form optimizers and several analytical solvers +- Implements three methods for generating Random Portfolios, including 'sample', 'simplex', and 'grid' - Preserves the flexibility to define any kind of objective and constraint - Work-in-progress, available on R-Forge in the _ReturnAnalytics_ project +## _PerformanceAnalytics_ + * Returns-based analysis of performance and risk for financial instruments and portfolios, available on CRAN # Other packages - -## _PerformanceAnalytics_ - * Returns-based analysis of performance and risk for financial instruments and portfolios - ## _ROI_ - * Infrastructure package for optimization that facilitates use of different solvers by K. Hornik, D. Meyer, and S. Theussl + * Infrastructure package by K. Hornik, D. Meyer, and S. Theussl for optimization that facilitates use of different solvers... +## RGLPK + * ... such as GLPK, open source software for solving large-scale linear programming (LP), mixed integer linear programming (MILP) and other related problems + +## quadprog + * ... or this one, used for solving quadratic programming problems + +# Other packages ## _DEoptim_ * Implements Differential Evolution, a very powerful, elegant, population based stochastic function minimizer + +## _GenSA_ + * Implements functions for Generalized Simulated Annealing +## _pso_ + * An implementation of Partical Swarm Optimization consistent with the standard PSO 2007/2011 by Maurice Clerc, _et al._ + +# Other packages +## _foreach_ +* Steve Weston's remarkable parallel computing framework, which maps functions to data and aggregates results in parallel across multiple CPU cores and computers... 
+ +## _doRedis_ + * A companion package to _foreach_ by Bryan Lewis that implements a simple but very flexible parallel back end to Redis, making it to run parallel jobs across multiple R sessions. + +## _doMPI_ + * Another companion to _foreach_ that provides a parallel backend across cores using the _parallel_ package + ## _xts_ * Time series package specifically for finance by Jeff Ryan and Josh Ulrich @@ -416,8 +442,34 @@ # References Figure out bibtex links in markup +http://www.portfolioprobe.com/about/random-portfolios-in-finance/ + # Appendix Slides after this point are not likely to be included in the final presentation +# Differential Evolution +All numerical optimizations are a tradeoff between speed and accuracy + +Differential evolution will get more directed with each generation, rather than the uniform search of random portfolios + +Allows more logical 'space' to be searched with the same number of trial portfolios for more complex objectives + +doesn't test many portfolios on the interior of the portfolio space + +Early generations search a wider space; later generations increasingly focus on the space that is near-optimal + +Random jumps are performed in every generation to avoid local minima + +*Insert Chart* + +# Other Heuristic Methods +GenSA, SOMA, + + + +# Ex-ante vs. ex-post results +scatter plot with both overlaid + + # Scratch Slides likely to be deleted after this point \ No newline at end of file From noreply at r-forge.r-project.org Fri Sep 20 07:16:14 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 20 Sep 2013 07:16:14 +0200 (CEST) Subject: [Returnanalytics-commits] r3145 - pkg/PortfolioAnalytics/R Message-ID: <20130920051614.8BFDC18597E@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-20 07:16:14 +0200 (Fri, 20 Sep 2013) New Revision: 3145 Modified: pkg/PortfolioAnalytics/R/random_portfolios.R Log: Correcting number of random numbers to generate for rp_simplex. 
Modified: pkg/PortfolioAnalytics/R/random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-20 04:34:38 UTC (rev 3144) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-20 05:16:14 UTC (rev 3145) @@ -483,7 +483,7 @@ k <- ceiling(permutations / length(fev)) # generate uniform[0, 1] random numbers - U <- runif(n=k*permutations, 0, 1) + U <- runif(n=k*length(fev)*nassets, 0, 1) Umat <- matrix(data=U, nrow=k, ncol=nassets) # do the transformation to the set of weights to satisfy lower bounds From noreply at r-forge.r-project.org Fri Sep 20 13:53:14 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 20 Sep 2013 13:53:14 +0200 (CEST) Subject: [Returnanalytics-commits] r3146 - in pkg/Meucci: . R demo man Message-ID: <20130920115314.85949185905@r-forge.r-project.org> Author: xavierv Date: 2013-09-20 13:53:14 +0200 (Fri, 20 Sep 2013) New Revision: 3146 Added: pkg/Meucci/R/pHistPriorPosterior.R pkg/Meucci/demo/S_EntropyView.R pkg/Meucci/man/pHistPriorPosterior.Rd Modified: pkg/Meucci/DESCRIPTION pkg/Meucci/NAMESPACE pkg/Meucci/R/EntropyProg.R pkg/Meucci/TODO pkg/Meucci/demo/00Index pkg/Meucci/demo/S_BlackLittermanBasic.R pkg/Meucci/demo/S_EvaluationGeneric.R pkg/Meucci/man/EntropyProg.Rd pkg/Meucci/man/pHist.Rd Log: - updated documentation until the end of the book and added S_EntropyView demo script from chapter 9 Modified: pkg/Meucci/DESCRIPTION =================================================================== --- pkg/Meucci/DESCRIPTION 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/DESCRIPTION 2013-09-20 11:53:14 UTC (rev 3146) @@ -34,7 +34,8 @@ mvtnorm, dlm, quadprog, - kernlab + kernlab, + nloptr, Suggests: limSolve, Matrix, @@ -42,7 +43,6 @@ reshape2, Hmisc, moments, - nloptr, ggplot2, expm, latticeExtra, @@ -105,3 +105,4 @@ 'data.R' 'ButterflyTradingFunctions.R' 'RankingInformationFunctions.R' + 'pHistPriorPosterior.R' Modified: 
pkg/Meucci/NAMESPACE =================================================================== --- pkg/Meucci/NAMESPACE 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/NAMESPACE 2013-09-20 11:53:14 UTC (rev 3146) @@ -48,6 +48,7 @@ export(PanicCopula) export(PartialConfidencePosterior) export(PerformIidAnalysis) +export(pHistPriorPosterior) export(PlotCompositionEfficientFrontier) export(PlotDistributions) export(PlotFrontier) Modified: pkg/Meucci/R/EntropyProg.R =================================================================== --- pkg/Meucci/R/EntropyProg.R 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/R/EntropyProg.R 2013-09-20 11:53:14 UTC (rev 3146) @@ -41,13 +41,15 @@ #' Reversing 'p' and 'p_' leads to the empirical likelihood" estimator of Qin and Lawless (1994). #' See Robertson et al, "Forecasting Using Relative Entropy" (2002) for more theory #' @export -EntropyProg = function( p , A , b , Aeq , beq ) +EntropyProg = function( p , A = NULL , b = NULL , Aeq , beq ) { - library( nloptr ) - + library( nloptr ) + + if( !length(b) ) A = matrix( ,nrow = 0, ncol = 0) + if( !length(b) ) b = matrix( ,nrow = 0, ncol = 0) # count the number of constraints K_ = nrow( A ) # K_ is the number of inequality constraints in the matrix-vector pair A-b - K = nrow( Aeq ) # K is the number of equality views in the matrix-vector pair Aeq-beq + K = nrow( Aeq ) # K is the number of equality views in the matrix-vector pair Aeq-beq # parameter checks if ( K_ + K == 0 ) { stop( "at least one equality or inequality constraint must be specified")} @@ -56,8 +58,8 @@ if ( nrow(A)!=nrow(b) ) { stop( "number of equality constraints in matrix A must match number of elements in vector b") } # calculate derivatives of constraint matrices - A_ = t( A ) - b_= t( b ) + A_ = t( A ) + b_ = t( b ) Aeq_ = t( Aeq ) beq_ = t( beq ) @@ -104,9 +106,7 @@ v = optimResult$solution p_ = exp( log(p) - 1 - Aeq_ %*% v ) optimizationPerformance = list( converged = (optimResult$status > 0) , ml = 
optimResult$objective , iterations = optimResult$iterations , sumOfProbabilities = sum( p_ ) ) - } - - else # case inequality constraints are specified + }else # case inequality constraints are specified { # setup variables for constrained optimization InqMat = -diag( 1 , K_ + K ) # -1 * Identity Matrix with dimension equal to number of constraints @@ -194,7 +194,7 @@ #' @references #' \url{http://www.symmys.com} #' See Meucci script pHist.m used for plotting -#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} +#' @author Ram Ahluwalia \email{ram@@wingedfootcapital.com} and Xavier Valls \email{flamejat@@gmail.com} pHist = function( X , p , nBins, freq = FALSE ) { @@ -204,7 +204,7 @@ nBins = round( 10 * log(J) ) } - dist = hist( x = X , breaks = nBins , freq = FALSE , main = "Portfolio return distribution" ) + dist = hist( x = X , breaks = nBins , plot = FALSE ); n = dist$counts x = dist$breaks D = x[2] - x[1] @@ -221,7 +221,7 @@ f = np/D } - barplot( f , x , 1 ) + plot( x , f , type = "h", main = "Portfolio return distribution") return( list( f = f , x = x ) ) } Added: pkg/Meucci/R/pHistPriorPosterior.R =================================================================== --- pkg/Meucci/R/pHistPriorPosterior.R (rev 0) +++ pkg/Meucci/R/pHistPriorPosterior.R 2013-09-20 11:53:14 UTC (rev 3146) @@ -0,0 +1,52 @@ +#' @title Plot prior and posterior distributions. +#' +#' @description Plot prior and posterior distributions, as described in A. Meucci, +#' "Risk and Asset Allocation", Springer, 2005. +#' +#' @param X : [matrix] (J x N) simulations +#' @param p : [vector] (J x 1) prior probabilities +#' @param p_ : [vector] (J x 1) posterior probabilities +#' +#' @references +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. 
+#' +#' See Meucci's script for "pHistPriorPosterior.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} +#' @export + +pHistPriorPosterior = function( X, p, p_) +{ + X = as.matrix(X); + J = dim(X)[1]; + N = dim(X)[2]; + NBins = round(10 * log(J)); + + for( n in 1 : N ) + { + dev.new(); + + # set ranges + xl = min(X[ , n]); + xh = max(X[ , n]); + + par( mfrow = c( 2, 1 ) ); + + # prior numerical + pHist( X[ , n ], p, NBins); + # xlim([xl, xh]); + # y1 = ylim(); + # title('prior'); + + # posterior numerical + pHist( X[ , n ], p_, NBins); + # xlim([xl, xh]); + # y2 = ylim(); + # ylim([min(y1(1), y2(1)), max(y1(2), y2(2))]); + # title('posterior'); + + # subplot(2, 1, 1); + # ylim([min(y1(1), y2(1)), max(y1(2), y2(2))]); + } + +} \ No newline at end of file Modified: pkg/Meucci/TODO =================================================================== --- pkg/Meucci/TODO 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/TODO 2013-09-20 11:53:14 UTC (rev 3146) @@ -1,7 +1,11 @@ Xavi's notes: * Matlab package doesn't seem to be necessary, find substitutes for its functions where possible -* There are some problems with charts and what can be done with base graphics. Maybe using ggplot2 instead of base graphics can provide more flexibility. +* There are some problems with charts and what can be done with base graphics: + - Maybe using ggplot2 instead of base graphics can provide more flexibility. + - How to change the layout after the plot like in MATLAB to change name, limits... (look at pHistPriorPosterior code for an example ) + - How to draw a histogram passing the x,y coordinates using the barplot function + ... * All the scripts from the papers need to be revised, some don't even work. * Maybe there are some packages that aren't needed anymore. Find out which of them. * Documentation for papers and functions from Ram - Manan. 
@@ -10,7 +14,6 @@ * Change coding style to one more R alike * Still 2 scripts left from the book: S_MeanVarianceCallsRobust from chapter 9 and S_OptionReplication from chapter 6 * Improve documentation for every script from the book: - - find the exercises and sections they come from - write down the equations * Not Sure if EntropyProg returns what it should with empty matrices as arguments for the constraints -* Write text version of the formulas in the documentation \ No newline at end of file +* Write text version of the formulas in the documentation Modified: pkg/Meucci/demo/00Index =================================================================== --- pkg/Meucci/demo/00Index 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/demo/00Index 2013-09-20 11:53:14 UTC (rev 3146) @@ -39,6 +39,7 @@ S_ESContributionsStudentT computes the expected shortfall and the contributions to ES from each security S_EigenvalueDispersion displays the sample eigenvalues dispersion phenomenon S_EllipticalNDim decomposes the N-variate normal distribution into its radial and uniform components to generate an elliptical distribution +S_EntropyView illustrates the Entropy Pooling approach S_EquitiesInvariants performs the quest for invariance in the stock market S_EquityProjectionPricing projects the distribution of the market invariants for the stock market from the estimation interval (normal assumption) to the investment horizon. Then it computes the distribution of prices at the investment horizon analytically. S_EstimateExpectedValueEvaluation script familiarizes the user with the evaluation of an estimator replicability, loss, error, bias and inefficiency Modified: pkg/Meucci/demo/S_BlackLittermanBasic.R =================================================================== --- pkg/Meucci/demo/S_BlackLittermanBasic.R 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/demo/S_BlackLittermanBasic.R 2013-09-20 11:53:14 UTC (rev 3146) @@ -5,9 +5,11 @@ #' Springer, 2005, Chapter 9. 
#' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for "S_BlackLittermanBasic.m" +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 303 - Black-Litterman and beyond II". #' +#' See Meucci's script for "S_BlackLittermanBasic.m" and "E 302 - Black-Litterman and beyond I" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} ################################################################################################################## @@ -23,7 +25,7 @@ ################################################################################################################## ### Modify expected returns the Black-Litterman way and compute new efficient frontier -P = cbind( 1, 0, 0, 0, 0, -1 ); # pick matrix +P = cbind( 1, 0, 0, 0, 0, -1 ); # pick matrix Omega = P %*% covNRets$Sigma %*% t( P ); Views = sqrt( diag( Omega ) ); # views value Added: pkg/Meucci/demo/S_EntropyView.R =================================================================== --- pkg/Meucci/demo/S_EntropyView.R (rev 0) +++ pkg/Meucci/demo/S_EntropyView.R 2013-09-20 11:53:14 UTC (rev 3146) @@ -0,0 +1,32 @@ +#' This script illustrates the Entropy Pooling approach, as described in A. Meucci, "Risk and Asset Allocation", +#' Springer, 2005, Chapter 9. +#' +#' @references +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 304 - Entropy pooling". 
+#' +#' See Meucci's script for "S_EntropyView.m" +#' +#' @author Xavier Valls \email{flamejat@@gmail.com} + +################################################################################################################## +### Market simulations +nSim = 100000; +B = ( runif( nSim ) < 0.5); +X = B * rnorm( nSim, -1, 1 ) + ( 1 - B ) * rnorm( nSim, 1, 1 ); + +################################################################################################################## +### View +# specify view E{X} = 0.5 and constraint 1'*p = 1. +p_prior = matrix( 1, nSim, 1) / nSim; +Aeq = rbind( X, matrix( 1, 1, nSim ) ); +beq = rbind( 0.5, 1 ); + +################################################################################################################## +### Posterior market distribution using the Entropy Pooling approach +#Using package's EntropyProg instead of Books EntropyMinimization (Same function, different names) +p_post = EntropyProg( p_prior, Aeq = Aeq, beq = beq)$p; +pHistPriorPosterior(X,p_prior, p_post); +fprintf('prior sample mean = #f\n', mean(X)); +fprintf('posterior sample mean = #f\n', X' * p_post); + Modified: pkg/Meucci/demo/S_EvaluationGeneric.R =================================================================== --- pkg/Meucci/demo/S_EvaluationGeneric.R 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/demo/S_EvaluationGeneric.R 2013-09-20 11:53:14 UTC (rev 3146) @@ -6,12 +6,14 @@ #' @return Allocation : [vector] (N x 1) #' #' @note -#' compute optimal allocation, only possible if hidden parameters were known: thus it is not a "decision", we call it a "choice" +#' Compute optimal allocation, only possible if hidden parameters were known: thus it is not a "decision", we call it a "choice" #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. -#' See Meucci's script for " EvaluationChoiceOptimal.m" +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 285 - Estimation risk and opportunity cost". #' +#' See Meucci's script for " EvaluationChoiceOptimal.m" +#' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -40,7 +42,9 @@ #' @return CertaintyEquivalent : [scalar] #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 285 - Estimation risk and opportunity cost". +#' #' See Meucci's script for " EvaluationSatisfaction.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -66,7 +70,9 @@ #' scenario-dependent decision that tries to pick the optimal allocation #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 285 - Estimation risk and opportunity cost". +#' #' See Meucci's script for "EvaluationDecisionBestPerformer.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -93,7 +99,9 @@ #' @return C_Plus : [scalar] cost #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 285 - Estimation risk and opportunity cost". +#' #' See Meucci's script for "EvaluationDecisionBestPerformer.m" #' #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -117,7 +125,9 @@ #' Described in A. Meucci "Risk and Asset Allocation", Springer, 2005, Chapter 8. #' #' @references -#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. +#' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, +#' "E 285 - Estimation risk and opportunity cost". +#' #' See Meucci's script for "S_EvaluationGeneric.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} @@ -140,12 +150,12 @@ ################################################################################################################## ### Input market parameters NumAssets = 10; -a = 0.5; # effect of correlation on expected values and volatility (hidden) +a = 0.5; # effect of correlation on expected values and volatility (hidden) Bottom = 0.06; -Top = 0.36; -Step = (Top - Bottom) / (NumAssets - 1); -v = seq( Bottom, Top, Step ) ; # volatility vector -Market = NULL; +Top = 0.36; +Step = (Top - Bottom) / (NumAssets - 1); +v = seq( Bottom, Top, Step ) ; # volatility vector +Market = list(); Market$T = 20; # not hidden Market$CurrentPrices = 10 * array( 1, NumAssets); # not hidden Modified: pkg/Meucci/man/EntropyProg.Rd =================================================================== --- pkg/Meucci/man/EntropyProg.Rd 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/man/EntropyProg.Rd 2013-09-20 11:53:14 UTC (rev 3146) @@ -2,7 +2,7 @@ \alias{EntropyProg} \title{Entropy pooling program for blending views on scenarios with a prior scenario-probability distribution} \usage{ - EntropyProg(p, A, b, Aeq, beq) + EntropyProg(p, A = NULL, b = NULL, Aeq, beq) } \arguments{ \item{p}{a vector of initial probabilities based on prior Modified: pkg/Meucci/man/pHist.Rd =================================================================== --- pkg/Meucci/man/pHist.Rd 2013-09-20 05:16:14 UTC (rev 3145) +++ pkg/Meucci/man/pHist.Rd 2013-09-20 11:53:14 UTC (rev 3146) @@ -24,7 +24,8 @@ Generates histogram } \author{ - Ram Ahluwalia \email{ram at wingedfootcapital.com} + Ram Ahluwalia \email{ram at wingedfootcapital.com} and + Xavier Valls \email{flamejat at gmail.com} } \references{ \url{http://www.symmys.com} See Meucci script 
pHist.m Added: pkg/Meucci/man/pHistPriorPosterior.Rd =================================================================== --- pkg/Meucci/man/pHistPriorPosterior.Rd (rev 0) +++ pkg/Meucci/man/pHistPriorPosterior.Rd 2013-09-20 11:53:14 UTC (rev 3146) @@ -0,0 +1,27 @@ +\name{pHistPriorPosterior} +\alias{pHistPriorPosterior} +\title{Plot prior and posterior distributions.} +\usage{ + pHistPriorPosterior(X, p, p_) +} +\arguments{ + \item{X}{: [matrix] (J x N) simulations} + + \item{p}{: [vector] (J x 1) prior probabilities} + + \item{p_}{: [vector] (J x 1) posterior probabilities} +} +\description{ + Plot prior and posterior distributions, as described in + A. Meucci, "Risk and Asset Allocation", Springer, 2005. +} +\author{ + Xavier Valls \email{flamejat at gmail.com} +} +\references{ + A. Meucci - "Exercises in Advanced Risk and Portfolio + Management" \url{http://symmys.com/node/170}. + + See Meucci's script for "pHistPriorPosterior.m" +} + From noreply at r-forge.r-project.org Fri Sep 20 15:50:40 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 20 Sep 2013 15:50:40 +0200 (CEST) Subject: [Returnanalytics-commits] r3147 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: .Rproj.user/E5D7D248/sdb/prop R man Message-ID: <20130920135040.E7FFE1847E6@r-forge.r-project.org> Author: shubhanm Date: 2013-09-20 15:50:40 +0200 (Fri, 20 Sep 2013) New Revision: 3147 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/INDEX pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd Log: documentation modification Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/INDEX =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/INDEX 2013-09-20 11:53:14 UTC (rev 3146) +++ 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/prop/INDEX 2013-09-20 13:50:40 UTC (rev 3147) @@ -2,6 +2,7 @@ C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2FR%2FAdjustedSharpeRatio.R="43D7BE7F" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2FR%2FCalmarRatio.R="8047D8D6" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2FR%2FES.R="716852E0" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2FR%2FmaxDrawdown.R="41EDF843" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2FR%2Ftable.Correlation.R="37353A3D" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Finst%2Fdoc%2FEmaxdd.Rnw="C9696525" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Finst%2Fdoc%2FPA-Bacon.Rnw="915ED3AD" @@ -12,7 +13,10 @@ C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2FR%2FmaxDDGBM.R="60D2FDD5" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2FR%2Ftable.ComparitiveReturn.GLM.R="41919319" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2FWeek6-7%2FCode%2FCovariance%20Matrix%20Integrated%20Regression%20Function%2Flmi.R="649AEBE6" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2F.Rbuildignore="A38670D5" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2F.Rhistory="80E892C0" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FDESCRIPTION="5ED82E8C" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FNAMESPACE="7D033990" 
C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FR%2FACStdDev.annualized.R="33CAE6A9" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FR%2FCDrawdown.R="5D302CE3" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FR%2FEmaxDDGBM.R="4836AC8C" @@ -29,31 +33,43 @@ C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FR%2Ftable.EMaxDDGBM.R="FFE2C69" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FR%2Ftable.Sharpe.R="F9E07115" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FR%2Ftable.UnsmoothReturn.R="CF948D8A" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FR%2Ftable.normDD.R="3CCE8B2B" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2FRead-and-delete-me="7E8C5556" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fman%2FAcarSim.Rd="717883C7" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fman%2FEMaxDDGBM.Rd="2EE78BD4" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fman%2FEmaxDDGBM.Rd="25C2A70F" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fman%2Fglmi.Rd="7909B0E2" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fman%2Flmi.Rd="88C0FBC4" 
+C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fman%2Ftable.normDD.Rd="E66E994D" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FACFSTDEV.rnw="8F0CAC5C" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FCommodityReport.Rnw="DA891E76" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FConditionalDrawdown.Rnw="892D5F16" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FEmaxDDGBM.Rnw="14FC5963" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FGLMReturn.Rnw="69A99C19" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FGLMSmoothIndex.Rnw="B53EBB52" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FLoSharpe.Rnw="2F79FCCA" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FNormCalmar.rnw="C18E8FE4" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FOWReturn.Rnw="E76F5680" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FOWReturn.log="9B1541E6" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FOkunevWhite.Rnw="C9BD1399" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FShaneAcarMaxLoss.Rnw="A036626A" 
C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm%2Fvignettes%2FUnSmoothReturnAnalysis.Rnw="945F7AEE" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm.Rcheck%2F00install.out="CE4BAAEC" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fnoniid.sm.Rcheck%2Fnoniid.sm-manual.log="9D81EC8D" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fsandbox%2FCommodity.Rnw="7A63D707" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fsandbox%2FCommodityReport.Rnw="5049B60E" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fsandbox%2FLoSharpe.R="DA251988" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fsandbox%2FLoSharpe.Rnw="A50A4AC2" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fsandbox%2Fse.LoSharpe.R="D581FC31" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fsandbox%2Ftable.Sharpe.R="1CFFFCF5" +C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fsandbox%2Fvignettes%2FMaximumLoss.Rnw="FD3F5B42" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fvignettes%2FACFSTDEV.rnw="81399909" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fvignettes%2FCommodity_ResearchReport.Rnw="F0290DA5" C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fvignettes%2FLoSharpeRatio.Rnw="915BCBE3" 
C%3A%2FUsers%2Fshubhankit%2FDesktop%2F1%20week%2Fpkg%2FPerformanceAnalytics%2Fsandbox%2FShubhankit%2Fvignettes%2FUnSmoothReturnAnalysis.Rnw="5D6C2593" C%3A%2FUsers%2Fshubhankit%2FDesktop%2Fa.snw="2991F3E9" C%3A%2FUsers%2Fshubhankit%2FDesktop%2Fdem.Rnw="AC219D7D" +~%2FR%2Fwin-library%2F3.0%2Fhighfrequency%2Fdoc%2Fhighfrequency.Rnw="FCE481B1" ~%2FR%2Fwin-library%2F3.0%2Fxtable%2Fdoc%2Fmargintable.Rnw="F24BE9F7" ~%2FR%2Fwin-library%2F3.0%2Fxtable%2Fdoc%2FxtableGallery.R="A16037E8" ~%2FR%2Fwin-library%2F3.0%2Fxtable%2Fdoc%2FxtableGallery.Rnw.R="EF28E7CC" Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R 2013-09-20 11:53:14 UTC (rev 3146) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R 2013-09-20 13:50:40 UTC (rev 3147) @@ -1,4 +1,4 @@ -#'@title Generalised Lambda Distribution Simulated Drawdown +#'@title Generalised Lambda Distribution Drawdown #'@description When selecting a hedge fund manager, one risk measure investors often #' consider is drawdown. How should drawdown distributions look? Carr Futures' #' Galen Burghardt, Ryan Duncan and Lianyan Liu share some insights from their @@ -23,6 +23,10 @@ #' Burghardt, G., Duncan, R. and L. Liu, \emph{Deciphering drawdown}. Risk magazine, Risk management for investors, September, S16-S20, 2003. 
\url{http://www.risk.net/data/risk/pdf/investor/0903_risk.pdf} #' @author Peter Carl, Brian Peterson, Shubhankit Mohan #' @keywords Simulated Drawdown Using Brownian Motion Assumptions +#' @examples +#' library(PerformanceAnalytics) +#' data(edhec) +#' table.normDD(edhec[,1]) #' @seealso Drawdowns.R #' @rdname table.normDD #' @export @@ -53,29 +57,32 @@ x = y[,column] mu = Return.annualized(x, scale = NA, geometric = TRUE) sig=StdDev.annualized(x) - skew = skewness(x) - kurt = kurtosis(x) + #skew = skewness(x) + #kurt = kurtosis(x) r <- matrix(0,T+1,n) # matrix to hold short rate paths s <- matrix(0,T+1,n) r[1,] <- r0 s[1,] <- s0 drawdown <- matrix(0,n) # return(Ed) + data=as.numeric(x) + # using starship model to fit lambda distribution + lpara= starship.adaptivegrid(data,list(lcvect=(0:4)/10,ldvect=(0:4)/10)) for(j in 1:n){ - r[2:(T+1),j]= rgl(T,mu,sig,skew,kurt) + r[2:(T+1),j]= rgl(T,lpara$lambda[1],lpara$lambda[2],lpara$lambda[3],lpara$lambda[4],param="fkml") for(i in 2:(T+1)){ - dr <- r[i,j]*dt - s[i,j] <- s[i-1,j] + (dr/100) + dr <- r[i,j] + s[i,j] <- (dr) } - drawdown[j] = as.numeric(maxdrawdown(s[,j])[1]) + drawdown[j] = as.numeric(maxDrawdown(s[,j])[1]) } z = c((mu*100), (sig*100), - ((mean(drawdown)))) + ((mean(drawdown)*100))) znames = c( "Annual Returns in %", "Std Devetions in %", Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd 2013-09-20 11:53:14 UTC (rev 3146) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd 2013-09-20 13:50:40 UTC (rev 3147) @@ -1,6 +1,6 @@ \name{table.normDD} \alias{table.normDD} -\title{Generalised Lambda Distribution Simulated Drawdown} +\title{Generalised Lambda Distribution Drawdown} \usage{ table.normDD(R, digits = 4) } From noreply at r-forge.r-project.org Fri Sep 20 17:27:22 2013 From: noreply at r-forge.r-project.org 
(noreply at r-forge.r-project.org) Date: Fri, 20 Sep 2013 17:27:22 +0200 (CEST) Subject: [Returnanalytics-commits] r3148 - pkg/Meucci/demo Message-ID: <20130920152722.E69D8183FB9@r-forge.r-project.org> Author: xavierv Date: 2013-09-20 17:27:22 +0200 (Fri, 20 Sep 2013) New Revision: 3148 Modified: pkg/Meucci/demo/S_CPPI.R pkg/Meucci/demo/S_InvestorsObjective.R pkg/Meucci/demo/S_MeanVarianceHorizon.R pkg/Meucci/demo/S_UtilityMax.R Log: - fixed non-ASCII characters errors Modified: pkg/Meucci/demo/S_CPPI.R =================================================================== --- pkg/Meucci/demo/S_CPPI.R 2013-09-20 13:50:40 UTC (rev 3147) +++ pkg/Meucci/demo/S_CPPI.R 2013-09-20 15:27:22 UTC (rev 3148) @@ -5,7 +5,7 @@ #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, #' "E 264 - Constant proportion portfolio insurance". #' -#' See Meucci's script for "S_CPPI.m"E 264 ? Constant proportion portfolio insurance +#' See Meucci's script for "S_CPPI.m" # #' @author Xavier Valls \email{flamejat@@gmail.com} Modified: pkg/Meucci/demo/S_InvestorsObjective.R =================================================================== --- pkg/Meucci/demo/S_InvestorsObjective.R 2013-09-20 13:50:40 UTC (rev 3147) +++ pkg/Meucci/demo/S_InvestorsObjective.R 2013-09-20 15:27:22 UTC (rev 3148) @@ -4,7 +4,7 @@ #' #' @references #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 236 - Simulation of the investor?s objectives". +#' "E 236 - Simulation of the investor's objectives". #' #' See Meucci's script for "S_InvestorsObjective.m" # Modified: pkg/Meucci/demo/S_MeanVarianceHorizon.R =================================================================== --- pkg/Meucci/demo/S_MeanVarianceHorizon.R 2013-09-20 13:50:40 UTC (rev 3147) +++ pkg/Meucci/demo/S_MeanVarianceHorizon.R 2013-09-20 15:27:22 UTC (rev 3148) @@ -6,7 +6,7 @@ #' #' @references #' A. 
Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, -#' "E 256 ? Mean-variance pitfalls: horizon effect". +#' "E 256 - Mean-variance pitfalls: horizon effect". #' #' See Meucci's script for "S_MeanVarianceHorizon.m" and "E 255 - Mean-variance pitfalls: two-step approach II" from the book. # Modified: pkg/Meucci/demo/S_UtilityMax.R =================================================================== --- pkg/Meucci/demo/S_UtilityMax.R 2013-09-20 13:50:40 UTC (rev 3147) +++ pkg/Meucci/demo/S_UtilityMax.R 2013-09-20 15:27:22 UTC (rev 3148) @@ -5,7 +5,7 @@ #' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}, #' "E 263 - Utility maximization II". #' -#' See Meucci's script for "S_UtilityMax.m" and "E 262 ? Utility maximization I" from the book. +#' See Meucci's script for "S_UtilityMax.m" and "E 262 - Utility maximization I" from the book. # #' @author Xavier Valls \email{flamejat@@gmail.com} From noreply at r-forge.r-project.org Fri Sep 20 19:36:42 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 20 Sep 2013 19:36:42 +0200 (CEST) Subject: [Returnanalytics-commits] r3149 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: R man Message-ID: <20130920173642.7D16E185686@r-forge.r-project.org> Author: shubhanm Date: 2013-09-20 19:36:42 +0200 (Fri, 20 Sep 2013) New Revision: 3149 Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd Log: Minor documentation changes Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 2013-09-20 15:27:22 UTC (rev 3148) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 
2013-09-20 17:36:42 UTC (rev 3149) @@ -7,11 +7,11 @@ #'an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ?Details?. #' #' -#'@param data +#'@param data #'an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model. If not found in data, the variables are taken from environment(formula), typically the environment from which lm is called. #' #'@param vcov HC-HAC covariance estimation -#'@param weights +#'@param weights #'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum; otherwise ordinary least squares is used. See also ?Details?, #' #' Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R 2013-09-20 15:27:22 UTC (rev 3148) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/table.normDD.R 2013-09-20 17:36:42 UTC (rev 3149) @@ -18,7 +18,6 @@ #' @param digits number of rounding off digits. #' @references Burghardt, G., and L. Liu, \emph{ It's the Autocorrelation, Stupid (November 2012) Newedge #' working paper.} -#' \code{\link[stats]{}} \cr #' \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} #' Burghardt, G., Duncan, R. and L. Liu, \emph{Deciphering drawdown}. Risk magazine, Risk management for investors, September, S16-S20, 2003. 
\url{http://www.risk.net/data/risk/pdf/investor/0903_risk.pdf} #' @author Peter Carl, Brian Peterson, Shubhankit Mohan @@ -26,8 +25,7 @@ #' @examples #' library(PerformanceAnalytics) #' data(edhec) -#' table.normDD(edhec[,1]) -#' @seealso Drawdowns.R +#' table.normDD(edhec[1:30,1]) #' @rdname table.normDD #' @export table.normDD <- @@ -76,9 +74,9 @@ dr <- r[i,j] s[i,j] <- (dr) } - - - drawdown[j] = as.numeric(maxDrawdown(s[,j])[1]) + # s= as.POSIXct(s, origin = "1960-01-01") + # drawdown[j] = as.numeric(maxDrawdown(as.POSIXct(s[,j], origin = "1960-01-01") )[1]) + drawdown[j] = as.numeric(maxDrawdown(s[,j])[1]) } z = c((mu*100), (sig*100), Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd 2013-09-20 15:27:22 UTC (rev 3148) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/table.normDD.Rd 2013-09-20 17:36:42 UTC (rev 3149) @@ -33,22 +33,24 @@ \bold{10,000} iterations to produce a smooth distribution. } +\examples{ +library(PerformanceAnalytics) +data(edhec) +table.normDD(edhec[1:30,1]) +} \author{ Peter Carl, Brian Peterson, Shubhankit Mohan } \references{ Burghardt, G., and L. Liu, \emph{ It's the Autocorrelation, Stupid (November 2012) Newedge working - paper.} \code{\link[stats]{}} \cr + paper.} \url{http://www.amfmblog.com/assets/Newedge-Autocorrelation.pdf} Burghardt, G., Duncan, R. and L. Liu, \emph{Deciphering drawdown}. Risk magazine, Risk management for investors, September, S16-S20, 2003. 
\url{http://www.risk.net/data/risk/pdf/investor/0903_risk.pdf} } -\seealso{ - Drawdowns.R -} \keyword{Assumptions} \keyword{Brownian} \keyword{Drawdown} From noreply at r-forge.r-project.org Fri Sep 20 20:20:05 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Fri, 20 Sep 2013 20:20:05 +0200 (CEST) Subject: [Returnanalytics-commits] r3150 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: . .Rproj.user/E5D7D248/sdb/per/t tests tests/Examples Message-ID: <20130920182006.13F0A18515C@r-forge.r-project.org> Author: shubhanm Date: 2013-09-20 20:20:05 +0200 (Fri, 20 Sep 2013) New Revision: 3150 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/tests/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/tests/Examples/ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/tests/Examples/noniid.sm-Ex.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/tests/Examples/noniid.sm-Ex.Rout pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/tests/Examples/noniid.sm-Ex.pdf Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/32D790F7 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/445E439C pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/44B07808 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/58E583C6 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/7D095D73 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/934ACCDE pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/C4A4A866 pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/F08D801A Log: Adding the tests/Examples for the package Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/32D790F7 
=================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/32D790F7 2013-09-20 17:36:42 UTC (rev 3149) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/32D790F7 2013-09-20 18:20:05 UTC (rev 3150) @@ -1,15 +0,0 @@ -{ - "contents" : "#'@title Fitting Generalized Linear Models with HC and HAC Covariance Matrix Estimators\n#'@description\n#' lm is used to fit generalized linear models, specified by giving a symbolic description of the linear predictor and a description of the error distribution.\n#' @details\n#' see \\code{\\link{lm}}.\n#' @param formula \n#'an object of class \"formula\" (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ?Details?.\n#'\n#'\n#'@param data\t\n#'an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model. If not found in data, the variables are taken from environment(formula), typically the environment from which lm is called.\n#'\n#'@param vcov HC-HAC covariance estimation\n#'@param weights\t\n#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum(w*e^2)); otherwise ordinary least squares is used. See also ?Details?,\n#'\n#'\n#'@param subset \n#'an optional vector specifying a subset of observations to be used in the fitting process.\n#'@param na.action\t\n#'a function which indicates what should happen when the data contain NAs. The default is set by the na.action setting of options, and is na.fail if that is unset. The ?factory-fresh? default is na.omit. Another possible value is NULL, no action. 
Value na.exclude can be useful.\n#'\n#'@param method\t\n#'the method to be used; for fitting, currently only method = \"qr\" is supported; method = \"model.frame\" returns the model frame (the same as with model = TRUE, see below).\n#'\n#'@param model logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned.\t\n#'@param x logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned.\n#'@param y logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned.\n#'@param qr logicals. If TRUE the corresponding components of the fit (the model frame, the model matrix, the response, the QR decomposition) are returned.\n#'@param singular.ok\t\n#'logical. If FALSE (the default in S but not in R) a singular fit is an error.\n#'\n#'@param contrasts\t\n#'an optional list. See the contrasts.arg of model.matrix.default.\n#'\n#'@param offset\t\n#'this can be used to specify an a priori known component to be included in the linear predictor during fitting. This should be NULL or a numeric vector of length equal to the number of cases. One or more offset terms can be included in the formula instead or as well, and if more than one are specified their sum is used. 
See model.offset.\n#'\n#'@param \\dots\t\n#'additional arguments to be passed to the low level regression fitting functions (see below).\n#' @author The original R implementation of glm was written by Simon Davies working for Ross Ihaka at the University of Auckland, but has since been extensively re-written by members of the R Core team.\n#' The design was inspired by the S function of the same name described in Hastie & Pregibon (1992).\n#' @keywords HC HAC covariance estimation regression fitting model\n#' @rdname lmi\n#' @export\nlmi <- function (formula, data,vcov = NULL, subset, weights, na.action, method = \"qr\", \n model = TRUE, x = FALSE, y = FALSE, qr = TRUE, singular.ok = TRUE, \n contrasts = NULL, offset, ...) \n{\n ret.x <- x\n ret.y <- y\n cl <- match.call()\n mf <- match.call(expand.dots = FALSE)\n m <- match(c(\"formula\", \"data\", \"subset\", \"weights\", \"na.action\", \n \"offset\"), names(mf), 0L)\n mf <- mf[c(1L, m)]\n mf$drop.unused.levels <- TRUE\n mf[[1L]] <- as.name(\"model.frame\")\n mf <- eval(mf, parent.frame())\n if (method == \"model.frame\") \n return(mf)\n else if (method != \"qr\") \n warning(gettextf(\"method = '%s' is not supported. 
Using 'qr'\", \n method), domain = NA)\n mt <- attr(mf, \"terms\")\n y <- model.response(mf, \"numeric\")\n w <- as.vector(model.weights(mf))\n if (!is.null(w) && !is.numeric(w)) \n stop(\"'weights' must be a numeric vector\")\n offset <- as.vector(model.offset(mf))\n if (!is.null(offset)) {\n if (length(offset) != NROW(y)) \n stop(gettextf(\"number of offsets is %d, should equal %d (number of observations)\", \n length(offset), NROW(y)), domain = NA)\n }\n if (is.empty.model(mt)) {\n x <- NULL\n z <- list(coefficients = if (is.matrix(y)) matrix(, 0, \n 3) else numeric(), residuals = y, fitted.values = 0 * \n y, weights = w, rank = 0L, df.residual = if (!is.null(w)) sum(w != \n 0) else if (is.matrix(y)) nrow(y) else length(y))\n if (!is.null(offset)) {\n z$fitted.values <- offset\n z$residuals <- y - offset\n }\n }\n else {\n x <- model.matrix(mt, mf, contrasts)\n z <- if (is.null(w)) \n lm.fit(x, y, offset = offset, singular.ok = singular.ok, \n ...)\n else lm.wfit(x, y, w, offset = offset, singular.ok = singular.ok, \n ...)\n }\n class(z) <- c(if (is.matrix(y)) \"mlm\", \"lm\")\n z$na.action <- attr(mf, \"na.action\")\n z$offset <- offset\n z$contrasts <- attr(x, \"contrasts\")\n z$xlevels <- .getXlevels(mt, mf)\n z$call <- cl\n z$terms <- mt\n if (model) \n z$model <- mf\n if (ret.x) \n z$x <- x\n if (ret.y) \n z$y <- y\n if (!qr) \n z$qr <- NULL\n #z\n if(is.null(vcov)) {\n se <- vcov(z)\n } else {\n if (is.function(vcov))\n se <- vcov(z)\n else\n se <- vcov\n }\n z = list(z,vHaC = se) \n z\n}\n", - "created" : 1379107697415.000, - "dirty" : false, - "encoding" : "UTF-8", - "folds" : "", - "hash" : "2819201039", - "id" : "32D790F7", - "lastKnownWriteTime" : 1379110731, - "path" : "C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R", - "properties" : { - }, - "source_on_save" : true, - "type" : "r_source" -} \ No newline at end of file Deleted: 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/445E439C =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/445E439C 2013-09-20 17:36:42 UTC (rev 3149) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/445E439C 2013-09-20 18:20:05 UTC (rev 3150) @@ -1,17 +0,0 @@ -{ - "contents" : "%% no need for \\DeclareGraphicsExtensions{.pdf,.eps}\n\n\\documentclass[12pt,letterpaper,english]{article}\n\\usepackage{times}\n\\usepackage[T1]{fontenc}\n\\IfFileExists{url.sty}{\\usepackage{url}}\n {\\newcommand{\\url}{\\texttt}}\n\n\\usepackage{babel}\n%\\usepackage{noweb}\n\\usepackage{Rd}\n\n\\usepackage{Sweave}\n\\SweaveOpts{engine=R,eps=FALSE}\n%\\VignetteIndexEntry{Performance Attribution from Bacon}\n%\\VignetteDepends{PerformanceAnalytics}\n%\\VignetteKeywords{returns, performance, risk, benchmark, portfolio}\n%\\VignettePackage{PerformanceAnalytics}\n\n%\\documentclass[a4paper]{article}\n%\\usepackage[noae]{Sweave}\n%\\usepackage{ucs}\n%\\usepackage[utf8x]{inputenc}\n%\\usepackage{amsmath, amsthm, latexsym}\n%\\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry}\n%\\usepackage{graphicx}\n%\\usepackage{graphicx, verbatim}\n%\\usepackage{ucs}\n%\\usepackage[utf8x]{inputenc}\n%\\usepackage{amsmath, amsthm, latexsym}\n%\\usepackage{graphicx}\n\n\\title{Commodity Index Fund Performance Analysis}\n\\author{Shubhankit Mohan}\n\n\\begin{document}\n\\SweaveOpts{concordance=TRUE}\n\n\\maketitle\n\n\n\\begin{abstract}\nThe fact that many hedge fund returns exhibit extraordinary levels of serial correlation is now well-known and generally accepted as fact. 
The effect of this autocorrelation on investment returns diminishes the apparent risk of such asset classes as the true returns/risk is easily \\textbf{camouflaged} within a haze of liquidity, stale prices, averaged price quotes and smoothed return reporting. We highlight the effect \\emph{autocorrelation} and \\emph{drawdown} has on performance analysis by investigating the results of functions developed during the Google Summer of Code 2013 on \\textbf{commodity based index} .\n\\end{abstract}\n\n\\tableofcontents\n\n<>=\nlibrary(PerformanceAnalytics)\nlibrary(noniid.sm)\ndata(edhec)\n@\n\n\n\\section{Background}\nThe investigated fund index that tracks a basket of \\emph{commodities} to measure their performance.The value of these indexes fluctuates based on their underlying commodities, and this value depends on the \\emph{component}, \\emph{methodology} and \\emph{style} to cover commodity markets .\n\nA brief overview of the indicies invested in our report are : \n \\begin{itemize}\n \\item\n \\textbf{DJUBS Commodity index} : is a broadly diversified index that allows investors to track commodity futures through a single, simple measure. As the index has grown in popularity since its introduction in 1998, additional versions and a full complement of sub-indices have been introduced. 
Together, the family offers investors a comprehensive set of tools for measuring the commodity markets.\n \\item\n \\textbf{Morningstar CLS index} : is a simple rules-based trend following index operated in commodities\n \\item\n \\textbf{Newedge CTI} : includes funds that utilize a variety of investment strategies to profit from price moves in commodity markets.\nManagers typically use either (i) a trading orientated approach,involving the trading of physical commodity products and/or of commodity\nderivative instruments in either directional or relative value strategies; Or (ii) Long short equity strategies focused on commodity related stocks.\n \\end{itemize}\n%Let $X \\sim N(0,1)$ and $Y \\sim \\textrm{Exponential}(\\mu)$. Let\n%$Z = \\sin(X)$. $\\sqrt{X}$.\n \n%$\\hat{\\mu}$ = $\\displaystyle\\frac{22}{7}$\n%e^{2 \\mu} = 1\n%\\begin{equation}\n%\\left(\\sum_{t=1}^{T} R_t/T\\right) = \\hat{\\mu} \\\\\n%\\end{equation}\n\n\\section{Performance Summary Chart}\n\nGiven a series of historical returns \\((R_1,R_2, . . 
.,R_T)\\) from \\textbf{January-2001} to \\textbf{December-2009}, create a wealth index chart, bars for per-period performance, and underwater chart for drawdown of the 3 funds.\n\n<>=\ndata <- read.csv(\"C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/data/HAM3-data.csv\") \ndates <- data$X\nvalues <- data[,-1] # convert percentage to return\nCOM <- as.xts(values, order.by=as.Date(dates))\nCOM.09<-COM[,9:11]\ncharts.PerformanceSummary(COM.09[1:108,],colorset = rich6equal, lwd = 2, ylog = TRUE)\n@\n\nThe above figure shows the behavior of the respective fund performance, which is \\textbf{upward} trending for all the funds till the period of \\textbf{\"January-2008\"}.For comparative purpose, one can observe the distinct \\textbf{drawdown} of \\textbf{Newedge CTI} since the latter period.\n\n\\section{Statistical and Drawdown Analysis}\n\nA summary of Fund Return series characteristics show that \\textbf{DJUBS.Commodity} performs worse relatively to it's peers.The most distinct characteristic being highest : \\textbf{Variance, Stdev, SE Mean} and well as negative \\textbf{Skewness} .The table shows clearly, that the returns of all the hedge fund indices are non-normal.Presence of \\emph{negative} skewness is a major area of concern for the downside risk potential and expected maximum loss.\n\n<>=\ntable.Stats(COM.09, ci = 0.95, digits = 4)\n@\n\n\nThe results are consistent with Drawdown Analysis in which \\textbf{DJUBS.Commodity} performs worse relatively to it's peers.\n\n<>=\ntable.DownsideRisk(COM.09, ci = 0.95, digits = 4)\n@\n\\section{Non-i.i.d GSoC Usage}\n\\subsection{Auctocorrelation Adjusted Standard Deviation}\nGiven a sample of historical returns \\((R_1,R_2, . . .,R_T)\\),the method assumes the fund manager smooths returns in the following manner, when 't' is the unit time interval, with $\\rho$\\ as the respective term autocorrelation coefficient\n\n%Let $X \\sim N(0,1)$ and $Y \\sim \\textrm{Exponential}(\\mu)$. 
Let\n%$Z = \\sin(X)$. $\\sqrt{X}$.\n \n%$\\hat{\\mu}$ = $\\displaystyle\\frac{22}{7}$\n%e^{2 \\mu} = 1\n%\\begin{equation}\n%\\left(\\sum_{t=1}^{T} R_t/T\\right) = \\hat{\\mu} \\\\\n%\\end{equation}\n\\begin{equation}\n \\sigma_{T} = \\sqrt{ \\sum_k^n(\\sigma_{t}^2 + 2*\\rho_i) } \\\\\n\\end{equation}\n\n\n<>=\nACFVol = ACStdDev.annualized(COM.09)\nVol = StdDev.annualized(COM.09)\nbarplot(rbind(ACFVol,Vol), main=\"ACF and Orignal Volatility\",\n xlab=\"Fund Type\",ylab=\"Volatilty (in %)\", col=rich6equal[2:3], beside=TRUE)\n legend(\"topright\", c(\"ACF\",\"Orignal\"), cex=0.6, \n bty=\"2\", fill=rich6equal[2:3]);\n@\n\nFrom the above figure, we can observe that all the funds, exhibit \\textbf{serial auto correlation}, which results in significantly \\emph{inflated} standard deviation.\n\\subsection{Andrew Lo Statistics of Sharpe Ratio}\n\nThe building blocks of the \\textbf{Sharpe Ratio} : expected returns and volatilities are unknown quantities that must be estimated statistically and are,\ntherefore, subject to \\emph{estimation error} .To address this question, Andrew Lo derives explicit expressions for the statistical distribution of the Sharpe ratio using\nstandard asymptotic theory. \n\nThe Sharpe ratio (SR) is simply the return per unit of risk (represented by variability). In the classic case, the unit of risk is the standard deviation of the returns.\n \n\\deqn{\\frac{\\overline{(R_{a}-R_{f})}}{\\sqrt{\\sigma_{(R_{a}-R_{f})}}}}\n\nThe relationship between SR and SR(q) is somewhat more involved for non-\nIID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the co-variances. 
Specifically, under\nthe assumption that returns \\(R_t\\) are stationary,\n\\begin{equation}\nVar[(R_t)] = \\sum_{i=0}^{q-1} \\sum_{j=1}^{q-1} Cov(R(t-i),R(t-j)) = q\\hat{\\sigma^2} + 2\\hat{\\sigma^2} \\sum_{k=1}^{q-1} (q-k)\\rho_k \\\\\n\\end{equation}\n\nWhere $\\rho$\\(_k\\) = Cov(\\(R(t)\\),\\(R(t-k\\)))/Var[\\(R_t\\)] is the \\(k^{th}\\) order autocorrelation coefficient's of the series of returns.This yields the following relationship between SR and SR(q):\n\n\\begin{equation}\n\\hat{SR}(q) = \\eta(q) \\\\\n\\end{equation}\n\nWhere :\n\n\\begin{equation}\n\\eta(q) = \\frac{q}{\\sqrt{(q\\hat{\\sigma^2} + 2\\hat{\\sigma^2} \\sum_{k=1}^{q-1} (q-k)\\rho_k)}} \\\\\n\\end{equation}\n \nIn given commodity funds, we find results, similar reported in paper, that the annual Sharpe ratio for a hedge fund can be overstated by as much as \\textbf{65} \\% because of the presence of \\textbf{serial correlation}.We can observe that the fund \"\\textbf{DJUBS.Commodity}\", which has the largest drawdown and serial autocorrelation, has it's Andrew Lo Sharpe ratio , \\emph{decrease} most significantly as compared to other funds.\n\n<>=\nLo.Sharpe = LoSharpe(COM.09)\nTheoretical.Sharpe= SharpeRatio.annualized(COM.09)\nbarplot(rbind(Theoretical.Sharpe,Lo.Sharpe), main=\"Sharpe Ratio Observed\",\n xlab=\"Fund Type\",ylab=\"Value\", col=rich6equal[2:3], beside=TRUE)\n legend(\"topright\", c(\"Orginal\",\"Lo\"), cex=0.6, \n bty=\"2\", fill=rich6equal[2:3]);\n@\n\\subsection{Conditional Drawdown}\nA new one-parameter family of risk measures called Conditional Drawdown (CDD) has\nbeen proposed. These measures of risk are functional of the portfolio drawdown (underwater) curve considered in active portfolio management. For some value of $\\hat{\\alpha}$ the tolerance parameter, in the case of a single sample path, drawdown functional is defined as the mean of the worst (1 \\(-\\) $\\hat{\\alpha}$)100\\% drawdowns. 
The CDD measure generalizes the notion of the drawdown functional to a multi-scenario case and can be considered as a generalization of deviation measure to a dynamic case. The CDD measure includes the Maximal Drawdown and Average Drawdown as its limiting cases.Similar to other cases, \\textbf{DJUBS.Commodity}, is the worst performing fund with worst case conditional drawdown greater than \\textbf{50\\%} and \\textbf{Newedge.CTI} performing significantly well among the peer commodity indices with less than \\textbf{15\\%}.\n\n<>=\nc.draw=CDrawdown(COM.09)\ne.draw=ES(COM.09,.95,method=\"gaussian\")\nc.draw=100*as.matrix(c.draw)\ne.draw=100*as.matrix(e.draw)\nbarplot(rbind(-c.draw,-e.draw), main=\"Expected Loss in (%) \",\n xlab=\"Fund Type\",ylab=\"Value\", col=rich6equal[2:3], beside=TRUE)\n legend(\"topright\", c(\"Conditional Drawdown\",\"Expected Shortfall\"), cex=0.6, \n bty=\"2\", fill=rich6equal[2:3]);\n@\n\\subsection{Calmar and Sterling Ratio}\nBoth the Calmar and the Sterling ratio are the ratio of annualized return over the absolute value of the maximum drawdown of an investment.\n{equation}\n\\begin{equation}\n Calmar Ratio = \\frac{Return [0,T]}{max Drawdown [0,T]} \\\\\n\\end{equation}\n\n\\begin{equation}\n Sterling Ratio = \\frac{Return [0,T]}{max Drawdown [0,T] - 10\\%} \\\\\n\\end{equation}\n<>=\nround(CalmarRatio.Norm(COM.09,1),4)\nround(SterlingRatio.Norm(COM.09,1),4)\n@\nFor a 1 year \\emph{horizon} return, we can see that Newedge.CTI is the clear performer in this metric as well.However, a \\textbf{surprising} observed result, is negative \\emph{Sterling} and \\emph{Calmar} ratio for Morningstar.CLS . 
\n\\subsection{GLM Smooth Index}\nGLM Smooth Index is a useful parameter to quantify the degree of autocorrelation.It is a summary statistic for measuring the concentration of autocorrelation present in the lag factors (up-to 6) , which can be defined by the below equation as :\n\\begin{equation}\n\\xi = \\sum_{j=0}^{k} \\theta _j^2 \\\\\n\\end{equation}\n\nThis measure is well known in the industrial organization literature as the Herfindahl index, a measure of the concentration of firms in a given industry where $\\theta$\\(_j\\) represents the market share of firm j. Because $\\xi_t$\\ is confined to the unit interval, and is minimized when all the $\\theta$\\(_j\\) 's are identical, which implies a value of 1/k+1 for $\\xi_i$\\ ; and is maximized when one coefficient is 1 and the rest are 0. In the context of smoothed returns, a lower value of implies less smoothing, and the upper bound of 1 implies pure smoothing, hence we shall refer to $\\theta$\\(_j\\) as a \\textbf{smoothing index}.\n\n<>=\nlibrary(noniid.sm)\nsource(\"C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R\")\nGLM.index=GLMSmoothIndex(COM.09)\nbarplot(as.matrix(GLM.index), main=\"GLM Smooth Index\",\n xlab=\"Fund Type\",ylab=\"Value\",colorset = rich6equal[1], beside=TRUE)\n@\n\nFor the given chart, we can observe that \\textbf{all the funds} have significant level of smooth returns.\n\\subsection{Acar Shane Maximum Loss}\n\nMeasuring risk through extreme losses is a very appealing idea. This is indeed how financial companies perceive risks. This explains the popularity of loss statistics such as the maximum drawdown and maximum loss. An empirical application to fund managers performance show that \\textbf{very few investments} exhibit \\emph{abnormally high or low drawdowns}. Consequently, it is doubtful that drawdowns statistics can be used \nto significantly distinguish fund managers. 
This is confirmed by the fact that predicting one-period ahead drawdown is an almost impossible task. Errors average at the very best 27\\% of the true value observed in the market.\n\nThe main concern of this paper is the study of alternative risk measures: namely maximum loss and maximum drawdown. Unfortunately, there is no analytical formula to establish the maximum drawdown properties under the random walk assumption. We should note first that due to its definition, the maximum drawdown divided by volatility is an only function of the ratio mean divided by volatility.\n\n\n\\begin{equation}\nMD / \\sigma = Min \\frac{ \\sum_{j=1}^{t} X_{j}}{\\sigma} = F(\\frac{\\mu}{\\sigma}) \\\\\n\\end{equation}\n\nSuch a ratio is useful in that this is a complementary statistic to the return divided by volatility ratio. To get some insight on the relationships between maximum drawdown per unit of volatility and mean return divided by volatility, we have proceeded to Monte-Carlo simulations. We have simulated cash flows over a period of 36 monthly returns and measured maximum drawdown for varied levels of annualized return divided by volatility varying from minus two to two by step of 0.1. The process has been repeated six thousand times.\n\nFor instance, an investment exhibiting an annualized return/volatility equal to -2 \nshould experience on average a maximum drawdown equal to six times the annualized volatility. \n\nOther observations are that: \n\\begin{itemize}\n\\item maximum drawdown is a positive function of the return/volatility ratio \n\\item confidence interval widens as the return/volatility ratio decreases \n\\end{itemize}\n\nThis means that as the return/volatility increases not only the magnitude of drawdown decreases but the confidence interval as well. 
In others words losses are both smaller and more predictable.\n\n<>=\nsource(\"C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/AcarSim.R\")\nAcarSim(COM.09)\n@\n\nAs we can see from the \\emph{simulated chart}, DJUBS.Commodity comes at the bottom , which imply a \\emph{lower} \\textbf{return-maximum loss} ratio.\n\n<>=\nlibrary(noniid.sm)\nchart.Autocorrelation(COM.09)\n@\n\nFinally, from the autocorrelation lag plot, one can observe, significant \\textbf{positive} autocorrelation for \\textbf{Newedge.CTI}, which is a \\emph{warning} signal in case drawdown occurs, in an otherwise excellent performing fund.\n\\section{Conclusion}\n\nAnalyzing all the function results, one can clearly differentiate \\textbf{Newedge.CTI}, as a far superior fund as compared to it's peer.\\textbf{MorningStar.CLS}, exhibits highest autocorrelation as well as lowest Calmar/Sterling ratio, but compared on other front, it distinctly outperforms \\textbf{DJUBS.Commodity}, which has performed poorly on all the tests. \n\nThe above figure shows the characteristic of the respective fund performance, which is after the period of analysis till \\textbf{\"July-2013\"}.At this moment, we would like the readers, to use the functions developed in the R \\textbf{\"PerformanceAnalytics\"} package, to study ,use it for analysis as well as for forming their own opinion. 
\n\n<>=\ncharts.PerformanceSummary(COM.09[109:151],colorset = rich6equal, lwd = 2, ylog = TRUE)\n@\n\n\n\\end{document}", - "created" : 1379111210609.000, - "dirty" : false, - "encoding" : "UTF-8", - "folds" : "", - "hash" : "3404124299", - "id" : "445E439C", - "lastKnownWriteTime" : 1378859979, - "path" : "C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.Rnw", - "properties" : { - "ignored_words" : "drawdown,autocorrelation,Newedge,MorningStar,Calmar,PerformanceAnalytics,url,eps,Shubhankit,Mohan,Morningstar,Drawdown,Stdev,Skewness,skewness,GSoC,Auctocorrelation,volatilities,Cov,th,drawdowns,multi,Herfindahl,Acar,analytical\n", - "tempName" : "Untitled1" - }, - "source_on_save" : false, - "type" : "sweave" -} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/44B07808 =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/44B07808 2013-09-20 17:36:42 UTC (rev 3149) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/44B07808 2013-09-20 18:20:05 UTC (rev 3150) @@ -1,15 +0,0 @@ -{ - "contents" : "\\name{lmi}\n\\alias{lmi}\n\\title{Fitting Generalized Linear Models with HC and HAC Covariance Matrix Estimators}\n\\usage{\n lmi(formula, data, vcov = NULL, subset, weights,\n na.action, method = \"qr\", model = TRUE, x = FALSE,\n y = FALSE, qr = TRUE, singular.ok = TRUE,\n contrasts = NULL, offset, ...)\n}\n\\arguments{\n \\item{formula}{an object of class \"formula\" (or one that\n can be coerced to that class): a symbolic description of\n the model to be fitted. The details of model\n specification are given under ?Details?.}\n\n \\item{data}{an optional data frame, list or environment\n (or object coercible by as.data.frame to a data frame)\n containing the variables in the model. 
If not found in\n data, the variables are taken from environment(formula),\n typically the environment from which lm is called.}\n\n \\item{vcov}{HC-HAC covariance estimation}\n\n \\item{weights}{an optional vector of weights to be used\n in the fitting process. Should be NULL or a numeric\n vector. If non-NULL, weighted least squares is used with\n weights weights (that is, minimizing sum(w*e^2));\n otherwise ordinary least squares is used. See also\n ?Details?,}\n\n \\item{subset}{an optional vector specifying a subset of\n observations to be used in the fitting process.}\n\n \\item{na.action}{a function which indicates what should\n happen when the data contain NAs. The default is set by\n the na.action setting of options, and is na.fail if that\n is unset. The ?factory-fresh? default is na.omit.\n Another possible value is NULL, no action. Value\n na.exclude can be useful.}\n\n \\item{method}{the method to be used; for fitting,\n currently only method = \"qr\" is supported; method =\n \"model.frame\" returns the model frame (the same as with\n model = TRUE, see below).}\n\n \\item{model}{logicals. If TRUE the corresponding\n components of the fit (the model frame, the model matrix,\n the response, the QR decomposition) are returned.}\n\n \\item{x}{logicals. If TRUE the corresponding components\n of the fit (the model frame, the model matrix, the\n response, the QR decomposition) are returned.}\n\n \\item{y}{logicals. If TRUE the corresponding components\n of the fit (the model frame, the model matrix, the\n response, the QR decomposition) are returned.}\n\n \\item{qr}{logicals. If TRUE the corresponding components\n of the fit (the model frame, the model matrix, the\n response, the QR decomposition) are returned.}\n\n \\item{singular.ok}{logical. If FALSE (the default in S\n but not in R) a singular fit is an error.}\n\n \\item{contrasts}{an optional list. 
See the contrasts.arg\n of model.matrix.default.}\n\n \\item{offset}{this can be used to specify an a priori\n known component to be included in the linear predictor\n during fitting. This should be NULL or a numeric vector\n of length equal to the number of cases. One or more\n offset terms can be included in the formula instead or as\n well, and if more than one are specified their sum is\n used. See model.offset.}\n\n \\item{\\dots}{additional arguments to be passed to the low\n level regression fitting functions (see below).}\n}\n\\description{\n lm is used to fit generalized linear models, specified by\n giving a symbolic description of the linear predictor and\n a description of the error distribution.\n}\n\\details{\n see \\code{\\link{lm}}.\n}\n\\author{\n The original R implementation of glm was written by Simon\n Davies working for Ross Ihaka at the University of\n Auckland, but has since been extensively re-written by\n members of the R Core team. The design was inspired by\n the S function of the same name described in Hastie &\n Pregibon (1992).\n}\n\\keyword{covariance}\n\\keyword{estimation}\n\\keyword{fitting}\n\\keyword{HAC}\n\\keyword{HC}\n\\keyword{model}\n\\keyword{regression}\n\n", - "created" : 1379108371760.000, - "dirty" : false, - "encoding" : "UTF-8", - "folds" : "", - "hash" : "1851514728", - "id" : "44B07808", - "lastKnownWriteTime" : 1379111172, - "path" : "C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd", - "properties" : { - }, - "source_on_save" : false, - "type" : "r_doc" -} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/58E583C6 =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/58E583C6 2013-09-20 17:36:42 UTC (rev 3149) +++ 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/58E583C6 2013-09-20 18:20:05 UTC (rev 3150) @@ -1,15 +0,0 @@ -{ - "contents" : "* Edit the help file skeletons in 'man', possibly combining help files for multiple\n functions.\n* Edit the exports in 'NAMESPACE', and add necessary imports.\n* Put any C/C++/Fortran code in 'src'.\n* If you have compiled code, add a useDynLib() directive to 'NAMESPACE'.\n* Run R CMD build to build the package tarball.\n* Run R CMD check to check the package tarball.\n\nRead \"Writing R Extensions\" for more information.\n", - "created" : 1379111136257.000, - "dirty" : false, - "encoding" : "UTF-8", - "folds" : "", - "hash" : "3579872522", - "id" : "58E583C6", - "lastKnownWriteTime" : 1378551041, - "path" : "C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/Read-and-delete-me", - "properties" : { - }, - "source_on_save" : false, - "type" : "text" -} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/7D095D73 =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/7D095D73 2013-09-20 17:36:42 UTC (rev 3149) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/7D095D73 2013-09-20 18:20:05 UTC (rev 3150) @@ -1,15 +0,0 @@ -{ - "contents" : "Package: noniid.sm\nType: Package\nTitle: Non-i.i.d. GSoC 2013 Shubhankit\nVersion: 0.1\nDate: $Date: 2013-05-13 14:30:22 -0500 (Mon, 13 May 2013) $\nAuthor: Shubhankit Mohan \nContributors: Peter Carl, Brian G. Peterson\nDepends:\n xts,\n PerformanceAnalytics,\n tseries,\n stats\nMaintainer: Brian G. 
Peterson \nDescription: GSoC 2013 project to replicate literature on drawdowns and\n non-i.i.d assumptions in finance.\nLicense: GPL-3\nByteCompile: TRUE\nCollate:\n 'ACStdDev.annualized.R'\n 'CalmarRatio.Norm.R'\n 'CDrawdown.R'\n 'chart.AcarSim.R'\n 'chart.Autocorrelation.R'\n 'EmaxDDGBM.R'\n 'GLMSmoothIndex.R'\n 'na.skip.R'\n 'noniid.sm-internal.R'\n 'QP.Norm.R'\n 'Return.GLM.R'\n 'Return.Okunev.R'\n 'SterlingRatio.Norm.R'\n 'table.ComparitiveReturn.GLM.R'\n 'table.EMaxDDGBM.R'\n 'table.UnsmoothReturn.R'\n 'UnsmoothReturn.R'\n 'LoSharpe.R'\n 'se.LoSharpe.R'\n 'table.Sharpe.R'\n 'glmi.R'\n 'lmi.R'\n", - "created" : 1379107778236.000, - "dirty" : false, - "encoding" : "UTF-8", - "folds" : "", - "hash" : "677145396", - "id" : "7D095D73", - "lastKnownWriteTime" : 1379111172, - "path" : "C:/Users/shubhankit/Desktop/1 week/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/DESCRIPTION", - "properties" : { - }, - "source_on_save" : false, - "type" : "dcf" -} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/E5D7D248/sdb/per/t/934ACCDE [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3150 From noreply at r-forge.r-project.org Sun Sep 22 13:55:03 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 13:55:03 +0200 (CEST) Subject: [Returnanalytics-commits] r3151 - pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm Message-ID: <20130922115503.CA52B18548B@r-forge.r-project.org> Author: shubhanm Date: 2013-09-22 13:55:03 +0200 (Sun, 22 Sep 2013) New Revision: 3151 Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/.Rproj.user/ Log: final touches- .Rproj history removed From noreply at r-forge.r-project.org Sun Sep 22 14:49:33 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 14:49:33 +0200 (CEST) Subject: [Returnanalytics-commits] r3152 - in 
pkg/PerformanceAnalytics/sandbox/Shubhankit: noniid.sm/vignettes sandbox sandbox/vignettes Message-ID: <20130922124933.3C953184DE7@r-forge.r-project.org> Author: shubhanm Date: 2013-09-22 14:49:32 +0200 (Sun, 22 Sep 2013) New Revision: 3152 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpeRatio.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - EmaxDDGBM.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - LoSharpeRatio.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - UnSmooth Return Analysis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -Commodity Index Fund Analysis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -ConditionalDrawdown.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -GLMReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -GLMSmoothIndex.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -NormCalmar-Sterling.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -OkunevWhite.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -ShaneAcarMaxLoss.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/AcarSim.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/Commodity.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/CommodityReport.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/Managers.Rnw Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Commodity.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Managers.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/AcarSim.R pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-002.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-009.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-010.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-011.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-012.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Commodity.toc pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-002.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-009.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-010.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-011.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-012.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/CommodityReport.toc pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/LoSharpe.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-002.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-008.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-009.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-010.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-011.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/Managers.toc pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ACFSTDEV.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/Commodity_ResearchReport.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ConditionalDrawdown-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ConditionalDrawdown.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMReturn-Graph1.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMReturn-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/GLMSmoothIndex.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpeRatio.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/NormCalmar-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/NormCalmar.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/OkunevWhite-Graph1.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/OkunevWhite-Graph10.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/OkunevWhite.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/Rplots.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ShaneAcarMaxLoss-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/ShaneAcarMaxLoss.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-003.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-004.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-005.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-006.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-007.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-008.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-Graph3.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-Graph4.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-Graph5.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-Graph6.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis-concordance.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.log pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.synctex.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.tex pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/UnSmoothReturnAnalysis.toc Modified: 
pkg/PerformanceAnalytics/sandbox/Shubhankit/sandbox/vignettes/LoSharpe.Rnw Log: final touches - deletion of junk files + transfers Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.pdf =================================================================== (Binary files differ) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Commodity.pdf =================================================================== (Binary files differ) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/CommodityReport.pdf =================================================================== (Binary files differ) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.pdf =================================================================== (Binary files differ) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.pdf =================================================================== (Binary files differ) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.pdf =================================================================== (Binary files differ) Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.pdf =================================================================== (Binary files differ) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpeRatio.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpeRatio.Rnw (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpeRatio.Rnw 2013-09-22 12:49:32 UTC (rev 3152) @@ -0,0 +1,116 @@ +%% no need for \DeclareGraphicsExtensions{.pdf,.eps} + +\documentclass[12pt,letterpaper,english]{article} +\usepackage{times} +\usepackage[T1]{fontenc} +\IfFileExists{url.sty}{\usepackage{url}} + 
{\newcommand{\url}{\texttt}} + +\usepackage{babel} +%\usepackage{noweb} +\usepackage{Rd} + +\usepackage{Sweave} +\SweaveOpts{engine=R,eps=FALSE} +%\VignetteIndexEntry{Performance Attribution from Bacon} +%\VignetteDepends{PerformanceAnalytics} +%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} +%\VignettePackage{PerformanceAnalytics} + +%\documentclass[a4paper]{article} +%\usepackage[noae]{Sweave} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} +%\usepackage{graphicx} +%\usepackage{graphicx, verbatim} +%\usepackage{ucs} +%\usepackage[utf8x]{inputenc} +%\usepackage{amsmath, amsthm, latexsym} +%\usepackage{graphicx} + +\title{Lo Sharpe Ratio} +\author{R Project for Statistical Computing} + +\begin{document} +\SweaveOpts{concordance=TRUE} + +\maketitle + + +\begin{abstract} +The building blocks of the Sharpe ratio-expected returns and volatilities- +are unknown quantities that must be estimated statistically and are, +therefore, subject to estimation error.In an illustrative +empirical example of mutual funds and hedge funds, Andrew Lo finds that the annual Sharpe ratio for a hedge fund can be overstated by as much as 65 percent +because of the presence of serial correlation in monthly returns, and once +this serial correlation is properly taken into account, the rankings of hedge +funds based on Sharpe ratios can change dramatically. +\end{abstract} + +<>= +library(PerformanceAnalytics) +library(noniid.sm) +data(edhec) +@ + + +\section{Background} +Given a sample of historical returns \((R_1,R_2, . . .,R_T)\), the standard estimators for these moments are the sample mean and variance: + +%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let +%$Z = \sin(X)$. $\sqrt{X}$. 
+ +%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ +%e^{2 \mu} = 1 +%\begin{equation} +%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ +%\end{equation} +\begin{equation} + \hat{\mu} = \sum_{t=1}^{T} (R_t)/T\\ +\end{equation} +\begin{equation} +\hat{\sigma^2} = \sum_{t=1}^{T} (R_t-\hat{\mu})^2/T\\ +\end{equation} + +From which the estimator of the Sharpe ratio $\hat{SR}$ follows immediately: +%\left(\mu \right) = \sum_{t=1}^{T} \(Ri)/T\ \\ +\begin{equation} +\hat{SR} = (\hat{\mu}- R_f)/\hat{\sigma} \\ +\end{equation} + +Using a set of techniques collectively known as "large-sample'' or "asymptotic'' statistical theory in which the Central Limit Theorem is applied to +estimators such as and , the distribution of and other nonlinear functions of and can be easily derived. + +\section{Non-IID Returns} +The relationship between SR and SR(q) is somewhat more involved for non- +IID returns because the variance of Rt(q) is not just the sum of the variances of component returns but also includes all the covariances. Specifically, under +the assumption that returns \(R_t\) are stationary, +\begin{equation} +Var[(R_t)] = \sum_{i=0}^{q-1} \sum_{j=1}^{q-1} Cov(R(t-i),R(t-j)) = q\hat{\sigma^2} + 2\hat{\sigma^2} \sum_{k=1}^{q-1} (q-k)\rho_k \\ +\end{equation} + +Where $\rho$\(_k\) = Cov(\(R(t)\),\(R(t-k\)))/Var[\(R_t\)] is the \(k^{th}\) order autocorrelation coefficient's of the series of returns.This yields the following relationship between SR and SR(q): + +\begin{equation} +\hat{SR}(q) = \eta(q) \\ +\end{equation} + +Where : + +\begin{equation} +\eta(q) = \frac{q}{\sqrt{(q\hat{\sigma^2} + 2\hat{\sigma^2} \sum_{k=1}^{q-1} (q-k)\rho_k)}} \\ +\end{equation} + +\section{Usage} + +In this example we use edhec database, to compute Sharpe Ratio for Hedge Fund Returns. 
+<<>>= +library(PerformanceAnalytics) +data(edhec) +LoSharpe(edhec) +@ + + +\end{document} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Managers.pdf =================================================================== (Binary files differ) Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - EmaxDDGBM.pdf =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - EmaxDDGBM.pdf ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - LoSharpeRatio.pdf =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - LoSharpeRatio.pdf ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - UnSmooth Return Analysis.pdf =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - UnSmooth Return Analysis.pdf (rev 0) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - UnSmooth Return Analysis.pdf 2013-09-22 12:49:32 UTC (rev 3152) @@ -0,0 +1,3420 @@ +%PDF-1.5 +%???? 
+1 0 obj << +/Length 859 +>> +stream +concordance:UnSmoothReturnAnalysis.tex:UnSmoothReturnAnalysis.Rnw:1 46 1 1 5 1 4 45 1 1 4 1 2 2 1 1 4 1 2 3 1 1 3 1 2 3 1 1 3 1 2 5 1 1 2 1 0 3 1 5 0 1 1 5 0 1 2 6 0 1 1 5 0 1 2 1 0 1 1 1 2 1 0 1 2 1 0 1 2 5 0 1 2 1 1 1 2 1 0 4 1 1 2 1 0 1 2 1 0 1 2 6 0 1 3 10 1 1 5 5 0 1 2 9 1 1 2 10 0 1 1 10 0 1 2 10 1 1 2 1 0 1 1 12 0 1 1 13 0 1 2 10 1 1 2 1 0 1 1 12 0 1 1 13 0 1 2 4 1 1 2 1 0 1 1 12 0 1 1 13 0 1 2 11 1 1 2 1 0 1 1 16 0 1 1 17 0 1 2 9 1 1 5 17 0 1 2 9 1 1 3 12 0 1 1 12 0 1 2 8 1 1 5 15 0 1 2 10 1 1 2 1 0 1 1 14 0 1 1 15 0 1 2 8 1 1 2 1 0 1 1 16 0 1 1 17 0 1 2 12 1 1 2 1 0 1 1 12 0 1 1 13 0 1 2 4 1 1 2 1 0 1 1 12 0 1 1 13 0 1 2 4 1 1 2 1 0 1 1 16 0 1 1 17 0 1 2 8 1 1 2 1 0 1 1 16 0 1 1 17 0 1 2 8 1 1 2 1 0 1 1 16 0 1 1 17 0 1 2 8 1 1 2 1 0 1 1 14 0 1 1 15 0 1 2 8 1 1 2 1 0 1 1 14 0 1 1 15 0 1 2 9 1 2 2 2 1 2 2 4 1 1 2 5 0 1 2 3 1 1 2 5 0 1 2 1 1 +endstream +endobj +4 0 obj << +/Length 1530 +/Filter /FlateDecode +>> +stream +x??XIo?F??W9Q@?pn??m?R?J{h{?????J?q???mCR????A?p?oy??7C?\\<?$3Gi???b5Sy?Z?R?"k?lQ?? ???\u3u4?f?8????w????? +??p`_v??5.>?W???~??????t?#??D?YX?1d????Nt?a9?/?4M?/??7?@?? ?"?8??x?js?E??MK? ???H-Y???v?)61,?????????7&C?qtM]???"?????]=??2% ^??69?&g3y?W???????:&ws???j?N??~???/?? ???????`J;??? +E?SAa??L???b?2? @????`??%??????????5Y)??s?????W??n???^?9?#?0??4????V?^???/Qw? ?6???W?j+?[?b??\?1Bg?q?[Le&Md? ???+p????$??@(???flo ?V??1??? n?X?R?/zN?????q??"??@Z8??P?J?????#? ?????9?w?A???f?:Bw??^ ?x?U?S???~??A?qiQ???????"?K??."???m?(S9|??-$?Z??\?$?;d?S0 ??yX?)m?[?h???????? +\?`s???H????i??2pw?T???)????0]mp?X+ +" ???;~"???????Z?f???_??w?:@8?oOj?@?Jt??8J#QV?Yb???K??q?Z??G???m(Q????9Vj?$pm?????w??;?C]???NW???r*]p&^????S +4tKJ?\h????2e3lR?,9? h???@?'7?T??'Z0?DrV?!????????cg???5??^?B??L.R??`'$3?%]&?g???????0or??^LL?}?-=???????,W?m?7??w?;9taA??/;??m9r??w?FX? !? ?Gy??? ????????.?????????^%??cA ?}/??% j??!??^(~??????`X??~??[D??J??h??e"????x?7??% ????B? 
?n?V?*?????DL????A??????#%>?c?-????O?????> +stream +x??Y?o?6?_???X??S?R?a?V?X???un?4n;??t??ww??(Yv????A6%R???~w???{?????E???B3?2?2TE?qt<?9?=.?r??V??_?p??kS??O??L?)?3h??u ?\3xC? +????????1~?a?v?????M?+??]=C??7??~? D op?$4_???????B?uP,?(????$???f,???5B?????d]? ??Q??P ??S????g?iDlN?$$_ ???p????W"r??G?)DG??????f?.V +|?>?x??[?zb +!? +<M??nD?/K? +U??t??3 +&??Do???d?y?f?p?? +??[??Ox??0??1h?+D?B?&?b?????d?]?~?"?]?? +??OT?????!???@A(??????W?f??da??7??As??P`????C??O?H??jP??*M??0??!(m??Q???|????V??5, +?\?R??????]?:|???] ?;JKC!??RT?d ?wJ;???D???????G? Ug?WW ??u??/??????I{??a?>?V??=?I???m?S |5??????????9??????l?!U#?????M?6???o???^????{R????&b?{A??J?gv?? ?!???Ue??n2X??^r???Y?"?&(r!J5NW?,?!'??ec????Ec(?p(p????"?;oXj????B?1? ?'?`Cu????????0??W???b)pB??on??l]???*}? +? \????,(%:???$axo??1??Hl??b{?Au??#\h[???2R?r6v??k,@t? u?%_h?io;?????=}?????c=?JQ??L{?|J???j?@_????@S +MM??/dR+=?v??Z?X????i???-Q?????6?WI???r???b?4m~6?? ;{-R?????!???H??g?n?4vse?n|?f}??U??y}?????y?]??A??? +?z?\?????I?|?????"h?4?s??h?????vu?h'F?lLc?H?$3Hb?VTJ?ia +?l@???o/ +?????????"P?O?@???I??f0'4 +Y12???)V$??E??=??8????PA?|or,a??U?yc?RBP??g?9[???mS +y"n??Y??LP??_?=K????'??4?iRNm?J?d?g????< ? +W8S-???$# J??:?????fKAD?6VX*????)??5?3?f?+??T??1?T4?[?? +??l??f{???k?b???j????V/??????xv?? ?????a?J,_?????$iK?g?r?vO?n??j3???t?+y???? +?U\u?ML4?`?j?~c?YQ}????L? ? +endstream +endobj +24 0 obj << +/Length 2286 +/Filter /FlateDecode +>> +stream +x?????6?>_???Z !^|8??Mb'??gR9?9???f$:????o?@?f?? ???????k&?(??0PjJe*??rQ3????gc&E??????>?)?!?'??`? ?#m?????k b>?@?l0?## ~??O???7?U??l????GO?^????I???@$z??&?????PUi????WT?? ?E;9?H???N????`H +D???~(???^_ye??ke?d??? 6??x????o?0??8&XkU#?!z???7???q?{???=??tu?> ebP??c?(?F????7e?P?4???/??? ;?)?j???????I??6K??????M??`???G7N????,!????????????#??s? ^G??-????#?O^??.X ?v??_#?o???S'hFO???g? 
l3?}&??m??EtdHWd?j?B#??P?A??j,C??>?|?.?????}?*??jU???6(AU?A?g??( +???`J???c??VN?@??M?n?|?c??????c@?N2V?:??,??x?//???B?b?Nl3?2??h?x~y???U?L??X?4?????o??&???m???@?T?T??:?????o?kkU?W??LY??nJ>?`??0fFRW?Z?A????3???9\:??E?(??Z?????.?????{???) +??????,Q???pc??]??1???0?P??=??\??V ????%?#?????J?W??r~?NJ??yGT?!?h??7Q????1?e???g?5?s"?P?Ek +?c?!A????d?7 Cwo?????a%?K??E????? +???6?k?DE?'???? ????a? ??E>???7?}{w?9;dx=??p???wO$c?e5C?e?j?z???n0VQH-?U??W?q?X8?E?????6?d??H???%]I?MwU?ss?C^y?n?[3\O???L??3I@?N?????z?E3D???W???P??n?|U?5????A??$b??.?6????g?T??o?n'?/SSB:5?pQ- ????}?>?B{'?r????W|I"uu??W?????U ++mb(?L?????W???N&???c??T?H5??S????{!????*\=_??B? ??1?ot?.???U???<c +E9???o??e???/?gp?n? + mf??Y???> +stream +x?]?MO?0 ???9???|4???!????a??mb]at????i??)J?????N?U2????J?U-???{?Q???????R +??h?"Pc??????T?$? ??5???????5????z_????_a??????8?d?4?o??x-tl$?????mZ?????^b?lL?4??x)?Z7?`?y??sHi?* b8)???C??f?| +endstream +endobj +21 0 obj << +/Type /XObject +/Subtype /Form +/FormType 1 +/PTEX.FileName (./UnSmoothReturnAnalysis-003.pdf) +/PTEX.PageNumber 1 +/PTEX.InfoDict 29 0 R +/BBox [0 0 432 432] +/Resources << +/ProcSet [ /PDF /Text ] +/Font << /F2 30 0 R/F3 31 0 R>> +/ExtGState << +>>/ColorSpace << +/sRGB 32 0 R +>>>> +/Length 11390 +/Filter /FlateDecode +>> +stream +x??}??????~?Y? Ue??-??6?Q??@?Y?zA??4?`???OF?{Dp???Y???z??|xzDf???]??_.}????^??:?K????]?m??e\??<^?~???W???????o?= ??m??????s???\?????O?p????r._????????a????a?????N?d|qy??n?}L;`???u+v?b???Z???>??u)??c?s+??c??S???tm??zl?u????X?7?????B=?? ?m????z???U??????zW?^_??xW?c????xL?z??q?h???p???x\?p?X??P=???z\??\=???W???k?X*9???i??~??0??z???_?w?p:Lm?k:?y?N???a]??Q????E+?????V?U?????????o]?x]?*?A?CX?????A?^?????????????????????"k??z]g??u4?q6??J???^$[?r??5??N????U????k?hp???m?u?_?a?X[o??=??P???y?u?????j?^??????x]??k?`p?'??v?At_???~?8?:??[????{????8????????4???7/??????_?=;??E??l?sl??Y??^ c???v?? ?^j?'N????m?^6kh/???U`p?4??0???8?? +????c??2??{???~??`? ??=0z?a?N????X???:.???}?6?}?bu??????cx??\1? + ???t?|f/?^?v???? +?V? 
+??{????g1;?;?}??#:^???f?7?????Y????Z???Y?d???[/??????????k???t?}?^???>;^???O?aG??~f???????:T??????2????^???????c? ???}?????????????WV?#F??p???????V?V~}???)F?G??????1?????0????????????n?????x??G>?1???h3?a????*??????G@??????{?? +n??l???c?d??F?b7????m44?n????P?}0:M6?t?UC?? +?f??Y?????P? ?n/j???}0a??f\????v?}???v????~?=?R?'J?$???J"{J???*?H??B"?HD"v|a???H???D$B??$?o+1???D?h?" ??$? +y????$?C? $??H?Y? +""????~??)??}e??>6y% ?7?X?.$??8H?JR#??uE"?O? +1???9??D44?  +?N???,H?"??????0??"1? 1?$?D4T? ??M?D???A"V??? +?b!(/?????B"?? ?$?;? ??N$b??N??SH???D?d%???-????X??*$b??J?? +???? +??$bC ???HH?H???$?x?D?TD"???????4?D? ??K?H?D"???Kac +HHD?tI":??$???q?A"0??D??1???D?{?D4?0?+??>???H"z?)$???G?? +??$???>?DX????D4?_??~=ID?(???A?H"??6I?4r#??xi???? ID'I?5???$Y$?c ?${{??????HD????Dt?Ua??II?H?HD?F?H":?@*@"?v??????$????O?IHI?a??ID?;0HD??S%?$"?${ ?_????y&)#????K????K?_???HD?v}????3?0???'?0???G$?$ba{?X8???x??X?{M??z??z-?$??? ?*Dam?? ???? +?D?_~%?w?>"?D?? $?g???$??J?-H"????D"v08??^???E"Vt?"?T"????~V$b??B?qT!?0??$?>J%?#{??????D?B?2a}a%??b%???????D"z_??$b??"??-?+z???u?$??.I"??s%?t?Q?D???D????D?T*@"???7'?0??@*?D?Da?`t*??~H??$$??( ?[?????DX?????s?? H??? ??$?? ( ~?B"???$?~?R????b$?H"\4?A$???(v? ? 6?`fL????J???v???????*1??0\H??@??R??U?I???????03?0??A"|l???$?0e??c& +a?? +?0 ??$?H?_O1? ?"????H???>?D?f??$b?d'?`???o???)H????DP? %b???DlU?0???'??~?X1???e?1?J?h??D,???Dl ?A"X??D4?)?? ~(?)?3g)?]C?X8?&??A?????@? ???}????4H?????q????? +IH?J%4?????????/%? ???;`?XG?K??RJ??)??D?I"?x??k(MJ????P"???D?D?DX????q???D????$??[? ???_A?!???????D ??H?X?S?J?rV"????0?NS??J???T"\:?1?>d8????N?D"f|<) ??$>??D???"??Dx;S8??YJG)#:?P"4U8c?`8??:?$"?1???D?????  6?S???J?a(3??H?NyR$bg#??A$bgPI$b??/%b??+%b??" ?H???OqP.????????0??D"(G?D??E":Fy?D? ?DR~H"???>I? +j?|????J"F??D Rv?D +Q???1H???q??a?J +g ;I?4??1???D ???1???1?>???J?=*?q*??10?(%???p?(e??????pIi glo?3v?*#fVE?X????TP??RJ?Ne?$?a?W8c??Q8c? ?p?J?@? )7$6?V1S???3??*?(e$b???????H?? "6??? ??c%?r?3???E? +I? +?  ;?Dt?bfOa??S8???H??c?$??rA??{?DX7??$6 ? )A?/H??"+'-"?~&+?? 
?7"??"??HD??????U$???'?Q9??? ?D +W?D?l_"?? +????D?R&D"F???????W$bD??P???V8c??? ??D"?I?Dp?&?`??`?????(??C!+???n???$???9?D??$ +?C?J +gL??0Y???V?D??K?h?(%??AO? 6?g???p??F8????YP8c?Z??K>q??eh??r?ceh??W>V?&? +[??!?Uk????-v?u??a???kc`??>?$???b?0?????????????u??E????????mp]}???+????? ???,??B????O??f7;??f???++"? +??n??Z?i??l\?3????|???K????[??r?????^>???O^?'^?i>]???????????.?????l/n=????g ???2?^_>?????%?_U'_????????????????W_}??'???lF????????+?????_?????^}??g??]?? >???????????????w??W??3/?1x??????/??????????z????? +?:????/^???Qj??G(??????\??????>? &W???uB??{ON?}? ???l??D AQ????/??~<????????????n??? +l??}???A???????y?????????k?? +/????????????_??*?w?&??? +(?????????Qw??s???e?>v:???d??L;??ag?1? ????????`??x T? ????.?>??O?U???F?G??=?"??V????1?x ?fvO?H;?1K~A?F???1x??????vdt????/????]??I;?E??|????p???9?xx`??c$?????E)?!??x/???Lj)?L/????\??i/??s?v????e?K?q??O?O?Q?????.?L?????@SB|T??A??&???\O???,?1=???($Ndc?????2?*???{x?U?>?????U??=F??L?Ka?+???W??cLL,?V3??v???eb?bz7q:?3???q??X????I4????T??x?2+?I??-??n????[???C?i?pl|?yL?-?V?,1?V????a>R?j?^'?Eq?O?`?S?h?+??~W?=%?7$1??*bL??+??????$?????b!??????L?4???F?||????#=??v?WO?dQ ???r+$)??r??????V?(?,???`?hf??? ?$h???1??r?Nt\???J??iNS?????qb e? ?[Y?j?w(G???QJ????K??????^?]?L?.?t.??t?!?????mb?i?!??P????S?`a?`????????(g???+aIU?!L%?X?x?CFHNV????}pT?#?7?Eq=?B??T~??S?i??G?N[???r*?1??gC?I ?%1d??U????+J)???H`?!?!}$???<'0E???,oet??O??????V?RXh?bB?B[?Bu:?%???.????(??I??{g%M?;&?$?$H"?6?$?h???????T`A"??" ??????D&?R??HD`???$??H"?D$"p;?I"??H"??|0? ????i'?H ??$BX$"0ID`???$A +>?D??(!R ?rC?$?0T$b??e"???$b.{g} AL????D??#????T~?#? ?A??K"?I"?D???b?5L??T???????x?5l?T~z?????????K?J????_5~?? +?mJwL??w????L??0jIJ????0?????QCz???,=???"=@?????b*?a?Y')=?l?'K?b?????MuH?n?=?N%???]??????%=h?hIw???N;nz'?m?)=??P??F???E"G?bzg?"pH???1I???X?"$ ,=`?????Vv?4?-G?G?=H?@"?F???V??.v??????N?v???D?????SH???-?m1<0K$b??,1??D"F@F1???D?z?J"??I??/?????":"H??4?J"xVC??u?D??&\$b?k"?N%?hL:?`?7H??????D?:u$b???8ID?F1?TX??Q???D :%?$bl?W?D??D"f???D??)?"???&?0{9@??>??? 
?$?mC$??J??Qg{?D0??$A?8 d????H??0D"t??SI???D"????????D ?v?$?~???H??1?TW??%s?D,?E"H??D?: ??$?Cp?m?????mw??}???$??pN6?????I??x#?r=+?w2'I"??? ?X8????_?? ??$Ap&\2)[9@?? ??&A0???LJ?;>R7? ?L?_j??&?X8h?D???????&%b?)?R"x?g(:?]J???R)33Y?D0?$b?Y":J\J???>?DL?H;`???"????s???s?\e X?N??X??V)W??U??v?]?NX?>?I;`???&??i?\'I??E???s???????v??r +6`?O??GX??????5????V? ?}:???b_O???????[0????s?b????}.?v?b??]????^? ???~i,v? ?0?7W?W???X?>wL;`??\2??a??2???5?X???W??E??v?M?NX?>WM;`???5????s???????>??'X?[??i?\8?'???s???s??\9?????????1?;a???:??????6 ,v?{?0????? ??~kw?j?Io?????iL;??a',v?????}??v?b??yD?C ;a??6?v?b?N3k??C;;a?Ou?,X?????v???vha',??T??}=???b???F???:'?o^??.z?????????Lf?,>6{l???d?1??~?l>F?#K?????G?G????:??:??:???#??=???uJ?????a????R???_????x?w?????]?'???"?1?7osC9?????????'?}????????:^??9omh???h??p?????????P??? +endstream +endobj +34 0 obj +<< +/Alternate /DeviceRGB +/N 3 +/Length 2596 +/Filter /FlateDecode +>> +stream +x???wTS????7?P????khRH +?H?.*1 J??"6DTpDQ??2(???C??"??Q??D?qp?Id???y?????~k????g?}??????LX ? ?X??????g` ?l?p??B?F?|??l???? ??*????????Y"1P??????\?8=W?%?O???4M?0J?"Y?2V?s?,[|??e9?2?<?s??e???'??9???`???2?&c?tI?@?o??|N6(??.?sSdl-c?(2?-?y?H?_??/X??????Z.$??&\S???????M????07?#?1??Y?rf??Yym?";?8980m-m?(?]????v?^??D???W~? +??e????mi]?P????`/???u}q?|^R??,g+???\K?k)/????C_|?R????ax??8?t1C^7nfz?D????p? ?????u?$??/?ED??L L??[???B?@???????????????X?!@~(* {d+??} ?G???????????}W?L??$?cGD2?Q????Z4 E@?@??????A(?q`1???D ??????`'?u?4?6pt?c?48.??`?R0??)? +?@???R?t C???X??CP?%CBH@??R?????f?[?(t? +C??Qh?z#0 ??Z?l?`O8?????28.????p|??O???X +????:??0?FB?x$ !???i@?????H???[EE1PL? ??????V?6??QP??>?U?(j +?MFk?????t,:??.FW???????8???c?1?L&?????9???a??X?:??? +?r?bl1? +{{{;?}?#?tp?8_\?8??"?Ey?.,?X?????%?%G??1?-??9????????K??l?.??oo???/?O$?&?'=JvM??x??????{????=Vs\?x? ????N???>?u?????c?Kz???=s?/?o?l????|??????y???? ??^d]???p?s?~???:;???/;]??7|?????W????p???????Q?o?H?!?????V????sn??Ys}?????????~4??]? 
=>?=:?`??;c??'?e??~??!?a???D?#?G?&}'/?^?x?I??????+?\????w?x?20;5?\?????_??????e?t???W?f^??Qs?-?m???w3????+??~???????O?~???? +endstream +endobj +38 0 obj << +/Length 639 +/Filter /FlateDecode +>> +stream +x?uT???@ ??si?0?L???+m?????????$?v?????I??B?y????A?>J(??:qH? ? +'?`????? ? ??? +V??c??ar=??09,????aln??S???]?cw??(e1X???\????N?wg{t???A??.?3D??????(??????M?ur???e=?\??^^?E??-&?s?+???V??? +Q???l?K??:??m?g???n?vp????~/????U> +/ExtGState << +>>/ColorSpace << +/sRGB 42 0 R +>>>> +/Length 11341 +/Filter /FlateDecode +>> +stream +x???K??????W??4?q????r??j??6???j K?Jn>?U??? {?@$y?%M??S?7"?>"?r???\?|??????????T/u???vi?|=?KY?u.?7?/????????u??????????}?????????s??4]Z??_.????r?]???????K??.????f|qy?????;d???u^?2????? +??Q6????=Z??t??c>?K? +???.?=?t?????????~m?>??G-?uM?A?=?t=?}Pg?y??t??c?!?u??{??:{?u????z??????~???z5?z?k??~????^??K????;J????d????:??;\?????~?j?^j????e?????]{A????0U+???n?W?h??~Y?o???i??j?W??.???~????:???m??????z??????v?W????z??????????????oe???????a???-Q??~le???{ 1???????????{5?7?N?d???uw]?~zEjv?uCmt????R?p??~???ui??????^0?g???t???????i??Q_??w????N?????u/H??b??t????}\ks??????t??W????n?{Al????~#???????@y?{???N??UcP???`A?? +??A;.R???1???v^????9V`?hIT`?????? +????^??? +?c_R???*??????^??L??T`???BR?5v?P?iou%??]X?.? +??A???uPA??F?T`???T`??2??? T`U?&???iP??????a{??6?????A??+0Ey? +?z???;? P?5?-Q??)b!???j?;(T0?Au?X?T`?????P?T`???7??)?T???iPAD?HF? ? +&?&DkJ?;~MT`?C/*??gT`??#?????:?? +?v*T??; +*0;??????X??^AT???? +D?????>? +z???*QA/??????????2H???9QA?bHVL? +??D?)v*?XJ???????y? +*????z?????^T?;W?tQA[8?$???T!*?vP?`hPA??PAc?*(?? +*{aQ?*RAE{T??BD?5c? +z?Qs??k????^ +?+XX>???,M?Rc3??}?L ?????TA*XEM????_Q??-Q??(v*X7?vH??Q??`?|??p???"VP??X???Q??1?2b(/RA??????Wp? +?^?? +??r???T??(V?????Mp?A?,?xQ?cQ??A?b3k??? ?? ?*)V?????F????T???g&???A(?4?????X? +?W-? +v????~?????:????`????csR??0?M*8???? 
[TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3152 From noreply at r-forge.r-project.org Sun Sep 22 18:06:39 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 18:06:39 +0200 (CEST) Subject: [Returnanalytics-commits] r3153 - in pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm: R man vignettes Message-ID: <20130922160639.772CF185F4C@r-forge.r-project.org> Author: shubhanm Date: 2013-09-22 18:06:39 +0200 (Sun, 22 Sep 2013) New Revision: 3153 Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-ACFSTDEV.rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-ConditionalDrawdown.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-ConditionalDrawdown.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-EmaxDDGBM.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-EmaxDDGBM.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-GLMReturn.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-GLMReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-GLMSmoothIndex.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-GLMSmoothIndex.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-LoSharpeRatio.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-LoSharpeRatio.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-Managers.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-Managers.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-NormCalmar-Sterling.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-NormCalmar-Sterling.rnw 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-OWReturn.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-OWReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-OkunevWhite.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-OkunevWhite.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-ShaneAcarMaxLoss.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-ShaneAcarMaxLoss.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-UnSmoothReturnAnalysis.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid-UnSmoothReturnAnalysis.pdf Removed: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpeRatio.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - EmaxDDGBM.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - LoSharpeRatio.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid - UnSmooth Return Analysis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -ACFSTDEV.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -Commodity Index Fund Analysis.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -ConditionalDrawdown.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -GLMReturn.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -GLMSmoothIndex.pdf 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -NormCalmar-Sterling.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -OkunevWhite.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/Non-iid -ShaneAcarMaxLoss.pdf pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/NormCalmar.rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OWReturn.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/OkunevWhite.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ShaneAcarMaxLoss.Rnw pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/UnSmoothReturnAnalysis.Rnw Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd Log: final touches - Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/glmi.R 2013-09-22 16:06:39 UTC (rev 3153) @@ -4,22 +4,20 @@ #' @details #' see \code{\link{glm}}. #' @param formula -#'an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ?Details?. -#' +#'an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted. #'@param family #' a description of the error distribution and link function to be used in the model. This can be a character string naming a family function, a family function or the result of a call to a family function. (See family for details of family functions.) 
#'@param data #'an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model. If not found in data, the variables are taken from environment(formula), typically the environment from which lm is called. #' #'@param vcov HC-HAC covariance estimation -#'@param weights -#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum; otherwise ordinary least squares is used. See also ?Details?, +#'@param weights an optional vector of weights to be used in the fitting process. #'@param subset #'an optional vector specifying a subset of observations to be used in the fitting process. #' #' #'@param na.action -#'a function which indicates what should happen when the data contain NAs. The default is set by the na.action setting of options, and is na.fail if that is unset. The ?factory-fresh? default is na.omit. Another possible value is NULL, no action. Value na.exclude can be useful. +#'a function which indicates what should happen when the data contain NAs. Another possible value is NULL, no action. Value na.exclude can be useful. #' #'@param start #'starting values for the parameters in the linear predictor. @@ -51,6 +49,14 @@ #'additional arguments to be passed to the low level regression fitting functions (see below). #' @author The original R implementation of glm was written by Simon Davies working for Ross Ihaka at the University of Auckland, but has since been extensively re-written by members of the R Core team. #' The design was inspired by the S function of the same name described in Hastie & Pregibon (1992). 
+#' @examples +#' ## Dobson (1990) Page 93: Randomized Controlled Trial : +#' counts <- c(18,17,15,20,10,20,25,13,12) +#' outcome <- gl(3,1,9) +#' treatment <- gl(3,3) +#' print(d.AD <- data.frame(treatment, outcome, counts)) +#'glm.D93 <- glmi(counts ~ outcome + treatment, family = poisson()) +#'summary(glm.D93) #' @keywords HC HAC covariance estimation regression fitting model #' @rdname glmi #' @export Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/lmi.R 2013-09-22 16:06:39 UTC (rev 3153) @@ -4,7 +4,7 @@ #' @details #' see \code{\link{lm}}. #' @param formula -#'an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ?Details?. +#'an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted. #' #' #'@param data @@ -12,13 +12,13 @@ #' #'@param vcov HC-HAC covariance estimation #'@param weights -#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum; otherwise ordinary least squares is used. See also ?Details?, +#'an optional vector of weights to be used in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum; otherwise ordinary least squares is used. #' #' #'@param subset #'an optional vector specifying a subset of observations to be used in the fitting process. #'@param na.action -#'a function which indicates what should happen when the data contain NAs. 
The default is set by the na.action setting of options, and is na.fail if that is unset. The ?factory-fresh? default is na.omit. Another possible value is NULL, no action. Value na.exclude can be useful. +#'a function which indicates what should happen when the data contain NAs. The default is set by the na.action setting of options, and is na.fail if that is unsed. Another possible value is NULL, no action. Value na.exclude can be useful. #' #'@param method #'the method to be used; for fitting, currently only method = "qr" is supported; method = "model.frame" returns the model frame (the same as with model = TRUE, see below). @@ -41,6 +41,14 @@ #' @author The original R implementation of glm was written by Simon Davies working for Ross Ihaka at the University of Auckland, but has since been extensively re-written by members of the R Core team. #' The design was inspired by the S function of the same name described in Hastie & Pregibon (1992). #' @keywords HC HAC covariance estimation regression fitting model +#' @examples +#'ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14) +#'trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69) +#'group <- gl(2, 10, 20, labels = c("Ctl","Trt")) +#'weight <- c(ctl, trt) +#'lm.D9 <- lmi(weight ~ group) +#'lm.D90 <- lmi(weight ~ group - 1) # omitting intercept +#'summary(lm.D90) #' @rdname lmi #' @export lmi <- function (formula, data,vcov = NULL, subset, weights, na.action, method = "qr", Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/glmi.Rd 2013-09-22 16:06:39 UTC (rev 3153) @@ -11,8 +11,7 @@ \arguments{ \item{formula}{an object of class "formula" (or one that can be coerced to that class): a symbolic description of - the model to be fitted. 
The details of model - specification are given under ?Details?.} + the model to be fitted.} \item{family}{a description of the error distribution and link function to be used in the model. This can be a @@ -29,20 +28,14 @@ \item{vcov}{HC-HAC covariance estimation} \item{weights}{an optional vector of weights to be used - in the fitting process. Should be NULL or a numeric - vector. If non-NULL, weighted least squares is used with - weights weights (that is, minimizing sum; otherwise - ordinary least squares is used. See also ?Details?,} + in the fitting process.} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} \item{na.action}{a function which indicates what should - happen when the data contain NAs. The default is set by - the na.action setting of options, and is na.fail if that - is unset. The ?factory-fresh? default is na.omit. - Another possible value is NULL, no action. Value - na.exclude can be useful.} + happen when the data contain NAs. Another possible value + is NULL, no action. Value na.exclude can be useful.} \item{start}{starting values for the parameters in the linear predictor.} @@ -95,6 +88,15 @@ \details{ see \code{\link{glm}}. 
} +\examples{ +## Dobson (1990) Page 93: Randomized Controlled Trial : +counts <- c(18,17,15,20,10,20,25,13,12) +outcome <- gl(3,1,9) +treatment <- gl(3,3) +print(d.AD <- data.frame(treatment, outcome, counts)) +glm.D93 <- glmi(counts ~ outcome + treatment, family = poisson()) +summary(glm.D93) +} \author{ The original R implementation of glm was written by Simon Davies working for Ross Ihaka at the University of Modified: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/man/lmi.Rd 2013-09-22 16:06:39 UTC (rev 3153) @@ -10,8 +10,7 @@ \arguments{ \item{formula}{an object of class "formula" (or one that can be coerced to that class): a symbolic description of - the model to be fitted. The details of model - specification are given under ?Details?.} + the model to be fitted.} \item{data}{an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) @@ -25,7 +24,7 @@ in the fitting process. Should be NULL or a numeric vector. If non-NULL, weighted least squares is used with weights weights (that is, minimizing sum; otherwise - ordinary least squares is used. See also ?Details?,} + ordinary least squares is used.} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} @@ -33,9 +32,8 @@ \item{na.action}{a function which indicates what should happen when the data contain NAs. The default is set by the na.action setting of options, and is na.fail if that - is unset. The ?factory-fresh? default is na.omit. - Another possible value is NULL, no action. Value - na.exclude can be useful.} + is unsed. Another possible value is NULL, no action. 
+ Value na.exclude can be useful.} \item{method}{the method to be used; for fitting, currently only method = "qr" is supported; method = @@ -83,6 +81,15 @@ \details{ see \code{\link{lm}}. } +\examples{ +ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14) +trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69) +group <- gl(2, 10, 20, labels = c("Ctl","Trt")) +weight <- c(ctl, trt) +lm.D9 <- lmi(weight ~ group) +lm.D90 <- lmi(weight ~ group - 1) # omitting intercept +summary(lm.D90) +} \author{ The original R implementation of glm was written by Simon Davies working for Ross Ihaka at the University of Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.rnw 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ACFSTDEV.rnw 2013-09-22 16:06:39 UTC (rev 3153) @@ -1,90 +0,0 @@ -%% no need for \DeclareGraphicsExtensions{.pdf,.eps} - -\documentclass[12pt,letterpaper,english]{article} -\usepackage{times} -\usepackage[T1]{fontenc} -\IfFileExists{url.sty}{\usepackage{url}} - {\newcommand{\url}{\texttt}} - -\usepackage{babel} -%\usepackage{noweb} -\usepackage{Rd} - -\usepackage{Sweave} -\SweaveOpts{engine=R,eps=FALSE} -%\VignetteIndexEntry{Performance Attribution from Bacon} -%\VignetteDepends{PerformanceAnalytics} -%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} -%\VignettePackage{PerformanceAnalytics} - -%\documentclass[a4paper]{article} -%\usepackage[noae]{Sweave} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} -%\usepackage{graphicx} -%\usepackage{graphicx, verbatim} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage{graphicx} - -\title{Autocorrelated Standard 
Deviation} -\author{R Project for Statistical Computing} - -\begin{document} -\SweaveOpts{concordance=TRUE} - -\maketitle - - -\begin{abstract} -The fact that many hedge fund returns exhibit extraordinary levels of serial correlation is now well-known and generally accepted as fact.Because hedge fund strategies have exceptionally high autocorrelations in reported returns and this is taken as evidence of return smoothing, we highlight the effect autocorrelation has on volatility which is hazed by the square root of time rule used in the industry -\end{abstract} - -<>= -library(PerformanceAnalytics) -data(edhec) -@ - -<>= -require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/ACStdDev.annualized.R') -@ - -\section{Methodology} -Given a sample of historical returns \((R_1,R_2, . . .,R_T)\),the method assumes the fund manager smooths returns in the following manner, when 't' is the unit time interval: - -%Let $X \sim N(0,1)$ and $Y \sim \textrm{Exponential}(\mu)$. Let -%$Z = \sin(X)$. $\sqrt{X}$. - -%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ -%e^{2 \mu} = 1 -%\begin{equation} -%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ -%\end{equation} -\begin{equation} - \sigma_{T} = T \sqrt{\sigma_{t}} \\ -\end{equation} - - -\section{Usage} - -In this example we use edhec database, to compute true Hedge Fund Returns. - -<>= -library(PerformanceAnalytics) -data(edhec) -ACFVol = ACStdDev.annualized(edhec[,1:3]) -Vol = StdDev.annualized(edhec[,1:3]) -Vol -ACFVol -barplot(rbind(ACFVol,Vol), main="ACF and Orignal Volatility", - xlab="Fund Type",ylab="Volatilty (in %)", col=c("darkblue","red"), beside=TRUE) - legend("topright", c("1","2"), cex=0.6, - bty="2", fill=c("darkblue","red")); -@ - -The above figure shows the behaviour of the distribution tending to a normal IID distribution.For comparitive purpose, one can observe the change in the charateristics of return as compared to the orignal. 
- -\end{document} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.Rnw 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/ConditionalDrawdown.Rnw 2013-09-22 16:06:39 UTC (rev 3153) @@ -1,85 +0,0 @@ -%% no need for \DeclareGraphicsExtensions{.pdf,.eps} - -\documentclass[12pt,letterpaper,english]{article} -\usepackage{times} -\usepackage[T1]{fontenc} -\IfFileExists{url.sty}{\usepackage{url}} - {\newcommand{\url}{\texttt}} - -\usepackage{babel} -%\usepackage{noweb} -\usepackage{Rd} - -\usepackage{Sweave} -\SweaveOpts{engine=R,eps=FALSE} -%\VignetteIndexEntry{Performance Attribution from Bacon} -%\VignetteDepends{PerformanceAnalytics} -%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} -%\VignettePackage{PerformanceAnalytics} - -%\documentclass[a4paper]{article} -%\usepackage[noae]{Sweave} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} -%\usepackage{graphicx} -%\usepackage{graphicx, verbatim} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage{graphicx} - -\title{Chekhlov Conditional Drawdown at Risk} -\author{R Project for Statistical Computing} - -\begin{document} -\SweaveOpts{concordance=TRUE} - -\maketitle - - -\begin{abstract} -A new one-parameter family of risk measures called Conditional Drawdown (CDD) has -been proposed. These measures of risk are functionals of the portfolio drawdown (underwater) curve considered in active portfolio management. 
For some value of $\hat{\alpha}$ the tolerance parameter, in the case of a single sample path, drawdown functional is defined as the mean of the worst (1 \(-\) $\hat{\alpha}$)100\% drawdowns. The CDD measure generalizes the notion of the drawdown functional to a multi-scenario case and can be considered as a generalization of deviation measure to a dynamic case. The CDD measure includes the Maximal Drawdown and Average Drawdown as its limiting cases. -\end{abstract} - -<>= -library(PerformanceAnalytics) -data(edhec) -@ - -<>= -require(noniid.sm) #source("C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/CDrawdown.R") -@ - -\section{Background} - -The model is focused on concept of drawdown measure which is in possession of all properties of a deviation measure,generalization of deviation measures to a dynamic case.Concept of risk profiling - Mixed Conditional Drawdown (generalization of CDD).Optimization techniques for CDD computation - reduction to linear programming (LP) problem. Portfolio optimization with constraint on Mixed CDD -The model develops concept of drawdown measure by generalizing the notion -of the CDD to the case of several sample paths for portfolio uncompounded rate -of return. - - -\section{Methodology} -For a given value of sequence ${\xi_k}$ where ${\xi}$ is a time series unit drawdown vector.The CV at R is formally defined as : -\begin{equation} -CV at R_{\alpha}(\xi)=\frac{\pi_{\xi}(\zeta(\alpha))-\alpha}{1-\alpha}\zeta(\alpha) + \frac{ \sum_{\xi_k=1}^{} \xi_k}{(1-\alpha)N} -\end{equation} - -Note that the first term in the right-hand side of equation appears because of inequality greater than equal to $\hat{\alpha}$. If (1 \(-\) $\hat{\alpha}$) \* 100\% of the worst drawdowns can be counted precisely, then and the first term in the right-hand side of equation disappears. 
Equation follows from the framework of the CVaR methodology - - -\section{Usage} - -In this example we use edhec database, to compute true Hedge Fund Returns. - -<<>>= -library(PerformanceAnalytics) -data(edhec) -CDrawdown(edhec) -@ - - - -\end{document} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/EmaxDDGBM.Rnw 2013-09-22 16:06:39 UTC (rev 3153) @@ -1,102 +0,0 @@ -%% no need for \DeclareGraphicsExtensions{.pdf,.eps} - -\documentclass[12pt,letterpaper,english]{article} -\usepackage{times} -\usepackage[T1]{fontenc} -\IfFileExists{url.sty}{\usepackage{url}} - {\newcommand{\url}{\texttt}} - -\usepackage{babel} -%\usepackage{noweb} -\usepackage{Rd} - -\usepackage{Sweave} -\SweaveOpts{engine=R,eps=FALSE} -%\VignetteIndexEntry{Performance Attribution from Bacon} -%\VignetteDepends{PerformanceAnalytics} -%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} -%\VignettePackage{PerformanceAnalytics} - -%\documentclass[a4paper]{article} -%\usepackage[noae]{Sweave} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} -%\usepackage{graphicx} -%\usepackage{graphicx, verbatim} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage{graphicx} - -\title{On the Maximum Drawdown of a Brownian Motion} -\author{Shubhankit Mohan} - -\begin{document} -\SweaveOpts{concordance=TRUE} - -\maketitle - - -\begin{abstract} -The maximum drawdown possible of an asset whose return series follows a Geometric Brownian Motion Process. 
- -\end{abstract} - - -<>= -require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/LoSharpe.R') -@ - -<>= -library(PerformanceAnalytics) -data(edhec) -data(managers) -@ -\section{Background} - If X(t) is a random process on [0, T ], the maximum - drawdown at time T , D(T), is defined by where \deqn{D(T) - = sup [X(s) - X(t)]} where s belongs to [0,t] and s - belongs to [0,T] Informally, this is the largest drop - from a peak to a bottom. In this paper, we investigate - the behavior of this statistic for a Brownian motion with - drift. In particular, we give an infinite series - representation of its distribution, and consider its - expected value. When the drift is zero, we give an - analytic expression for the expected value, and for - non-zero drift, we give an infinite series - representation. For all cases, we compute the limiting - \bold{(\eqn{T "tends to" \infty})} behavior, which can be - logarithmic (\eqn{\mu > 0} ), square root (\eqn{\mu = 0}), - or linear (\eqn{\mu < 0} ). - - - -<>= -source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/EmaxDDGBM.R') -data(edhec) -Lo.Sharpe = -100*ES(edhec,.99) -Theoretical.Sharpe= EmaxDDGBM(edhec) -barplot(as.matrix(rbind(Theoretical.Sharpe,Lo.Sharpe)), main="Expected Shortfall(.99) and Drawdown of a Brwonian Motion Asset Process", - xlab="Fund Type",ylab="Value", col=rich6equal[1:2], beside=TRUE) - legend("topright", c("ES","EGBMDD"), cex=0.6, - bty="2", fill=rich6equal[1:2]); -@ - -We can observe that the fund "\textbf{Emerging Markets}", which has the largest drawdown and serial autocorrelation, has highest Drawdown , \emph{decrease} most significantly as comapared to other funds. 
- -<>= - -data(managers) -Lo.Sharpe = -100*ES(managers[,1:6],.99) -Theoretical.Sharpe= EmaxDDGBM(managers[,1:6]) -barplot(as.matrix(rbind(Theoretical.Sharpe,Lo.Sharpe)), main="Expected Shortfall(.99) and Drawdown of a Brwonian Motion Asset Process", - xlab="Fund Type",ylab="Value", col=rich6equal[1:2], beside=TRUE) - legend("topright", c("ES","EGBMDD"), cex=0.6, - bty="2", fill=rich6equal[1:2]); -@ - -We can see that the model, correctly ranks the highest drawdown fund managers, i.e. \textbf{HAM2}, which has the largest drawdown among all the funds. - -\end{document} Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.Rnw 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMReturn.Rnw 2013-09-22 16:06:39 UTC (rev 3153) @@ -1,135 +0,0 @@ -%% no need for \DeclareGraphicsExtensions{.pdf,.eps} - -\documentclass[12pt,letterpaper,english]{article} -\usepackage{times} -\usepackage[T1]{fontenc} -\IfFileExists{url.sty}{\usepackage{url}} - {\newcommand{\url}{\texttt}} - -\usepackage{babel} -%\usepackage{noweb} -\usepackage{Rd} - -\usepackage{Sweave} -\SweaveOpts{engine=R,eps=FALSE} -%\VignetteIndexEntry{Performance Attribution from Bacon} -%\VignetteDepends{PerformanceAnalytics} -%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} -%\VignettePackage{PerformanceAnalytics} - -%\documentclass[a4paper]{article} -%\usepackage[noae]{Sweave} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} -%\usepackage{graphicx} -%\usepackage{graphicx, verbatim} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage{graphicx} - -\title{Gemantsky Lo Makarov Return Model} -\author{R Project for Statistical 
Computing} - -\begin{document} -\SweaveOpts{concordance=TRUE} - -\maketitle - - -\begin{abstract} -The returns to hedge funds and other alternative investments are often highly serially correlated. In this paper, we explore several sources of such serial correlation and show that the most likely explanation is illiquidity exposure and smoothed returns. We propose an econometric model of return smoothingand develop estimators for the smoothing profile as well as a smoothing-adjusted obtained Sharpe ratio.\end{abstract} - -<>= -library(PerformanceAnalytics) -data(edhec) -@ - -<>= -require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/Return.GLM.R') -require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/na.skip.R') -@ - -\section{Methodology} -Given a sample of historical returns \((R_1,R_2, . . .,R_T)\),the method assumes the fund manager smooths returns in the following manner: - -To quantify the impact of all of these possible sources of serial correlation, denote by \(R_t\),the true economic return of a hedge fund in period t; and let \(R_t\) satisfy the following linear single-factor model: - -\begin{equation} - R_t = \\ {\mu} + {\beta}{{\delta}}_t+ \xi_t -\end{equation} - -Where $\xi_t, \sim N(0,1)$ -and Var[\(R_t\)] = $\sigma$\ \(^2\) - -True returns represent the flow of information that would determine the equilibrium value of the fund's securities in a frictionless market. However, true economic returns are not observed. Instead, \(R_t^0\) denotes the reported or observed return in period t; and let -%$Z = \sin(X)$. $\sqrt{X}$. 
- -%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ -%e^{2 \mu} = 1 -%\begin{equation} -%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ -%\end{equation} -\begin{equation} - R_t^0 = \theta _0R_{t} + \theta _1R_{t-1}+\theta _2R_{t-2} + \cdots + \theta _kR_{t-k}\\ -\end{equation} -\begin{equation} -\theta _j \epsilon [0,1] where : j = 0,1, \cdots , k \\ -\end{equation} - -and -%\left(\mu \right) = \sum_{t=1}^{T} \(Ri)/T\ \\ -\begin{equation} -\theta _1 + \theta _2 + \theta _3 \cdots + \theta _k = 1 \\ -\end{equation} - -which is a weighted average of the fund's true returns over the most recent k + 1 -periods, including the current period. -\section{Smoothing Profile Estimates} - -Using the methods outlined above , the paper estimates the smoothing model -using maximumlikelihood procedure-programmed in Matlab using the Optimization Toolbox andreplicated in Stata usingits MA(k) estimation routine.Using Time seseries analysis and computational finance("tseries") library , we fit an it an ARMA model to a univariate time series by conditional least squares. For exact maximum likelihood estimation,arima0 from package stats can be used. - -\section{Usage} - -In this example we use edhec database, to compute true Hedge Fund Returns. 
- -<>= -library(PerformanceAnalytics) -data(edhec) -Returns = Return.GLM(edhec[,1]) -skewness(edhec[,1]) -skewness(Returns) -# Right Shift of Returns Ditribution for a negative skewed distribution -kurtosis(edhec[,1]) -kurtosis(Returns) -# Reduction in "peakedness" around the mean -layout(rbind(c(1, 2), c(3, 4))) - chart.Histogram(Returns, main = "Plain", methods = NULL) - chart.Histogram(Returns, main = "Density", breaks = 40, - methods = c("add.density", "add.normal")) - chart.Histogram(Returns, main = "Skew and Kurt", - methods = c("add.centered", "add.rug")) -chart.Histogram(Returns, main = "Risk Measures", - methods = c("add.risk")) -@ - -The above figure shows the behaviour of the distribution tending to a normal IID distribution.For comparitive purpose, one can observe the change in the charateristics of return as compared to the orignal. - -<>= -library(PerformanceAnalytics) -data(edhec) -Returns = Return.GLM(edhec[,1]) -layout(rbind(c(1, 2), c(3, 4))) - chart.Histogram(edhec[,1], main = "Plain", methods = NULL) - chart.Histogram(edhec[,1], main = "Density", breaks = 40, - methods = c("add.density", "add.normal")) - chart.Histogram(edhec[,1], main = "Skew and Kurt", - methods = c("add.centered", "add.rug")) -chart.Histogram(edhec[,1], main = "Risk Measures", - methods = c("add.risk")) -@ - -\end{document} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.Rnw 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/GLMSmoothIndex.Rnw 2013-09-22 16:06:39 UTC (rev 3153) @@ -1,107 +0,0 @@ -%% no need for \DeclareGraphicsExtensions{.pdf,.eps} - -\documentclass[12pt,letterpaper,english]{article} -\usepackage{times} -\usepackage[T1]{fontenc} -\IfFileExists{url.sty}{\usepackage{url}} - 
{\newcommand{\url}{\texttt}} - -\usepackage{babel} -%\usepackage{noweb} -\usepackage{Rd} - -\usepackage{Sweave} -\SweaveOpts{engine=R,eps=FALSE} -%\VignetteIndexEntry{Performance Attribution from Bacon} -%\VignetteDepends{PerformanceAnalytics} -%\VignetteKeywords{returns, performance, risk, benchmark, portfolio} -%\VignettePackage{PerformanceAnalytics} - -%\documentclass[a4paper]{article} -%\usepackage[noae]{Sweave} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage[top=3cm, bottom=3cm, left=2.5cm]{geometry} -%\usepackage{graphicx} -%\usepackage{graphicx, verbatim} -%\usepackage{ucs} -%\usepackage[utf8x]{inputenc} -%\usepackage{amsmath, amsthm, latexsym} -%\usepackage{graphicx} - -\title{GLM Smoothing Index} -\author{R Project for Statistical Computing} - -\begin{document} -\SweaveOpts{concordance=TRUE} - -\maketitle - - -\begin{abstract} -The returns to hedge funds and other alternative investments are often highly serially correlated.Gemanstsy,Lo and Markov propose an econometric model of return smoothingand develop estimators for the smoothing profile.The magnitude of impact is measured by the smoothing index, which is a measure of concentration of weight in lagged terms. 
-\end{abstract} - -<>= -library(PerformanceAnalytics) -data(edhec) -@ - -<>= -require(noniid.sm) #source('C:/Users/shubhankit/Desktop/Again/pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/R/GLMSmoothIndex.R') -@ - -\section{Background} -To quantify the impact of all of these possible sources of serial correlation, denote by \(R_t\),the true economic return of a hedge fund in period t; and let \(R_t\) satisfy the following linear single factor model: - -\begin{equation} - R_t = \\ {\mu} + {\beta}{{\delta}}_t+ \xi_t -\end{equation} - -Where $\xi_t, \sim N(0,1)$ -and Var[\(R_t\)] = $\sigma$\ \(^2\) - -True returns represent the flow of information that would determine the equilibrium value of the fund's securities in a frictionless market. However, true economic returns are not observed. Instead, \(R_t^0\) denotes the reported or observed return in period t; and let -%$Z = \sin(X)$. $\sqrt{X}$. - -%$\hat{\mu}$ = $\displaystyle\frac{22}{7}$ -%e^{2 \mu} = 1 -%\begin{equation} -%\left(\sum_{t=1}^{T} R_t/T\right) = \hat{\mu} \\ -%\end{equation} -\begin{equation} - R_t^0 = \theta _0R_{t} + \theta _1R_{t-1}+\theta _2R_{t-2} + \cdots + \theta _kR_{t-k}\\ -\end{equation} -\begin{equation} -\theta _j \epsilon [0,1] where : j = 0,1, \cdots , k \\ -\end{equation} - -and -%\left(\mu \right) = \sum_{t=1}^{T} \(Ri)/T\ \\ -\begin{equation} -\theta _1 + \theta _2 + \theta _3 \cdots + \theta _k = 1 \\ -\end{equation} - -which is a weighted average of the fund's true returns over the most recent k + 1 -periods, including the current period. - -\section{Smoothing Index} -A useful summary statistic for measuringthe concentration of weights is : -\begin{equation} -\xi = \sum_{j=0}^{k} \theta _j^2 \\ -\end{equation} - -This measure is well known in the industrial organization literature as the Herfindahl index, a measure of the concentration of firms in a given industry where $\theta$\(_j\) represents the market share of firm j. 
Becaus $\xi_t$\ is confined to the unit interval, and is minimized when all the $\theta$\(_j\) 's are identical, which implies a value of 1/k+1 for $\xi_i$\ ; and is maximized when one coefficient is 1 and the rest are 0. In the context of smoothed returns, a lower value of implies more smoothing, and the upper bound of 1 implies no smoothing, hence we shall refer to $\theta$\(_j\) as a ''\textbf{smoothingindex}''. - -\section{Usage} - -In this example we use edhec database, to compute Smoothing Index for Hedge Fund Returns. -<<>>= -library(PerformanceAnalytics) -data(edhec) -GLMSmoothIndex(edhec) -@ - - -\end{document} \ No newline at end of file Deleted: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpeRatio.Rnw =================================================================== --- pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpeRatio.Rnw 2013-09-22 12:49:32 UTC (rev 3152) +++ pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm/vignettes/LoSharpeRatio.Rnw 2013-09-22 16:06:39 UTC (rev 3153) @@ -1,116 +0,0 @@ -%% no need for \DeclareGraphicsExtensions{.pdf,.eps} - -\documentclass[12pt,letterpaper,english]{article} -\usepackage{times} -\usepackage[T1]{fontenc} -\IfFileExists{url.sty}{\usepackage{url}} - {\newcommand{\url}{\texttt}} - -\usepackage{babel} -%\usepackage{noweb} -\usepackage{Rd} - -\usepackage{Sweave} -\SweaveOpts{engine=R,eps=FALSE} -%\VignetteIndexEntry{Performance Attribution from Bacon} [TRUNCATED] To get the complete diff run: svnlook diff /svnroot/returnanalytics -r 3153 From noreply at r-forge.r-project.org Sun Sep 22 18:21:56 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 18:21:56 +0200 (CEST) Subject: [Returnanalytics-commits] r3154 - pkg/PerformanceAnalytics/sandbox/Shubhankit Message-ID: <20130922162156.6793518487F@r-forge.r-project.org> Author: shubhanm Date: 2013-09-22 18:21:56 +0200 (Sun, 22 Sep 2013) New Revision: 3154 Added: 
pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm_0.1.tar.gz pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm_0.1.zip Log: Binary and Source Package Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm_0.1.tar.gz =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm_0.1.tar.gz ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Added: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm_0.1.zip =================================================================== (Binary files differ) Property changes on: pkg/PerformanceAnalytics/sandbox/Shubhankit/noniid.sm_0.1.zip ___________________________________________________________________ Added: svn:mime-type + application/octet-stream From noreply at r-forge.r-project.org Sun Sep 22 23:27:01 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 23:27:01 +0200 (CEST) Subject: [Returnanalytics-commits] r3155 - pkg/PortfolioAnalytics/R Message-ID: <20130922212701.D35D1185C56@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-22 23:27:01 +0200 (Sun, 22 Sep 2013) New Revision: 3155 Modified: pkg/PortfolioAnalytics/R/random_portfolios.R Log: Correcting numbers generated for rp_simplex Modified: pkg/PortfolioAnalytics/R/random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-22 16:21:56 UTC (rev 3154) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-22 21:27:01 UTC (rev 3155) @@ -483,7 +483,7 @@ k <- ceiling(permutations / length(fev)) # generate uniform[0, 1] random numbers - U <- runif(n=k*length(fev)*nassets, 0, 1) + U <- runif(n=k*nassets, 0, 1) Umat <- matrix(data=U, nrow=k, ncol=nassets) # do the transformation to the set of weights to satisfy lower bounds From noreply at 
r-forge.r-project.org Sun Sep 22 23:28:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 23:28:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3156 - in pkg/PortfolioAnalytics: R man Message-ID: <20130922212809.4C31F185B6E@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-22 23:28:08 +0200 (Sun, 22 Sep 2013) New Revision: 3156 Removed: pkg/PortfolioAnalytics/man/print.optimize.portfolio.DEoptim.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.GenSA.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.ROI.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.pso.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.random.Rd Modified: pkg/PortfolioAnalytics/R/generics.R pkg/PortfolioAnalytics/man/chart.Weights.Rd pkg/PortfolioAnalytics/man/print.efficient.frontier.Rd pkg/PortfolioAnalytics/man/print.portfolio.Rd Log: Updating documentation for print methods. Collapsing print.optimize.portfolio.* to print.optimize.portfolio for documentation entries. Modified: pkg/PortfolioAnalytics/R/generics.R =================================================================== --- pkg/PortfolioAnalytics/R/generics.R 2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/R/generics.R 2013-09-22 21:28:08 UTC (rev 3156) @@ -45,11 +45,11 @@ #' #' Print method for objects of class \code{portfolio} created with \code{\link{portfolio.spec}} #' -#' @param x object of class \code{portfolio} +#' @param x an object of class \code{portfolio} #' @param \dots any other passthru parameters #' @author Ross Bennett #' @method print portfolio -#' @export +#' @S3method print portfolio print.portfolio <- function(x, ...){ if(!is.portfolio(x)) stop("object passed in is not of class 'portfolio'") @@ -244,22 +244,28 @@ #' @param \dots any other passthru parameters #' @author Ross Bennett #' @method print constraint -#' @export +#' @S3method print constraint print.constraint <- function(x, ...){ print.default(x, ...) 
} -#' Printing Output of optimize.portfolio +#' Printing output of optimize.portfolio #' -#' print method for optimize.portfolio.ROI +#' print method for optimize.portfolio objects #' -#' @param x an object of class \code{optimize.portfolio.ROI} resulting from a call to \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters +#' @param x an object used to select a method +#' @param \dots any other passthru parameters #' @param digits the number of significant digits to use when printing. #' @author Ross Bennett +#' @aliases print.optimize.portfolio.ROI, +#' print.optimize.portfolio.random, +#' print.optimize.portfolio.DEoptim, +#' print.optimize.portfolio.GenSA, +#' print.optimize.portfolio.pso +#' @rdname print.optimize.portfolio #' @method print optimize.portfolio.ROI -#' @export -print.optimize.portfolio.ROI <- function(x, ..., digits = max(3, getOption("digits") - 3)){ +#' @S3method print optimize.portfolio.ROI +print.optimize.portfolio.ROI <- function(x, ..., digits=4){ cat(rep("*", 35) ,"\n", sep="") cat("PortfolioAnalytics Optimization\n") cat(rep("*", 35) ,"\n", sep="") @@ -296,17 +302,11 @@ cat("\n") } -#' Printing Output of optimize.portfolio -#' -#' print method for optimize.portfolio.random -#' -#' @param x an object of class \code{optimize.portfolio.random} resulting from a call to \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters -#' @param digits the number of significant digits to use when printing. 
-#' @author Ross Bennett + +#' @rdname print.optimize.portfolio #' @method print optimize.portfolio.random -#' @export -print.optimize.portfolio.random <- function(x, ..., digits=max(3, getOption("digits")-3)){ +#' @S3method print optimize.portfolio.random +print.optimize.portfolio.random <- function(x, ..., digits=4){ cat(rep("*", 35) ,"\n", sep="") cat("PortfolioAnalytics Optimization\n") cat(rep("*", 35) ,"\n", sep="") @@ -334,7 +334,7 @@ cat(names(tmpl), ":\n") tmpv <- unlist(tmpl) names(tmpv) <- names(x$weights) - print(tmpv) + print(tmpv, digits=digits) cat("\n") } } @@ -343,17 +343,11 @@ cat("\n") } -#' Printing Output of optimize.portfolio -#' -#' print method for optimize.portfolio.DEoptim -#' -#' @param x an object of class \code{optimize.portfolio.DEoptim} resulting from a call to \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters -#' @param digits the number of significant digits to use when printing. -#' @author Ross Bennett + +#' @rdname print.optimize.portfolio #' @method print optimize.portfolio.DEoptim -#' @export -print.optimize.portfolio.DEoptim <- function(x, ..., digits=max(3, getOption("digits")-3)){ +#' @S3method print optimize.portfolio.DEoptim +print.optimize.portfolio.DEoptim <- function(x, ..., digits=4){ cat(rep("*", 35) ,"\n", sep="") cat("PortfolioAnalytics Optimization\n") cat(rep("*", 35) ,"\n", sep="") @@ -381,7 +375,7 @@ cat(names(tmpl), ":\n") tmpv <- unlist(tmpl) names(tmpv) <- names(x$weights) - print(tmpv) + print(tmpv, digits=digits) cat("\n") } } @@ -390,17 +384,11 @@ cat("\n") } -#' Printing Output of optimize.portfolio -#' -#' print method for optimize.portfolio.GenSA -#' -#' @param x an object of class \code{optimize.portfolio.GenSA} resulting from a call to \code{\link{optimize.portfolio}} -#' @param ... 
any other passthru parameters -#' @param digits the number of significant digits to use when printing -#' @author Ross Bennett + +#' @rdname print.optimize.portfolio #' @method print optimize.portfolio.GenSA -#' @export -print.optimize.portfolio.GenSA <- function(x, ..., digits=max(3, getOption("digits")-3)){ +#' @S3method print optimize.portfolio.GenSA +print.optimize.portfolio.GenSA <- function(x, ..., digits=4){ cat(rep("*", 35) ,"\n", sep="") cat("PortfolioAnalytics Optimization\n") cat(rep("*", 35) ,"\n", sep="") @@ -428,7 +416,7 @@ cat(names(tmpl), ":\n") tmpv <- unlist(tmpl) names(tmpv) <- names(x$weights) - print(tmpv) + print(tmpv, digits=digits) cat("\n") } } @@ -437,17 +425,11 @@ cat("\n") } -#' Printing Output of optimize.portfolio -#' -#' print method for optimize.portfolio.pso -#' -#' @param x an object of class \code{optimize.portfolio.pso} resulting from a call to \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters -#' @param digits the number of significant digits to use when printing. -#' @author Ross Bennett + +#' @rdname print.optimize.portfolio #' @method print optimize.portfolio.pso -#' @export -print.optimize.portfolio.pso <- function(x, ..., digits=max(3, getOption("digits")-3)){ +#' @S3method print optimize.portfolio.pso +print.optimize.portfolio.pso <- function(x, ..., digits=4){ cat(rep("*", 35) ,"\n", sep="") cat("PortfolioAnalytics Optimization\n") cat(rep("*", 35) ,"\n", sep="") @@ -475,7 +457,7 @@ cat(names(tmpl), ":\n") tmpv <- unlist(tmpl) names(tmpv) <- names(x$weights) - print(tmpv) + print(tmpv, digits=digits) cat("\n") } } @@ -707,10 +689,10 @@ #' efficient frontier was created or extracted. #' #' @param x objective of class \code{efficient.frontier} -#' @param ... 
passthrough parameters +#' @param \dots any other passthru parameters #' @author Ross Bennett #' @method print efficient.frontier -#' @export +#' @S3method print efficient.frontier print.efficient.frontier <- function(x, ...){ if(!inherits(x, "efficient.frontier")) stop("object passed in is not of class 'efficient.frontier'") Modified: pkg/PortfolioAnalytics/man/chart.Weights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-22 21:28:08 UTC (rev 3156) @@ -1,4 +1,4 @@ -\name{chart.Weights.optimize.portfolio.DEoptim} +\name{chart.Weights} \alias{chart.Weights} \alias{chart.Weights.opt.list} \alias{chart.Weights.optimize.portfolio.DEoptim} Modified: pkg/PortfolioAnalytics/man/print.efficient.frontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/print.efficient.frontier.Rd 2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/man/print.efficient.frontier.Rd 2013-09-22 21:28:08 UTC (rev 3156) @@ -7,7 +7,7 @@ \arguments{ \item{x}{objective of class \code{efficient.frontier}} - \item{...}{passthrough parameters} + \item{\dots}{any other passthru parameters} } \description{ Print method for efficient frontier objects. 
Display the Deleted: pkg/PortfolioAnalytics/man/print.optimize.portfolio.DEoptim.Rd =================================================================== --- pkg/PortfolioAnalytics/man/print.optimize.portfolio.DEoptim.Rd 2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/man/print.optimize.portfolio.DEoptim.Rd 2013-09-22 21:28:08 UTC (rev 3156) @@ -1,24 +0,0 @@ -\name{print.optimize.portfolio.DEoptim} -\alias{print.optimize.portfolio.DEoptim} -\title{Printing Output of optimize.portfolio} -\usage{ - \method{print}{optimize.portfolio.DEoptim} (x, ..., - digits = max(3, getOption("digits") - 3)) -} -\arguments{ - \item{x}{an object of class - \code{optimize.portfolio.DEoptim} resulting from a call - to \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{digits}{the number of significant digits to use - when printing.} -} -\description{ - print method for optimize.portfolio.DEoptim -} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/print.optimize.portfolio.GenSA.Rd =================================================================== --- pkg/PortfolioAnalytics/man/print.optimize.portfolio.GenSA.Rd 2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/man/print.optimize.portfolio.GenSA.Rd 2013-09-22 21:28:08 UTC (rev 3156) @@ -1,24 +0,0 @@ -\name{print.optimize.portfolio.GenSA} -\alias{print.optimize.portfolio.GenSA} -\title{Printing Output of optimize.portfolio} -\usage{ - \method{print}{optimize.portfolio.GenSA} (x, ..., - digits = max(3, getOption("digits") - 3)) -} -\arguments{ - \item{x}{an object of class - \code{optimize.portfolio.GenSA} resulting from a call to - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{digits}{the number of significant digits to use - when printing} -} -\description{ - print method for optimize.portfolio.GenSA -} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/print.optimize.portfolio.ROI.Rd 
=================================================================== --- pkg/PortfolioAnalytics/man/print.optimize.portfolio.ROI.Rd 2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/man/print.optimize.portfolio.ROI.Rd 2013-09-22 21:28:08 UTC (rev 3156) @@ -1,24 +0,0 @@ -\name{print.optimize.portfolio.ROI} -\alias{print.optimize.portfolio.ROI} -\title{Printing Output of optimize.portfolio} -\usage{ - \method{print}{optimize.portfolio.ROI} (x, ..., - digits = max(3, getOption("digits") - 3)) -} -\arguments{ - \item{x}{an object of class \code{optimize.portfolio.ROI} - resulting from a call to - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{digits}{the number of significant digits to use - when printing.} -} -\description{ - print method for optimize.portfolio.ROI -} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/print.optimize.portfolio.pso.Rd =================================================================== --- pkg/PortfolioAnalytics/man/print.optimize.portfolio.pso.Rd 2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/man/print.optimize.portfolio.pso.Rd 2013-09-22 21:28:08 UTC (rev 3156) @@ -1,24 +0,0 @@ -\name{print.optimize.portfolio.pso} -\alias{print.optimize.portfolio.pso} -\title{Printing Output of optimize.portfolio} -\usage{ - \method{print}{optimize.portfolio.pso} (x, ..., - digits = max(3, getOption("digits") - 3)) -} -\arguments{ - \item{x}{an object of class \code{optimize.portfolio.pso} - resulting from a call to - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{digits}{the number of significant digits to use - when printing.} -} -\description{ - print method for optimize.portfolio.pso -} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/print.optimize.portfolio.random.Rd =================================================================== --- pkg/PortfolioAnalytics/man/print.optimize.portfolio.random.Rd 
2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/man/print.optimize.portfolio.random.Rd 2013-09-22 21:28:08 UTC (rev 3156) @@ -1,24 +0,0 @@ -\name{print.optimize.portfolio.random} -\alias{print.optimize.portfolio.random} -\title{Printing Output of optimize.portfolio} -\usage{ - \method{print}{optimize.portfolio.random} (x, ..., - digits = max(3, getOption("digits") - 3)) -} -\arguments{ - \item{x}{an object of class - \code{optimize.portfolio.random} resulting from a call to - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{digits}{the number of significant digits to use - when printing.} -} -\description{ - print method for optimize.portfolio.random -} -\author{ - Ross Bennett -} - Modified: pkg/PortfolioAnalytics/man/print.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/print.portfolio.Rd 2013-09-22 21:27:01 UTC (rev 3155) +++ pkg/PortfolioAnalytics/man/print.portfolio.Rd 2013-09-22 21:28:08 UTC (rev 3156) @@ -5,7 +5,7 @@ \method{print}{portfolio} (x, ...) 
} \arguments{ - \item{x}{object of class \code{portfolio}} + \item{x}{an object of class \code{portfolio}} \item{\dots}{any other passthru parameters} } From noreply at r-forge.r-project.org Sun Sep 22 23:29:23 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 23:29:23 +0200 (CEST) Subject: [Returnanalytics-commits] r3157 - pkg/PortfolioAnalytics/man Message-ID: <20130922212924.09EB8185B6E@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-22 23:29:23 +0200 (Sun, 22 Sep 2013) New Revision: 3157 Added: pkg/PortfolioAnalytics/man/print.optimize.portfolio.Rd Log: Adding print.optimize.portfolio.Rd file Added: pkg/PortfolioAnalytics/man/print.optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/print.optimize.portfolio.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/print.optimize.portfolio.Rd 2013-09-22 21:29:23 UTC (rev 3157) @@ -0,0 +1,42 @@ +\name{print.optimize.portfolio.ROI} +\alias{print.optimize.portfolio.DEoptim} +\alias{print.optimize.portfolio.DEoptim,} +\alias{print.optimize.portfolio.GenSA} +\alias{print.optimize.portfolio.GenSA,} +\alias{print.optimize.portfolio.pso} +\alias{print.optimize.portfolio.random} +\alias{print.optimize.portfolio.random,} +\alias{print.optimize.portfolio.ROI} +\alias{print.optimize.portfolio.ROI,} +\title{Printing output of optimize.portfolio} +\usage{ + \method{print}{optimize.portfolio.ROI} (x, ..., + digits = 4) + + \method{print}{optimize.portfolio.random} (x, ..., + digits = 4) + + \method{print}{optimize.portfolio.DEoptim} (x, ..., + digits = 4) + + \method{print}{optimize.portfolio.GenSA} (x, ..., + digits = 4) + + \method{print}{optimize.portfolio.pso} (x, ..., + digits = 4) +} +\arguments{ + \item{x}{an object used to select a method} + + \item{\dots}{any other passthru parameters} + + \item{digits}{the number of significant digits to use + when printing.} +} +\description{ + print method for 
optimize.portfolio objects +} +\author{ + Ross Bennett +} + From noreply at r-forge.r-project.org Sun Sep 22 23:35:28 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 23:35:28 +0200 (CEST) Subject: [Returnanalytics-commits] r3158 - in pkg/PortfolioAnalytics: R man Message-ID: <20130922213528.3B4C31849A4@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-22 23:35:27 +0200 (Sun, 22 Sep 2013) New Revision: 3158 Added: pkg/PortfolioAnalytics/man/plot.Rd Removed: pkg/PortfolioAnalytics/man/plot.optimize.portfolio.DEoptim.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.GenSA.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.ROI.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.pso.Rd pkg/PortfolioAnalytics/man/plot.optimize.portfolio.random.Rd Modified: pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/R/charts.GenSA.R pkg/PortfolioAnalytics/R/charts.PSO.R pkg/PortfolioAnalytics/R/charts.ROI.R pkg/PortfolioAnalytics/R/charts.RP.R pkg/PortfolioAnalytics/man/chart.RiskReward.Rd Log: Updating documentation for plot.* methods. 
Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-22 21:35:27 UTC (rev 3158) @@ -333,9 +333,9 @@ #' @param main an overall title for the plot: see \code{\link{title}} #' @param xlim set the limit on coordinates for the x-axis #' @param ylim set the limit on coordinates for the y-axis +#' @rdname plot #' @method plot optimize.portfolio.DEoptim #' @S3method plot optimize.portfolio.DEoptim -#' @export plot.optimize.portfolio.DEoptim <- function(x, ..., return.col='mean', risk.col='ES', chart.assets=FALSE, neighbors=NULL, main='optimized portfolio plot', xlim=NULL, ylim=NULL) { charts.DE(DE=x, risk.col=risk.col, return.col=return.col, chart.assets=chart.assets, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...) } Modified: pkg/PortfolioAnalytics/R/charts.GenSA.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-22 21:35:27 UTC (rev 3158) @@ -174,9 +174,9 @@ #' @param ylim set the limit on coordinates for the y-axis #' @seealso \code{\link{optimize.portfolio}} #' @author Ross Bennett +#' @rdname plot #' @method plot optimize.portfolio.GenSA #' @S3method plot optimize.portfolio.GenSA -#' @export plot.optimize.portfolio.GenSA <- function(x, ..., rp=FALSE, return.col="mean", risk.col="ES", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="GenSA.Portfolios", xlim=NULL, ylim=NULL){ charts.GenSA(GenSA=x, rp=rp, return.col=return.col, risk.col=risk.col, chart.assets=chart.assets, cex.axis=cex.axis, element.color=element.color, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...=...) 
} Modified: pkg/PortfolioAnalytics/R/charts.PSO.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-22 21:35:27 UTC (rev 3158) @@ -231,9 +231,9 @@ #' @param ylim set the limit on coordinates for the y-axis #' @seealso \code{\link{optimize.portfolio}} #' @author Ross Bennett +#' @rdname plot #' @method plot optimize.portfolio.pso #' @S3method plot optimize.portfolio.pso -#' @export plot.optimize.portfolio.pso <- function(x, ..., return.col="mean", risk.col="ES", chart.assets=FALSE, cex.axis=0.8, element.color="darkgray", neighbors=NULL, main="PSO.Portfolios", xlim=NULL, ylim=NULL){ charts.pso(pso=x, return.col=return.col, risk.col=risk.col, chart.assets=FALSE, cex.axis=cex.axis, element.color=element.color, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...=...) } Modified: pkg/PortfolioAnalytics/R/charts.ROI.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-22 21:35:27 UTC (rev 3158) @@ -178,9 +178,9 @@ #' @param ylim set the limit on coordinates for the y-axis #' @seealso \code{\link{optimize.portfolio}} #' @author Ross Bennett +#' @rdname plot #' @method plot optimize.portfolio.ROI #' @S3method plot optimize.portfolio.ROI -#' @export plot.optimize.portfolio.ROI <- function(x, ..., rp=FALSE, risk.col="ES", return.col="mean", chart.assets=FALSE, element.color="darkgray", neighbors=NULL, main="ROI.Portfolios", xlim=NULL, ylim=NULL){ charts.ROI(ROI=x, rp=rp, risk.col=risk.col, return.col=return.col, chart.assets=chart.assets, main=main, xlim=xlim, ylim=ylim, ...) 
} Modified: pkg/PortfolioAnalytics/R/charts.RP.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-22 21:35:27 UTC (rev 3158) @@ -290,9 +290,9 @@ #' @param xlim set the limit on coordinates for the x-axis #' @param ylim set the limit on coordinates for the y-axis #' @param main an overall title for the plot: see \code{\link{title}} +#' @rdname plot #' @method plot optimize.portfolio.random #' @S3method plot optimize.portfolio.random -#' @export plot.optimize.portfolio.random <- function(x, ..., return.col='mean', risk.col='ES', chart.assets=FALSE, neighbors=NULL, xlim=NULL, ylim=NULL, main='optimized portfolio plot') { charts.RP(RP=x, risk.col=risk.col, return.col=return.col, chart.assets=chart.assets, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...) } @@ -320,9 +320,9 @@ #' @param xlim set the limit on coordinates for the x-axis #' @param ylim set the limit on coordinates for the y-axis #' @param main an overall title for the plot: see \code{\link{title}} +#' @rdname plot #' @method plot optimize.portfolio #' @S3method plot optimize.portfolio -#' @export plot.optimize.portfolio <- function(x, ..., return.col='mean', risk.col='ES', chart.assets=FALSE, neighbors=NULL, xlim=NULL, ylim=NULL, main='optimized portfolio plot') { charts.RP(RP=x, risk.col=risk.col, return.col=return.col, chart.assets=chart.assets, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...) 
} \ No newline at end of file Modified: pkg/PortfolioAnalytics/man/chart.RiskReward.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-22 21:35:27 UTC (rev 3158) @@ -1,4 +1,4 @@ -\name{chart.RiskReward.optimize.portfolio.DEoptim} +\name{chart.RiskReward} \alias{chart.RiskReward} \alias{chart.RiskReward.opt.list} \alias{chart.RiskReward.optimize.portfolio.DEoptim} Added: pkg/PortfolioAnalytics/man/plot.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/plot.Rd 2013-09-22 21:35:27 UTC (rev 3158) @@ -0,0 +1,316 @@ +\name{plot.optimize.portfolio.DEoptim} +\alias{plot.optimize.portfolio} +\alias{plot.optimize.portfolio.DEoptim} +\alias{plot.optimize.portfolio.GenSA} +\alias{plot.optimize.portfolio.pso} +\alias{plot.optimize.portfolio.random} +\alias{plot.optimize.portfolio.ROI} +\title{plot method for optimize.portfolio.DEoptim output} +\usage{ + \method{plot}{optimize.portfolio.DEoptim} (x, ..., + return.col = "mean", risk.col = "ES", + chart.assets = FALSE, neighbors = NULL, + main = "optimized portfolio plot", xlim = NULL, + ylim = NULL) + + \method{plot}{optimize.portfolio.random} (x, ..., + return.col = "mean", risk.col = "ES", + chart.assets = FALSE, neighbors = NULL, xlim = NULL, + ylim = NULL, main = "optimized portfolio plot") + + \method{plot}{optimize.portfolio} (x, ..., + return.col = "mean", risk.col = "ES", + chart.assets = FALSE, neighbors = NULL, xlim = NULL, + ylim = NULL, main = "optimized portfolio plot") + + \method{plot}{optimize.portfolio.ROI} (x, ..., + rp = FALSE, risk.col = "ES", return.col = "mean", + chart.assets = FALSE, element.color = "darkgray", + neighbors = NULL, main = "ROI.Portfolios", xlim = NULL, + ylim = NULL) + + \method{plot}{optimize.portfolio.pso} (x, ..., + return.col = 
"mean", risk.col = "ES", + chart.assets = FALSE, cex.axis = 0.8, + element.color = "darkgray", neighbors = NULL, + main = "PSO.Portfolios", xlim = NULL, ylim = NULL) + + \method{plot}{optimize.portfolio.GenSA} (x, ..., + rp = FALSE, return.col = "mean", risk.col = "ES", + chart.assets = FALSE, cex.axis = 0.8, + element.color = "darkgray", neighbors = NULL, + main = "GenSA.Portfolios", xlim = NULL, ylim = NULL) +} +\arguments{ + \item{x}{set of portfolios created by + \code{\link{optimize.portfolio}}} + + \item{...}{any other passthru parameters} + + \item{return.col}{string name of column to use for + returns (vertical axis)} + + \item{risk.col}{string name of column to use for risk + (horizontal axis)} + + \item{chart.assets}{TRUE/FALSE to include risk-return + scatter of assets} + + \item{neighbors}{set of 'neighbor portfolios to overplot} + + \item{main}{an overall title for the plot: see + \code{\link{title}}} + + \item{xlim}{set the limit on coordinates for the x-axis} + + \item{ylim}{set the limit on coordinates for the y-axis} + + \item{x}{set of portfolios created by + \code{\link{optimize.portfolio}}} + + \item{...}{any other passthru parameters} + + \item{return.col}{string name of column to use for + returns (vertical axis)} + + \item{risk.col}{string name of column to use for risk + (horizontal axis)} + + \item{chart.assets}{TRUE/FALSE to include risk-return + scatter of assets} + + \item{neighbors}{set of 'neighbor portfolios to overplot} + + \item{xlim}{set the limit on coordinates for the x-axis} + + \item{ylim}{set the limit on coordinates for the y-axis} + + \item{main}{an overall title for the plot: see + \code{\link{title}}} + + \item{x}{set of portfolios created by + \code{\link{optimize.portfolio}}} + + \item{...}{any other passthru parameters} + + \item{return.col}{string name of column to use for + returns (vertical axis)} + + \item{risk.col}{string name of column to use for risk + (horizontal axis)} + + \item{chart.assets}{TRUE/FALSE to 
include risk-return + scatter of assets} + + \item{neighbors}{set of 'neighbor portfolios to overplot} + + \item{xlim}{set the limit on coordinates for the x-axis} + + \item{ylim}{set the limit on coordinates for the y-axis} + + \item{main}{an overall title for the plot: see + \code{\link{title}}} + + \item{x}{object created by + \code{\link{optimize.portfolio}}} + + \item{...}{any other passthru parameters} + + \item{rp}{TRUE/FALSE to plot feasible portfolios + generated by \code{\link{random_portfolios}}} + + \item{risk.col}{string matching the objective of a 'risk' + objective, on horizontal axis} + + \item{return.col}{string matching the objective of a + 'return' objective, on vertical axis} + + \item{chart.assets}{TRUE/FALSE to include risk-return + scatter of assets} + + \item{element.color}{color for the default plot scatter + points} + + \item{neighbors}{set of 'neighbor' portfolios to + overplot} + + \item{main}{an overall title for the plot: see + \code{\link{title}}} + + \item{xlim}{set the limit on coordinates for the x-axis} + + \item{ylim}{set the limit on coordinates for the y-axis} + + \item{x}{object created by + \code{\link{optimize.portfolio}}} + + \item{...}{any other passthru parameters} + + \item{return.col}{string matching the objective of a + 'return' objective, on vertical axis} + + \item{risk.col}{string matching the objective of a 'risk' + objective, on horizontal axis} + + \item{chart.assets}{TRUE/FALSE to include risk-return + scatter of assets} + + \item{cex.axis}{The magnification to be used for axis + annotation relative to the current setting of \code{cex}} + + \item{element.color}{color for the default plot scatter + points} + + \item{neighbors}{set of 'neighbor' portfolios to + overplot} + + \item{main}{an overall title for the plot: see + \code{\link{title}}} + + \item{xlim}{set the limit on coordinates for the x-axis} + + \item{ylim}{set the limit on coordinates for the y-axis} + + \item{x}{object created by + 
\code{\link{optimize.portfolio}}} + + \item{...}{any other passthru parameters} + + \item{rp}{TRUE/FALSE to plot feasible portfolios + generated by \code{\link{random_portfolios}}} + + \item{return.col}{string matching the objective of a + 'return' objective, on vertical axis} + + \item{risk.col}{string matching the objective of a 'risk' + objective, on horizontal axis} + + \item{chart.assets}{TRUE/FALSE to include risk-return + scatter of assets} + + \item{cex.axis}{The magnification to be used for axis + annotation relative to the current setting of \code{cex}} + + \item{element.color}{color for the default plot scatter + points} + + \item{neighbors}{set of 'neighbor' portfolios to + overplot} + + \item{main}{an overall title for the plot: see + \code{\link{title}}} + + \item{xlim}{set the limit on coordinates for the x-axis} + + \item{ylim}{set the limit on coordinates for the y-axis} +} +\description{ + Scatter and weights chart for DEoptim portfolio + optimizations run with trace=TRUE + + Scatter and weights chart for random portfolio + optimizations run with trace=TRUE + + Scatter and weights chart for portfolio optimization + + Scatter and weights chart for ROI portfolio optimizations + run with trace=TRUE + + Scatter and weights chart for pso portfolio optimizations + run with trace=TRUE + + Scatter and weights chart for GenSA portfolio + optimizations run with trace=TRUE +} +\details{ + \code{return.col} must be the name of a function used to + compute the return metric on the random portfolio weights + \code{risk.col} must be the name of a function used to + compute the risk metric on the random portfolio weights + + \code{neighbors} may be specified in three ways. The + first is as a single number of neighbors. This will + extract the \code{neighbors} closest portfolios in terms + of the \code{out} numerical statistic. The second method + consists of a numeric vector for \code{neighbors}. 
This + will extract the \code{neighbors} with portfolio index + numbers that correspond to the vector contents. The third + method for specifying \code{neighbors} is to pass in a + matrix. This matrix should look like the output of + \code{\link{extractStats}}, and should contain + \code{risk.col},\code{return.col}, and weights columns + all properly named. + + \code{return.col} must be the name of a function used to + compute the return metric on the random portfolio + weights. \code{risk.col} must be the name of a function + used to compute the risk metric on the random portfolio + weights. + + \code{neighbors} may be specified in three ways. The + first is as a single number of neighbors. This will + extract the \code{neighbors} closest portfolios in terms + of the \code{out} numerical statistic. The second method + consists of a numeric vector for \code{neighbors}. This + will extract the \code{neighbors} with portfolio index + numbers that correspond to the vector contents. The third + method for specifying \code{neighbors} is to pass in a + matrix. This matrix should look like the output of + \code{\link{extractStats}}, and should contain + \code{risk.col},\code{return.col}, and weights columns + all properly named. + + This is a fallback that will be called for classes of + portfolio that do not have specific pre-existing plot + methods. + + \code{neighbors} may be specified in three ways. The + first is as a single number of neighbors. This will + extract the \code{neighbors} closest portfolios in terms + of the \code{out} numerical statistic. The second method + consists of a numeric vector for \code{neighbors}. This + will extract the \code{neighbors} with portfolio index + numbers that correspond to the vector contents. The third + method for specifying \code{neighbors} is to pass in a + matrix. 
This matrix should look like the output of + \code{\link{extractStats}}, and should contain + \code{risk.col},\code{return.col}, and weights columns + all properly named. + + The ROI optimizers do not store the portfolio weights + like DEoptim or random portfolios random portfolios can + be generated for the scatter plot. + + \code{return.col} must be the name of a function used to + compute the return metric on the random portfolio + weights. \code{risk.col} must be the name of a function + used to compute the risk metric on the random portfolio + weights + + \code{return.col} must be the name of a function used to + compute the return metric on the random portfolio + weights. \code{risk.col} must be the name of a function + used to compute the risk metric on the random portfolio + weights. + + \code{return.col} must be the name of a function used to + compute the return metric on the random portfolio + weights. \code{risk.col} must be the name of a function + used to compute the risk metric on the random portfolio + weights. 
+} +\author{ + Ross Bennett + + Ross Bennett + + Ross Bennett +} +\seealso{ + \code{\link{optimize.portfolio}} + + \code{\link{optimize.portfolio}} + + \code{\link{optimize.portfolio}} +} + Deleted: pkg/PortfolioAnalytics/man/plot.optimize.portfolio.DEoptim.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.optimize.portfolio.DEoptim.Rd 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/man/plot.optimize.portfolio.DEoptim.Rd 2013-09-22 21:35:27 UTC (rev 3158) @@ -1,58 +0,0 @@ -\name{plot.optimize.portfolio.DEoptim} -\alias{plot.optimize.portfolio.DEoptim} -\title{plot method for optimize.portfolio.DEoptim output} -\usage{ - \method{plot}{optimize.portfolio.DEoptim} (x, ..., - return.col = "mean", risk.col = "ES", - chart.assets = FALSE, neighbors = NULL, - main = "optimized portfolio plot", xlim = NULL, - ylim = NULL) -} -\arguments{ - \item{x}{set of portfolios created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{return.col}{string name of column to use for - returns (vertical axis)} - - \item{risk.col}{string name of column to use for risk - (horizontal axis)} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{neighbors}{set of 'neighbor portfolios to overplot} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} -} -\description{ - Scatter and weights chart for DEoptim portfolio - optimizations run with trace=TRUE -} -\details{ - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio weights - \code{risk.col} must be the name of a function used to - compute the risk metric on the random portfolio weights - - \code{neighbors} may be specified in three ways. The - first is as a single number of neighbors. 
This will - extract the \code{neighbors} closest portfolios in terms - of the \code{out} numerical statistic. The second method - consists of a numeric vector for \code{neighbors}. This - will extract the \code{neighbors} with portfolio index - numbers that correspond to the vector contents. The third - method for specifying \code{neighbors} is to pass in a - matrix. This matrix should look like the output of - \code{\link{extractStats}}, and should contain - \code{risk.col},\code{return.col}, and weights columns - all properly named. -} - Deleted: pkg/PortfolioAnalytics/man/plot.optimize.portfolio.GenSA.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.optimize.portfolio.GenSA.Rd 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/man/plot.optimize.portfolio.GenSA.Rd 2013-09-22 21:35:27 UTC (rev 3158) @@ -1,62 +0,0 @@ -\name{plot.optimize.portfolio.GenSA} -\alias{plot.optimize.portfolio.GenSA} -\title{plot method for optimize.portfolio.DEoptim output} -\usage{ - \method{plot}{optimize.portfolio.GenSA} (x, ..., - rp = FALSE, return.col = "mean", risk.col = "ES", - chart.assets = FALSE, cex.axis = 0.8, - element.color = "darkgray", neighbors = NULL, - main = "GenSA.Portfolios", xlim = NULL, ylim = NULL) -} -\arguments{ - \item{x}{object created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{rp}{TRUE/FALSE to plot feasible portfolios - generated by \code{\link{random_portfolios}}} - - \item{return.col}{string matching the objective of a - 'return' objective, on vertical axis} - - \item{risk.col}{string matching the objective of a 'risk' - objective, on horizontal axis} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{cex.axis}{The magnification to be used for axis - annotation relative to the current setting of \code{cex}} - - \item{element.color}{color for the default plot scatter - points} - - 
\item{neighbors}{set of 'neighbor' portfolios to - overplot} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} -} -\description{ - Scatter and weights chart for GenSA portfolio - optimizations run with trace=TRUE -} -\details{ - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio - weights. \code{risk.col} must be the name of a function - used to compute the risk metric on the random portfolio - weights. -} -\author{ - Ross Bennett -} -\seealso{ - \code{\link{optimize.portfolio}} -} - Deleted: pkg/PortfolioAnalytics/man/plot.optimize.portfolio.ROI.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.optimize.portfolio.ROI.Rd 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/man/plot.optimize.portfolio.ROI.Rd 2013-09-22 21:35:27 UTC (rev 3158) @@ -1,63 +0,0 @@ -\name{plot.optimize.portfolio.ROI} -\alias{plot.optimize.portfolio.ROI} -\title{plot method for optimize.portfolio.ROI output} -\usage{ - \method{plot}{optimize.portfolio.ROI} (x, ..., - rp = FALSE, risk.col = "ES", return.col = "mean", - chart.assets = FALSE, element.color = "darkgray", - neighbors = NULL, main = "ROI.Portfolios", xlim = NULL, - ylim = NULL) -} -\arguments{ - \item{x}{object created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{rp}{TRUE/FALSE to plot feasible portfolios - generated by \code{\link{random_portfolios}}} - - \item{risk.col}{string matching the objective of a 'risk' - objective, on horizontal axis} - - \item{return.col}{string matching the objective of a - 'return' objective, on vertical axis} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{element.color}{color for the default plot scatter - points} - - \item{neighbors}{set of 
'neighbor' portfolios to - overplot} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} -} -\description{ - Scatter and weights chart for ROI portfolio optimizations - run with trace=TRUE -} -\details{ - The ROI optimizers do not store the portfolio weights - like DEoptim or random portfolios random portfolios can - be generated for the scatter plot. - - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio - weights. \code{risk.col} must be the name of a function - used to compute the risk metric on the random portfolio - weights -} -\author{ - Ross Bennett -} -\seealso{ - \code{\link{optimize.portfolio}} -} - Deleted: pkg/PortfolioAnalytics/man/plot.optimize.portfolio.pso.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.optimize.portfolio.pso.Rd 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/man/plot.optimize.portfolio.pso.Rd 2013-09-22 21:35:27 UTC (rev 3158) @@ -1,59 +0,0 @@ -\name{plot.optimize.portfolio.pso} -\alias{plot.optimize.portfolio.pso} -\title{plot method for optimize.portfolio.pso output} -\usage{ - \method{plot}{optimize.portfolio.pso} (x, ..., - return.col = "mean", risk.col = "ES", - chart.assets = FALSE, cex.axis = 0.8, - element.color = "darkgray", neighbors = NULL, - main = "PSO.Portfolios", xlim = NULL, ylim = NULL) -} -\arguments{ - \item{x}{object created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{return.col}{string matching the objective of a - 'return' objective, on vertical axis} - - \item{risk.col}{string matching the objective of a 'risk' - objective, on horizontal axis} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{cex.axis}{The magnification to be used for axis - 
annotation relative to the current setting of \code{cex}} - - \item{element.color}{color for the default plot scatter - points} - - \item{neighbors}{set of 'neighbor' portfolios to - overplot} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} -} -\description{ - Scatter and weights chart for pso portfolio optimizations - run with trace=TRUE -} -\details{ - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio - weights. \code{risk.col} must be the name of a function - used to compute the risk metric on the random portfolio - weights. -} -\author{ - Ross Bennett -} -\seealso{ - \code{\link{optimize.portfolio}} -} - Deleted: pkg/PortfolioAnalytics/man/plot.optimize.portfolio.random.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.optimize.portfolio.random.Rd 2013-09-22 21:29:23 UTC (rev 3157) +++ pkg/PortfolioAnalytics/man/plot.optimize.portfolio.random.Rd 2013-09-22 21:35:27 UTC (rev 3158) @@ -1,58 +0,0 @@ -\name{plot.optimize.portfolio.random} -\alias{plot.optimize.portfolio.random} -\title{plot method for optimize.portfolio.random output} -\usage{ - \method{plot}{optimize.portfolio.random} (x, ..., - return.col = "mean", risk.col = "ES", - chart.assets = FALSE, neighbors = NULL, xlim = NULL, - ylim = NULL, main = "optimized portfolio plot") -} -\arguments{ - \item{x}{set of portfolios created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{return.col}{string name of column to use for - returns (vertical axis)} - - \item{risk.col}{string name of column to use for risk - (horizontal axis)} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{neighbors}{set of 'neighbor portfolios to overplot} - - \item{xlim}{set the limit on 
coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} -} -\description{ - Scatter and weights chart for random portfolio - optimizations run with trace=TRUE -} -\details{ - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio - weights. \code{risk.col} must be the name of a function - used to compute the risk metric on the random portfolio - weights. - - \code{neighbors} may be specified in three ways. The - first is as a single number of neighbors. This will - extract the \code{neighbors} closest portfolios in terms - of the \code{out} numerical statistic. The second method - consists of a numeric vector for \code{neighbors}. This - will extract the \code{neighbors} with portfolio index - numbers that correspond to the vector contents. The third - method for specifying \code{neighbors} is to pass in a - matrix. This matrix should look like the output of - \code{\link{extractStats}}, and should contain - \code{risk.col},\code{return.col}, and weights columns - all properly named. -} - From noreply at r-forge.r-project.org Sun Sep 22 23:48:22 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 23:48:22 +0200 (CEST) Subject: [Returnanalytics-commits] r3159 - in pkg/PortfolioAnalytics: R man Message-ID: <20130922214822.5DB8C185C8D@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-22 23:48:22 +0200 (Sun, 22 Sep 2013) New Revision: 3159 Removed: pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd Modified: pkg/PortfolioAnalytics/R/charts.risk.R pkg/PortfolioAnalytics/man/chart.RiskBudget.Rd pkg/PortfolioAnalytics/man/chart.RiskReward.Rd pkg/PortfolioAnalytics/man/chart.Weights.Rd Log: Updating documentation for chart.RiskBudget.* and collapsing into single .Rd file. 
Modified: pkg/PortfolioAnalytics/R/charts.risk.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-22 21:35:27 UTC (rev 3158) +++ pkg/PortfolioAnalytics/R/charts.risk.R 2013-09-22 21:48:22 UTC (rev 3159) @@ -2,48 +2,53 @@ #' Generic method to chart risk contribution #' #' This function is the generic method to chart risk budget objectives for -#' \code{optimize.portfolio} and \code{opt.list} objects. +#' \code{optimize.portfolio} and \code{opt.list} objects. This function charts +#' the contribution or percent contribution of the resulting objective measures +#' of a \code{risk_budget_objective}. #' -#' @param object optimal portfolio object created by \code{\link{optimize.portfolio}} -#' @param ... passthrough parameters to \code{\link{plot}} -#' @export -chart.RiskBudget <- function(object, ...){ - UseMethod("chart.RiskBudget") -} - -#' Chart risk contribution of an \code{optimize.portfolio} object -#' -#' This function charts the contribution or percent contribution of the resulting -#' objective measures in \code{risk_budget_objectives}. -#' +#' @details #' \code{neighbors} may be specified in three ways. -#' The first is as a single number of neighbors. This will extract the \code{neighbors} closest -#' portfolios in terms of the \code{out} numerical statistic. +#' The first is as a single number of neighbors. This will extract the +#' \code{neighbors} closest to the portfolios in terms of the \code{out} +#' numerical statistic. #' The second method consists of a numeric vector for \code{neighbors}. -#' This will extract the \code{neighbors} with portfolio index numbers that correspond to the vector contents. +#' This will extract the \code{neighbors} with portfolio index numbers that +#' correspond to the vector contents. #' The third method for specifying \code{neighbors} is to pass in a matrix. 
-#' This matrix should look like the output of \code{\link{extractStats}}, and should contain -#' properly named contribution and pct_contrib columns. +#' This matrix should look like the output of \code{\link{extractStats}}, and +#' should contain properly named contribution and pct_contrib columns. #' #' @param object optimal portfolio object created by \code{\link{optimize.portfolio}} -#' @param neighbors risk contribution or pct_contrib of neighbor portfolios to be plotted, see details. -#' @param \dots passthrough parameters to \code{\link{plot}}. +#' @param \dots any other passthru parameters to \code{\link{plot}} +#' @param neighbors risk contribution or pct_contrib of neighbor portfolios to be plotted, see Details. +#' @param match.col string of risk column to match. The \code{opt.list} object +#' may contain risk budgets for ES or StdDev and this will match the proper +#' column names of the objectives list outp (e.g. ES.contribution). #' @param risk.type "absolute" or "percentage" to plot risk contribution in absolute terms or percentage contribution. #' @param main main title for the chart. +#' @param plot.type "line" or "barplot". #' @param ylab label for the y-axis. -#' @param xlab label for the x-axis -#' @param cex.lab the magnification to be used for x and y labels relative to the current setting of \code{cex}. +#' @param xlab label for the x-axis. #' @param cex.axis the magnification to be used for axis annotation relative to the current setting of \code{cex}. +#' @param cex.lab the magnification to be used for axis annotation relative to the current setting of \code{cex}. #' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. 
#' @param las numeric in \{0,1,2,3\}; the style of axis labels #' \describe{ -#' \item{0:}{always parallel to the axis,} +#' \item{0:}{always parallel to the axis [\emph{default}],} #' \item{1:}{always horizontal,} #' \item{2:}{always perpendicular to the axis,} -#' \item{3:}{always vertical [\emph{default}].} +#' \item{3:}{always vertical.} #' } #' @param ylim set the y-axis limit, same as in \code{\link{plot}} -#' @author Ross Bennett +#' @param colorset color palette or vector of colors to use +#' @param legend.loc legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted +#' @param cex.legend The magnification to be used for the legend relative to the current setting of \code{cex} +#' @export +chart.RiskBudget <- function(object, ...){ + UseMethod("chart.RiskBudget") +} + +#' @rdname chart.RiskBudget #' @method chart.RiskBudget optimize.portfolio #' @S3method chart.RiskBudget optimize.portfolio chart.RiskBudget.optimize.portfolio <- function(object, ..., neighbors=NULL, risk.type="absolute", main="Risk Contribution", ylab="", xlab=NULL, cex.axis=0.8, cex.lab=0.8, element.color="darkgray", las=3, ylim=NULL){ @@ -201,34 +206,8 @@ } # end plot for pct_contrib risk.type } -#' Chart risk contribution of an \code{opt.list} object -#' -#' This function charts the absolute contribution or percent contribution of -#' the resulting objective measures in the \code{opt.list} object. -#' -#' @param object list of optimal portfolio objects created by \code{\link{optimizations.combine}}. -#' @param \dots any other passthru parameter. -#' @param match.col string of risk column to match. The \code{opt.list} object -#' may contain risk budgets for ES or StdDev and this will match the proper -#' column names of the objectives list outp (e.g. ES.contribution). -#' @param risk.type "absolute" or "percentage" to plot risk contribution in absolute terms or percentage contribution. -#' @param main main title for the chart. 
-#' @param plot.type "line" or "barplot". -#' @param cex.axis the magnification to be used for axis annotation relative to the current setting of \code{cex}. -#' @param cex.lab the magnification to be used for axis annotation relative to the current setting of \code{cex}. -#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. -#' @param las numeric in \{0,1,2,3\}; the style of axis labels -#' \describe{ -#' \item{0:}{always parallel to the axis [\emph{default}],} -#' \item{1:}{always horizontal,} -#' \item{2:}{always perpendicular to the axis,} -#' \item{3:}{always vertical.} -#' } -#' @param ylim set the y-axis limit, same as in \code{\link{plot}} -#' @param colorset color palette or vector of colors to use -#' @param legend.loc legend.loc NULL, "topright", "right", or "bottomright". If legend.loc is NULL, the legend will not be plotted -#' @param cex.legend The magnification to be used for the legend relative to the current setting of \code{cex} -#' @author Ross Bennett + +#' @rdname chart.RiskBudget #' @method chart.RiskBudget opt.list #' @S3method chart.RiskBudget opt.list chart.RiskBudget.opt.list <- function(object, ..., match.col="ES", risk.type="absolute", main="Risk Budget", plot.type="line", cex.axis=0.8, cex.lab=0.8, element.color="darkgray", las=3, ylim=NULL, colorset=NULL, legend.loc=NULL, cex.legend=0.8){ @@ -381,7 +360,7 @@ if(is.null(colorset)) colorset <- 1:nrow(dat) # plot the data - barplot(dat, names.arg=columnnames, las=las, cex.names=cex.axis, xlab='', col=colorset, main=main, ylab=paste(match.col, "Contribution", sep=" "), cex.lab=cex.lab, cex.axis=cex.axis, ...) + barplot(dat, names.arg=columnnames, las=las, cex.names=cex.axis, xlab='', col=colorset, main=main, ylab=paste(match.col, "Contribution", sep=" "), cex.lab=cex.lab, cex.axis=cex.axis, beside=TRUE, ...) 
# set the axis #axis(2, cex.axis=cex.axis, col=element.color) Modified: pkg/PortfolioAnalytics/man/chart.RiskBudget.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskBudget.Rd 2013-09-22 21:35:27 UTC (rev 3158) +++ pkg/PortfolioAnalytics/man/chart.RiskBudget.Rd 2013-09-22 21:48:22 UTC (rev 3159) @@ -1,18 +1,99 @@ \name{chart.RiskBudget} \alias{chart.RiskBudget} +\alias{chart.RiskBudget.opt.list} +\alias{chart.RiskBudget.optimize.portfolio} \title{Generic method to chart risk contribution} \usage{ chart.RiskBudget(object, ...) + + \method{chart.RiskBudget}{optimize.portfolio} (object, + ..., neighbors = NULL, risk.type = "absolute", + main = "Risk Contribution", ylab = "", xlab = NULL, + cex.axis = 0.8, cex.lab = 0.8, + element.color = "darkgray", las = 3, ylim = NULL) + + \method{chart.RiskBudget}{opt.list} (object, ..., + match.col = "ES", risk.type = "absolute", + main = "Risk Budget", plot.type = "line", + cex.axis = 0.8, cex.lab = 0.8, + element.color = "darkgray", las = 3, ylim = NULL, + colorset = NULL, legend.loc = NULL, cex.legend = 0.8) } \arguments{ \item{object}{optimal portfolio object created by \code{\link{optimize.portfolio}}} - \item{...}{passthrough parameters to \code{\link{plot}}} + \item{\dots}{any other passthru parameters to + \code{\link{plot}}} + + \item{neighbors}{risk contribution or pct_contrib of + neighbor portfolios to be plotted, see Details.} + + \item{match.col}{string of risk column to match. The + \code{opt.list} object may contain risk budgets for ES or + StdDev and this will match the proper column names of the + objectives list outp (e.g. 
ES.contribution).} + + \item{risk.type}{"absolute" or "percentage" to plot risk + contribution in absolute terms or percentage + contribution.} + + \item{main}{main title for the chart.} + + \item{plot.type}{"line" or "barplot".} + + \item{ylab}{label for the y-axis.} + + \item{xlab}{label for the x-axis.} + + \item{cex.axis}{the magnification to be used for axis + annotation relative to the current setting of + \code{cex}.} + + \item{cex.lab}{the magnification to be used for axis + annotation relative to the current setting of + \code{cex}.} + + \item{element.color}{provides the color for drawing + less-important chart elements, such as the box lines, + axis lines, etc.} + + \item{las}{numeric in \{0,1,2,3\}; the style of axis + labels \describe{ \item{0:}{always parallel to the axis + [\emph{default}],} \item{1:}{always horizontal,} + \item{2:}{always perpendicular to the axis,} + \item{3:}{always vertical.} }} + + \item{ylim}{set the y-axis limit, same as in + \code{\link{plot}}} + + \item{colorset}{color palette or vector of colors to use} + + \item{legend.loc}{legend.loc NULL, "topright", "right", + or "bottomright". If legend.loc is NULL, the legend will + not be plotted} + + \item{cex.legend}{The magnification to be used for the + legend relative to the current setting of \code{cex}} } \description{ This function is the generic method to chart risk budget objectives for \code{optimize.portfolio} and - \code{opt.list} objects. + \code{opt.list} objects. This function charts the + contribution or percent contribution of the resulting + objective measures of a \code{risk_budget_objective}. } +\details{ + \code{neighbors} may be specified in three ways. The + first is as a single number of neighbors. This will + extract the \code{neighbors} closest to the portfolios in + terms of the \code{out} numerical statistic. The second + method consists of a numeric vector for \code{neighbors}. 
+ This will extract the \code{neighbors} with portfolio + index numbers that correspond to the vector contents. The + third method for specifying \code{neighbors} is to pass + in a matrix. This matrix should look like the output of + \code{\link{extractStats}}, and should contain properly + named contribution and pct_contrib columns. +} Deleted: pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd 2013-09-22 21:35:27 UTC (rev 3158) +++ pkg/PortfolioAnalytics/man/chart.RiskBudget.opt.list.Rd 2013-09-22 21:48:22 UTC (rev 3159) @@ -1,69 +0,0 @@ -\name{chart.RiskBudget.opt.list} -\alias{chart.RiskBudget.opt.list} -\title{Chart risk contribution of an \code{opt.list} object} -\usage{ - \method{chart.RiskBudget}{opt.list} (object, ..., - match.col = "ES", risk.type = "absolute", - main = "Risk Budget", plot.type = "line", - cex.axis = 0.8, cex.lab = 0.8, - element.color = "darkgray", las = 3, ylim = NULL, - colorset = NULL, legend.loc = NULL, cex.legend = 0.8) -} -\arguments{ - \item{object}{list of optimal portfolio objects created - by \code{\link{optimizations.combine}}.} - - \item{\dots}{any other passthru parameter.} - - \item{match.col}{string of risk column to match. The - \code{opt.list} object may contain risk budgets for ES or - StdDev and this will match the proper column names of the - objectives list outp (e.g. 
ES.contribution).} - - \item{risk.type}{"absolute" or "percentage" to plot risk - contribution in absolute terms or percentage - contribution.} - - \item{main}{main title for the chart.} - - \item{plot.type}{"line" or "barplot".} - - \item{cex.axis}{the magnification to be used for axis - annotation relative to the current setting of - \code{cex}.} - - \item{cex.lab}{the magnification to be used for axis - annotation relative to the current setting of - \code{cex}.} - - \item{element.color}{provides the color for drawing - less-important chart elements, such as the box lines, - axis lines, etc.} - - \item{las}{numeric in \{0,1,2,3\}; the style of axis - labels \describe{ \item{0:}{always parallel to the axis - [\emph{default}],} \item{1:}{always horizontal,} - \item{2:}{always perpendicular to the axis,} - \item{3:}{always vertical.} }} - - \item{ylim}{set the y-axis limit, same as in - \code{\link{plot}}} - - \item{colorset}{color palette or vector of colors to use} - - \item{legend.loc}{legend.loc NULL, "topright", "right", - or "bottomright". If legend.loc is NULL, the legend will - not be plotted} - - \item{cex.legend}{The magnification to be used for the - legend relative to the current setting of \code{cex}} -} -\description{ - This function charts the absolute contribution or percent - contribution of the resulting objective measures in the - \code{opt.list} object. 
-} -\author{ - Ross Bennett -} - Deleted: pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd 2013-09-22 21:35:27 UTC (rev 3158) +++ pkg/PortfolioAnalytics/man/chart.RiskBudget.optimize.portfolio.Rd 2013-09-22 21:48:22 UTC (rev 3159) @@ -1,72 +0,0 @@ -\name{chart.RiskBudget.optimize.portfolio} -\alias{chart.RiskBudget.optimize.portfolio} -\title{Chart risk contribution of an \code{optimize.portfolio} object} -\usage{ - \method{chart.RiskBudget}{optimize.portfolio} (object, - ..., neighbors = NULL, risk.type = "absolute", - main = "Risk Contribution", ylab = "", xlab = NULL, - cex.axis = 0.8, cex.lab = 0.8, - element.color = "darkgray", las = 3, ylim = NULL) -} -\arguments{ - \item{object}{optimal portfolio object created by - \code{\link{optimize.portfolio}}} - - \item{neighbors}{risk contribution or pct_contrib of - neighbor portfolios to be plotted, see details.} - - \item{\dots}{passthrough parameters to - \code{\link{plot}}.} - - \item{risk.type}{"absolute" or "percentage" to plot risk - contribution in absolute terms or percentage - contribution.} - - \item{main}{main title for the chart.} - - \item{ylab}{label for the y-axis.} - - \item{xlab}{label for the x-axis} - - \item{cex.lab}{the magnification to be used for x and y - labels relative to the current setting of \code{cex}.} - - \item{cex.axis}{the magnification to be used for axis - annotation relative to the current setting of - \code{cex}.} - - \item{element.color}{provides the color for drawing - less-important chart elements, such as the box lines, - axis lines, etc.} - - \item{las}{numeric in \{0,1,2,3\}; the style of axis - labels \describe{ \item{0:}{always parallel to the axis,} - \item{1:}{always horizontal,} \item{2:}{always - perpendicular to the axis,} \item{3:}{always vertical - [\emph{default}].} }} - - \item{ylim}{set the y-axis limit, same 
as in - \code{\link{plot}}} -} -\description{ - This function charts the contribution or percent - contribution of the resulting objective measures in - \code{risk_budget_objectives}. -} -\details{ - \code{neighbors} may be specified in three ways. The - first is as a single number of neighbors. This will - extract the \code{neighbors} closest portfolios in terms - of the \code{out} numerical statistic. The second method - consists of a numeric vector for \code{neighbors}. This - will extract the \code{neighbors} with portfolio index - numbers that correspond to the vector contents. The third - method for specifying \code{neighbors} is to pass in a - matrix. This matrix should look like the output of - \code{\link{extractStats}}, and should contain properly - named contribution and pct_contrib columns. -} -\author{ - Ross Bennett -} - Modified: pkg/PortfolioAnalytics/man/chart.RiskReward.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-22 21:35:27 UTC (rev 3158) +++ pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-22 21:48:22 UTC (rev 3159) @@ -1,4 +1,4 @@ -\name{chart.RiskReward} +\name{chart.RiskReward.optimize.portfolio.DEoptim} \alias{chart.RiskReward} \alias{chart.RiskReward.opt.list} \alias{chart.RiskReward.optimize.portfolio.DEoptim} Modified: pkg/PortfolioAnalytics/man/chart.Weights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-22 21:35:27 UTC (rev 3158) +++ pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-22 21:48:22 UTC (rev 3159) @@ -1,4 +1,4 @@ -\name{chart.Weights} +\name{chart.Weights.optimize.portfolio.DEoptim} \alias{chart.Weights} \alias{chart.Weights.opt.list} \alias{chart.Weights.optimize.portfolio.DEoptim} From noreply at r-forge.r-project.org Sun Sep 22 23:55:40 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sun, 22 Sep 2013 23:55:40 +0200 
(CEST) Subject: [Returnanalytics-commits] r3160 - in pkg/PortfolioAnalytics: R man Message-ID: <20130922215540.E710A185C8D@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-22 23:55:40 +0200 (Sun, 22 Sep 2013) New Revision: 3160 Added: pkg/PortfolioAnalytics/man/combine.optimizations.Rd Modified: pkg/PortfolioAnalytics/R/utility.combine.R Log: Changing optimizations.combine to combine.optimizations. Modified: pkg/PortfolioAnalytics/R/utility.combine.R =================================================================== --- pkg/PortfolioAnalytics/R/utility.combine.R 2013-09-22 21:48:22 UTC (rev 3159) +++ pkg/PortfolioAnalytics/R/utility.combine.R 2013-09-22 21:55:40 UTC (rev 3160) @@ -8,7 +8,7 @@ #' @param x a list of objects created by \code{\link{optimize.portfolio}} #' @return an \code{opt.list} object #' @export -optimizations.combine <- function(x){ +combine.optimizations <- function(x){ if(!is.list(x)) stop("x must be passed in as a list") for(i in 1:length(x)){ if(!inherits(x[[i]], "optimize.portfolio")) stop("All objects in x must be of class 'optimize.portfolio'") Added: pkg/PortfolioAnalytics/man/combine.optimizations.Rd =================================================================== --- pkg/PortfolioAnalytics/man/combine.optimizations.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/combine.optimizations.Rd 2013-09-22 21:55:40 UTC (rev 3160) @@ -0,0 +1,19 @@ +\name{combine.optimizations} +\alias{combine.optimizations} +\title{Combine objects created by optimize.portfolio} +\usage{ + combine.optimizations(x) +} +\arguments{ + \item{x}{a list of objects created by + \code{\link{optimize.portfolio}}} +} +\value{ + an \code{opt.list} object +} +\description{ + This function takes a list of objects created by + \code{\link{optimize.portfolio}} and sets the class name + attribute to 'opt.list' for use in generic functions +} + From noreply at r-forge.r-project.org Sun Sep 22 23:56:29 2013 From: noreply at r-forge.r-project.org (noreply at 
r-forge.r-project.org) Date: Sun, 22 Sep 2013 23:56:29 +0200 (CEST) Subject: [Returnanalytics-commits] r3161 - in pkg/PortfolioAnalytics: . R man Message-ID: <20130922215629.4AB9B185C8D@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-22 23:56:29 +0200 (Sun, 22 Sep 2013) New Revision: 3161 Removed: pkg/PortfolioAnalytics/man/optimizations.combine.Rd Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/chart.Weights.R pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/man/chart.RiskReward.Rd pkg/PortfolioAnalytics/man/chart.Weights.Rd Log: Updating documentation for chart.Weights and chart.RiskReward Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-22 21:55:40 UTC (rev 3160) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-22 21:56:29 UTC (rev 3161) @@ -10,6 +10,7 @@ export(chart.RiskReward) export(chart.Weights.EF) export(chart.Weights) +export(combine.optimizations) export(constrained_objective_v2) export(constrained_objective) export(constraint_ROI) @@ -36,7 +37,6 @@ export(meanvar.efficient.frontier) export(minmax_objective) export(objective) -export(optimizations.combine) export(optimize.portfolio_v2) export(optimize.portfolio.parallel) export(optimize.portfolio.rebalancing) Modified: pkg/PortfolioAnalytics/R/chart.Weights.R =================================================================== --- pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-22 21:55:40 UTC (rev 3160) +++ pkg/PortfolioAnalytics/R/chart.Weights.R 2013-09-22 21:56:29 UTC (rev 3161) @@ -25,7 +25,7 @@ #' @seealso \code{\link{optimize.portfolio}} #' @rdname chart.Weights #' @name chart.Weights -#' @aliases chart.Weights.optimize.portfolio.ROI chart.Weights.optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.pso chart.Weights.optimize.portfolio.RP chart.Weights.optimize.portfolio.GenSA +#' @aliases chart.Weights.optimize.portfolio.ROI 
chart.Weights.optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.pso chart.Weights.optimize.portfolio.GenSA #' @export chart.Weights <- function(object, neighbors = NULL, ..., main="Weights", las = 3, xlab=NULL, cex.lab = 1, element.color = "darkgray", cex.axis=0.8){ UseMethod("chart.Weights") Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-22 21:55:40 UTC (rev 3160) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-22 21:56:29 UTC (rev 3161) @@ -87,6 +87,7 @@ } #' @rdname chart.Weights +#' @name chart.Weights #' @method chart.Weights optimize.portfolio.DEoptim #' @S3method chart.Weights optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.DEoptim <- chart.Weights.DE @@ -291,6 +292,7 @@ } #' @rdname chart.RiskReward +#' @name chart.RiskReward #' @method chart.RiskReward optimize.portfolio.DEoptim #' @S3method chart.RiskReward optimize.portfolio.DEoptim chart.RiskReward.optimize.portfolio.DEoptim <- chart.Scatter.DE Modified: pkg/PortfolioAnalytics/man/chart.RiskReward.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-22 21:55:40 UTC (rev 3160) +++ pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-22 21:56:29 UTC (rev 3161) @@ -1,7 +1,6 @@ -\name{chart.RiskReward.optimize.portfolio.DEoptim} +\name{chart.RiskReward} \alias{chart.RiskReward} \alias{chart.RiskReward.opt.list} -\alias{chart.RiskReward.optimize.portfolio.DEoptim} \alias{chart.RiskReward.optimize.portfolio.GenSA} \alias{chart.RiskReward.optimize.portfolio.pso} \alias{chart.RiskReward.optimize.portfolio.random} Modified: pkg/PortfolioAnalytics/man/chart.Weights.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-22 21:55:40 UTC (rev 3160) +++ pkg/PortfolioAnalytics/man/chart.Weights.Rd 2013-09-22 21:56:29 UTC (rev 3161) 
@@ -1,4 +1,4 @@ -\name{chart.Weights.optimize.portfolio.DEoptim} +\name{chart.Weights} \alias{chart.Weights} \alias{chart.Weights.opt.list} \alias{chart.Weights.optimize.portfolio.DEoptim} @@ -6,7 +6,6 @@ \alias{chart.Weights.optimize.portfolio.pso} \alias{chart.Weights.optimize.portfolio.random} \alias{chart.Weights.optimize.portfolio.ROI} -\alias{chart.Weights.optimize.portfolio.RP} \title{boxplot of the weights of the optimal portfolios} \usage{ \method{chart.Weights}{optimize.portfolio.DEoptim} (object, neighbors = NULL, ..., main = "Weights", Deleted: pkg/PortfolioAnalytics/man/optimizations.combine.Rd =================================================================== --- pkg/PortfolioAnalytics/man/optimizations.combine.Rd 2013-09-22 21:55:40 UTC (rev 3160) +++ pkg/PortfolioAnalytics/man/optimizations.combine.Rd 2013-09-22 21:56:29 UTC (rev 3161) @@ -1,19 +0,0 @@ -\name{optimizations.combine} -\alias{optimizations.combine} -\title{Combine objects created by optimize.portfolio} -\usage{ - optimizations.combine(x) -} -\arguments{ - \item{x}{a list of objects created by - \code{\link{optimize.portfolio}}} -} -\value{ - an \code{opt.list} object -} -\description{ - This function takes a list of objects created by - \code{\link{optimize.portfolio}} and sets the class name - attribute to 'opt.list' for use in generic functions -} - From noreply at r-forge.r-project.org Mon Sep 23 00:22:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 00:22:27 +0200 (CEST) Subject: [Returnanalytics-commits] r3162 - in pkg/PortfolioAnalytics: R man Message-ID: <20130922222228.0E25A185F6E@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-23 00:22:27 +0200 (Mon, 23 Sep 2013) New Revision: 3162 Removed: pkg/PortfolioAnalytics/man/plot.optimize.portfolio.Rd Modified: pkg/PortfolioAnalytics/R/charts.DE.R pkg/PortfolioAnalytics/R/charts.GenSA.R pkg/PortfolioAnalytics/R/charts.PSO.R pkg/PortfolioAnalytics/R/charts.ROI.R 
pkg/PortfolioAnalytics/R/charts.RP.R pkg/PortfolioAnalytics/R/generics.R pkg/PortfolioAnalytics/man/chart.RiskReward.Rd pkg/PortfolioAnalytics/man/plot.Rd pkg/PortfolioAnalytics/man/print.optimize.portfolio.Rd Log: Updating documentation Modified: pkg/PortfolioAnalytics/R/charts.DE.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/R/charts.DE.R 2013-09-22 22:22:27 UTC (rev 3162) @@ -87,7 +87,6 @@ } #' @rdname chart.Weights -#' @name chart.Weights #' @method chart.Weights optimize.portfolio.DEoptim #' @S3method chart.Weights optimize.portfolio.DEoptim chart.Weights.optimize.portfolio.DEoptim <- chart.Weights.DE @@ -292,7 +291,6 @@ } #' @rdname chart.RiskReward -#' @name chart.RiskReward #' @method chart.RiskReward optimize.portfolio.DEoptim #' @S3method chart.RiskReward optimize.portfolio.DEoptim chart.RiskReward.optimize.portfolio.DEoptim <- chart.Scatter.DE @@ -311,10 +309,11 @@ } -#' plot method for optimize.portfolio.DEoptim output +#' plot method for objects of class \code{optimize.portfolio} #' -#' Scatter and weights chart for DEoptim portfolio optimizations run with trace=TRUE +#' Scatter and weights chart for portfolio optimizations run with trace=TRUE #' +#' @details #' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights #' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights #' @@ -325,9 +324,15 @@ #' This will extract the \code{neighbors} with portfolio index numbers that correspond to the vector contents. #' The third method for specifying \code{neighbors} is to pass in a matrix. #' This matrix should look like the output of \code{\link{extractStats}}, and should contain -#' \code{risk.col},\code{return.col}, and weights columns all properly named. 
+#' \code{risk.col},\code{return.col}, and weights columns all properly named. +#' +#' The ROI and GenSA solvers do not store the portfolio weights like DEoptim or random +#' portfolios, random portfolios can be generated for the scatter plot with the +#' \code{rp} argument. +#' #' @param x set of portfolios created by \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters +#' @param \dots any other passthru parameters +#' @param rp TRUE/FALSE to plot feasible portfolios generated by \code{\link{random_portfolios}} #' @param return.col string name of column to use for returns (vertical axis) #' @param risk.col string name of column to use for risk (horizontal axis) #' @param chart.assets TRUE/FALSE to include risk-return scatter of assets @@ -335,6 +340,8 @@ #' @param main an overall title for the plot: see \code{\link{title}} #' @param xlim set the limit on coordinates for the x-axis #' @param ylim set the limit on coordinates for the y-axis +#' @param element.color provides the color for drawing less-important chart elements, such as the box lines, axis lines, etc. +#' @param cex.axis the magnification to be used for axis annotation relative to the current setting of \code{cex}. #' @rdname plot #' @method plot optimize.portfolio.DEoptim #' @S3method plot optimize.portfolio.DEoptim Modified: pkg/PortfolioAnalytics/R/charts.GenSA.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/R/charts.GenSA.R 2013-09-22 22:22:27 UTC (rev 3162) @@ -153,27 +153,7 @@ par(op) } -#' plot method for optimize.portfolio.DEoptim output -#' -#' Scatter and weights chart for GenSA portfolio optimizations run with trace=TRUE -#' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights. 
-#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights. -#' -#' @param x object created by \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters -#' @param rp TRUE/FALSE to plot feasible portfolios generated by \code{\link{random_portfolios}} -#' @param return.col string matching the objective of a 'return' objective, on vertical axis -#' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis -#' @param chart.assets TRUE/FALSE to include risk-return scatter of assets -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param element.color color for the default plot scatter points -#' @param neighbors set of 'neighbor' portfolios to overplot -#' @param main an overall title for the plot: see \code{\link{title}} -#' @param xlim set the limit on coordinates for the x-axis -#' @param ylim set the limit on coordinates for the y-axis -#' @seealso \code{\link{optimize.portfolio}} -#' @author Ross Bennett + #' @rdname plot #' @method plot optimize.portfolio.GenSA #' @S3method plot optimize.portfolio.GenSA Modified: pkg/PortfolioAnalytics/R/charts.PSO.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/R/charts.PSO.R 2013-09-22 22:22:27 UTC (rev 3162) @@ -211,26 +211,7 @@ par(op) } -#' plot method for optimize.portfolio.pso output -#' -#' Scatter and weights chart for pso portfolio optimizations run with trace=TRUE -#' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights. -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights. -#' -#' @param x object created by \code{\link{optimize.portfolio}} -#' @param ... 
any other passthru parameters -#' @param return.col string matching the objective of a 'return' objective, on vertical axis -#' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis -#' @param chart.assets TRUE/FALSE to include risk-return scatter of assets -#' @param cex.axis The magnification to be used for axis annotation relative to the current setting of \code{cex} -#' @param element.color color for the default plot scatter points -#' @param neighbors set of 'neighbor' portfolios to overplot -#' @param main an overall title for the plot: see \code{\link{title}} -#' @param xlim set the limit on coordinates for the x-axis -#' @param ylim set the limit on coordinates for the y-axis -#' @seealso \code{\link{optimize.portfolio}} -#' @author Ross Bennett + #' @rdname plot #' @method plot optimize.portfolio.pso #' @S3method plot optimize.portfolio.pso Modified: pkg/PortfolioAnalytics/R/charts.ROI.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/R/charts.ROI.R 2013-09-22 22:22:27 UTC (rev 3162) @@ -155,29 +155,6 @@ par(op) } -#' plot method for optimize.portfolio.ROI output -#' -#' Scatter and weights chart for ROI portfolio optimizations run with trace=TRUE -#' -#' The ROI optimizers do not store the portfolio weights like DEoptim or random -#' portfolios random portfolios can be generated for the scatter plot. -#' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights. -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights -#' -#' @param x object created by \code{\link{optimize.portfolio}} -#' @param ... 
any other passthru parameters -#' @param rp TRUE/FALSE to plot feasible portfolios generated by \code{\link{random_portfolios}} -#' @param risk.col string matching the objective of a 'risk' objective, on horizontal axis -#' @param return.col string matching the objective of a 'return' objective, on vertical axis -#' @param chart.assets TRUE/FALSE to include risk-return scatter of assets -#' @param element.color color for the default plot scatter points -#' @param neighbors set of 'neighbor' portfolios to overplot -#' @param main an overall title for the plot: see \code{\link{title}} -#' @param xlim set the limit on coordinates for the x-axis -#' @param ylim set the limit on coordinates for the y-axis -#' @seealso \code{\link{optimize.portfolio}} -#' @author Ross Bennett #' @rdname plot #' @method plot optimize.portfolio.ROI #' @S3method plot optimize.portfolio.ROI Modified: pkg/PortfolioAnalytics/R/charts.RP.R =================================================================== --- pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/R/charts.RP.R 2013-09-22 22:22:27 UTC (rev 3162) @@ -266,30 +266,6 @@ } -#' plot method for optimize.portfolio.random output -#' -#' Scatter and weights chart for random portfolio optimizations run with trace=TRUE -#' -#' \code{return.col} must be the name of a function used to compute the return metric on the random portfolio weights. -#' \code{risk.col} must be the name of a function used to compute the risk metric on the random portfolio weights. -#' -#' \code{neighbors} may be specified in three ways. -#' The first is as a single number of neighbors. This will extract the \code{neighbors} closest -#' portfolios in terms of the \code{out} numerical statistic. -#' The second method consists of a numeric vector for \code{neighbors}. -#' This will extract the \code{neighbors} with portfolio index numbers that correspond to the vector contents. 
-#' The third method for specifying \code{neighbors} is to pass in a matrix. -#' This matrix should look like the output of \code{\link{extractStats}}, and should contain -#' \code{risk.col},\code{return.col}, and weights columns all properly named. -#' @param x set of portfolios created by \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters -#' @param return.col string name of column to use for returns (vertical axis) -#' @param risk.col string name of column to use for risk (horizontal axis) -#' @param chart.assets TRUE/FALSE to include risk-return scatter of assets -#' @param neighbors set of 'neighbor portfolios to overplot -#' @param xlim set the limit on coordinates for the x-axis -#' @param ylim set the limit on coordinates for the y-axis -#' @param main an overall title for the plot: see \code{\link{title}} #' @rdname plot #' @method plot optimize.portfolio.random #' @S3method plot optimize.portfolio.random @@ -297,29 +273,7 @@ charts.RP(RP=x, risk.col=risk.col, return.col=return.col, chart.assets=chart.assets, neighbors=neighbors, main=main, xlim=xlim, ylim=ylim, ...) } -#' plot method for optimize.portfolio output -#' -#' Scatter and weights chart for portfolio optimization -#' -#' This is a fallback that will be called for classes of portfolio that do not have specific pre-existing plot methods. -#' -#' \code{neighbors} may be specified in three ways. -#' The first is as a single number of neighbors. This will extract the \code{neighbors} closest -#' portfolios in terms of the \code{out} numerical statistic. -#' The second method consists of a numeric vector for \code{neighbors}. -#' This will extract the \code{neighbors} with portfolio index numbers that correspond to the vector contents. -#' The third method for specifying \code{neighbors} is to pass in a matrix. 
-#' This matrix should look like the output of \code{\link{extractStats}}, and should contain -#' \code{risk.col},\code{return.col}, and weights columns all properly named. -#' @param x set of portfolios created by \code{\link{optimize.portfolio}} -#' @param ... any other passthru parameters -#' @param return.col string name of column to use for returns (vertical axis) -#' @param risk.col string name of column to use for risk (horizontal axis) -#' @param chart.assets TRUE/FALSE to include risk-return scatter of assets -#' @param neighbors set of 'neighbor portfolios to overplot -#' @param xlim set the limit on coordinates for the x-axis -#' @param ylim set the limit on coordinates for the y-axis -#' @param main an overall title for the plot: see \code{\link{title}} + #' @rdname plot #' @method plot optimize.portfolio #' @S3method plot optimize.portfolio Modified: pkg/PortfolioAnalytics/R/generics.R =================================================================== --- pkg/PortfolioAnalytics/R/generics.R 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/R/generics.R 2013-09-22 22:22:27 UTC (rev 3162) @@ -257,11 +257,6 @@ #' @param \dots any other passthru parameters #' @param digits the number of significant digits to use when printing. 
#' @author Ross Bennett -#' @aliases print.optimize.portfolio.ROI, -#' print.optimize.portfolio.random, -#' print.optimize.portfolio.DEoptim, -#' print.optimize.portfolio.GenSA, -#' print.optimize.portfolio.pso #' @rdname print.optimize.portfolio #' @method print optimize.portfolio.ROI #' @S3method print optimize.portfolio.ROI Modified: pkg/PortfolioAnalytics/man/chart.RiskReward.Rd =================================================================== --- pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/man/chart.RiskReward.Rd 2013-09-22 22:22:27 UTC (rev 3162) @@ -1,6 +1,7 @@ \name{chart.RiskReward} \alias{chart.RiskReward} \alias{chart.RiskReward.opt.list} +\alias{chart.RiskReward.optimize.portfolio.DEoptim} \alias{chart.RiskReward.optimize.portfolio.GenSA} \alias{chart.RiskReward.optimize.portfolio.pso} \alias{chart.RiskReward.optimize.portfolio.random} Modified: pkg/PortfolioAnalytics/man/plot.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.Rd 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/man/plot.Rd 2013-09-22 22:22:27 UTC (rev 3162) @@ -1,11 +1,11 @@ -\name{plot.optimize.portfolio.DEoptim} +\name{plot} \alias{plot.optimize.portfolio} \alias{plot.optimize.portfolio.DEoptim} \alias{plot.optimize.portfolio.GenSA} \alias{plot.optimize.portfolio.pso} \alias{plot.optimize.portfolio.random} \alias{plot.optimize.portfolio.ROI} -\title{plot method for optimize.portfolio.DEoptim output} +\title{plot method for objects of class \code{optimize.portfolio}} \usage{ \method{plot}{optimize.portfolio.DEoptim} (x, ..., return.col = "mean", risk.col = "ES", @@ -45,31 +45,11 @@ \item{x}{set of portfolios created by \code{\link{optimize.portfolio}}} - \item{...}{any other passthru parameters} + \item{\dots}{any other passthru parameters} - \item{return.col}{string name of column to use for - returns (vertical axis)} + \item{rp}{TRUE/FALSE to plot 
feasible portfolios + generated by \code{\link{random_portfolios}}} - \item{risk.col}{string name of column to use for risk - (horizontal axis)} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{neighbors}{set of 'neighbor portfolios to overplot} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} - - \item{x}{set of portfolios created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - \item{return.col}{string name of column to use for returns (vertical axis)} @@ -81,146 +61,24 @@ \item{neighbors}{set of 'neighbor portfolios to overplot} - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} - \item{main}{an overall title for the plot: see \code{\link{title}}} - \item{x}{set of portfolios created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{return.col}{string name of column to use for - returns (vertical axis)} - - \item{risk.col}{string name of column to use for risk - (horizontal axis)} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{neighbors}{set of 'neighbor portfolios to overplot} - \item{xlim}{set the limit on coordinates for the x-axis} \item{ylim}{set the limit on coordinates for the y-axis} - \item{main}{an overall title for the plot: see - \code{\link{title}}} + \item{element.color}{provides the color for drawing + less-important chart elements, such as the box lines, + axis lines, etc.} - \item{x}{object created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{rp}{TRUE/FALSE to plot feasible portfolios - generated by \code{\link{random_portfolios}}} - - \item{risk.col}{string matching the objective of a 'risk' - objective, on horizontal 
axis} - - \item{return.col}{string matching the objective of a - 'return' objective, on vertical axis} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{element.color}{color for the default plot scatter - points} - - \item{neighbors}{set of 'neighbor' portfolios to - overplot} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} - - \item{x}{object created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{return.col}{string matching the objective of a - 'return' objective, on vertical axis} - - \item{risk.col}{string matching the objective of a 'risk' - objective, on horizontal axis} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{cex.axis}{The magnification to be used for axis - annotation relative to the current setting of \code{cex}} - - \item{element.color}{color for the default plot scatter - points} - - \item{neighbors}{set of 'neighbor' portfolios to - overplot} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} - - \item{x}{object created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{rp}{TRUE/FALSE to plot feasible portfolios - generated by \code{\link{random_portfolios}}} - - \item{return.col}{string matching the objective of a - 'return' objective, on vertical axis} - - \item{risk.col}{string matching the objective of a 'risk' - objective, on horizontal axis} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{cex.axis}{The magnification to be used for axis - annotation relative to the current setting of \code{cex}} - - \item{element.color}{color for the default 
plot scatter - points} - - \item{neighbors}{set of 'neighbor' portfolios to - overplot} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} + \item{cex.axis}{the magnification to be used for axis + annotation relative to the current setting of + \code{cex}.} } \description{ - Scatter and weights chart for DEoptim portfolio - optimizations run with trace=TRUE - - Scatter and weights chart for random portfolio - optimizations run with trace=TRUE - - Scatter and weights chart for portfolio optimization - - Scatter and weights chart for ROI portfolio optimizations - run with trace=TRUE - - Scatter and weights chart for pso portfolio optimizations - run with trace=TRUE - - Scatter and weights chart for GenSA portfolio - optimizations run with trace=TRUE + Scatter and weights chart for portfolio optimizations run + with trace=TRUE } \details{ \code{return.col} must be the name of a function used to @@ -241,76 +99,9 @@ \code{risk.col},\code{return.col}, and weights columns all properly named. - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio - weights. \code{risk.col} must be the name of a function - used to compute the risk metric on the random portfolio - weights. - - \code{neighbors} may be specified in three ways. The - first is as a single number of neighbors. This will - extract the \code{neighbors} closest portfolios in terms - of the \code{out} numerical statistic. The second method - consists of a numeric vector for \code{neighbors}. This - will extract the \code{neighbors} with portfolio index - numbers that correspond to the vector contents. The third - method for specifying \code{neighbors} is to pass in a - matrix. 
This matrix should look like the output of - \code{\link{extractStats}}, and should contain - \code{risk.col},\code{return.col}, and weights columns - all properly named. - - This is a fallback that will be called for classes of - portfolio that do not have specific pre-existing plot - methods. - - \code{neighbors} may be specified in three ways. The - first is as a single number of neighbors. This will - extract the \code{neighbors} closest portfolios in terms - of the \code{out} numerical statistic. The second method - consists of a numeric vector for \code{neighbors}. This - will extract the \code{neighbors} with portfolio index - numbers that correspond to the vector contents. The third - method for specifying \code{neighbors} is to pass in a - matrix. This matrix should look like the output of - \code{\link{extractStats}}, and should contain - \code{risk.col},\code{return.col}, and weights columns - all properly named. - - The ROI optimizers do not store the portfolio weights - like DEoptim or random portfolios random portfolios can - be generated for the scatter plot. - - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio - weights. \code{risk.col} must be the name of a function - used to compute the risk metric on the random portfolio - weights - - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio - weights. \code{risk.col} must be the name of a function - used to compute the risk metric on the random portfolio - weights. - - \code{return.col} must be the name of a function used to - compute the return metric on the random portfolio - weights. \code{risk.col} must be the name of a function - used to compute the risk metric on the random portfolio - weights. + The ROI and GenSA solvers do not store the portfolio + weights like DEoptim or random portfolios, random + portfolios can be generated for the scatter plot with the + \code{rp} argument. 
} -\author{ - Ross Bennett - Ross Bennett - - Ross Bennett -} -\seealso{ - \code{\link{optimize.portfolio}} - - \code{\link{optimize.portfolio}} - - \code{\link{optimize.portfolio}} -} - Deleted: pkg/PortfolioAnalytics/man/plot.optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.optimize.portfolio.Rd 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/man/plot.optimize.portfolio.Rd 2013-09-22 22:22:27 UTC (rev 3162) @@ -1,55 +0,0 @@ -\name{plot.optimize.portfolio} -\alias{plot.optimize.portfolio} -\title{plot method for optimize.portfolio output} -\usage{ - \method{plot}{optimize.portfolio} (x, ..., - return.col = "mean", risk.col = "ES", - chart.assets = FALSE, neighbors = NULL, xlim = NULL, - ylim = NULL, main = "optimized portfolio plot") -} -\arguments{ - \item{x}{set of portfolios created by - \code{\link{optimize.portfolio}}} - - \item{...}{any other passthru parameters} - - \item{return.col}{string name of column to use for - returns (vertical axis)} - - \item{risk.col}{string name of column to use for risk - (horizontal axis)} - - \item{chart.assets}{TRUE/FALSE to include risk-return - scatter of assets} - - \item{neighbors}{set of 'neighbor portfolios to overplot} - - \item{xlim}{set the limit on coordinates for the x-axis} - - \item{ylim}{set the limit on coordinates for the y-axis} - - \item{main}{an overall title for the plot: see - \code{\link{title}}} -} -\description{ - Scatter and weights chart for portfolio optimization -} -\details{ - This is a fallback that will be called for classes of - portfolio that do not have specific pre-existing plot - methods. - - \code{neighbors} may be specified in three ways. The - first is as a single number of neighbors. This will - extract the \code{neighbors} closest portfolios in terms - of the \code{out} numerical statistic. The second method - consists of a numeric vector for \code{neighbors}. 
This - will extract the \code{neighbors} with portfolio index - numbers that correspond to the vector contents. The third - method for specifying \code{neighbors} is to pass in a - matrix. This matrix should look like the output of - \code{\link{extractStats}}, and should contain - \code{risk.col},\code{return.col}, and weights columns - all properly named. -} - Modified: pkg/PortfolioAnalytics/man/print.optimize.portfolio.Rd =================================================================== --- pkg/PortfolioAnalytics/man/print.optimize.portfolio.Rd 2013-09-22 21:56:29 UTC (rev 3161) +++ pkg/PortfolioAnalytics/man/print.optimize.portfolio.Rd 2013-09-22 22:22:27 UTC (rev 3162) @@ -1,13 +1,9 @@ -\name{print.optimize.portfolio.ROI} +\name{print.optimize.portfolio} \alias{print.optimize.portfolio.DEoptim} -\alias{print.optimize.portfolio.DEoptim,} \alias{print.optimize.portfolio.GenSA} -\alias{print.optimize.portfolio.GenSA,} \alias{print.optimize.portfolio.pso} \alias{print.optimize.portfolio.random} -\alias{print.optimize.portfolio.random,} \alias{print.optimize.portfolio.ROI} -\alias{print.optimize.portfolio.ROI,} \title{Printing output of optimize.portfolio} \usage{ \method{print}{optimize.portfolio.ROI} (x, ..., From noreply at r-forge.r-project.org Mon Sep 23 00:51:00 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 00:51:00 +0200 (CEST) Subject: [Returnanalytics-commits] r3163 - in pkg/PortfolioAnalytics: demo vignettes Message-ID: <20130922225100.4895A185992@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-23 00:50:59 +0200 (Mon, 23 Sep 2013) New Revision: 3163 Modified: pkg/PortfolioAnalytics/demo/demo_opt_combine.R pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw pkg/PortfolioAnalytics/vignettes/portfolio_vignette.pdf Log: Modifying files that used optimizations.combine Modified: pkg/PortfolioAnalytics/demo/demo_opt_combine.R 
=================================================================== --- pkg/PortfolioAnalytics/demo/demo_opt_combine.R 2013-09-22 22:22:27 UTC (rev 3162) +++ pkg/PortfolioAnalytics/demo/demo_opt_combine.R 2013-09-22 22:50:59 UTC (rev 3163) @@ -37,7 +37,7 @@ opt.qu <- optimize.portfolio(R=R, portfolio=port.qu, optimize_method="ROI", trace=TRUE) -opt <- optimizations.combine(list(GMV.LO=opt.gmv.lo, GMV.SHORT=opt.gmv.short, QU=opt.qu)) +opt <- combine.optimizations(list(GMV.LO=opt.gmv.lo, GMV.SHORT=opt.gmv.short, QU=opt.qu)) class(opt) chart.Weights(opt, legend.loc="topleft", cex.legend=0.8, ylim=c(-0.3, 1)) Modified: pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw =================================================================== --- pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw 2013-09-22 22:22:27 UTC (rev 3162) +++ pkg/PortfolioAnalytics/vignettes/portfolio_vignette.Rnw 2013-09-22 22:50:59 UTC (rev 3163) @@ -638,7 +638,7 @@ Combine the optimizations for easy comparison. 
<<>>= -opt_combine <- optimizations.combine(list(meanETL=opt_meanETL, +opt_combine <- combine.optimizations(list(meanETL=opt_meanETL, rbmeanETL=opt_rb_meanETL, eqmeanETL=opt_eq_meanETL)) Modified: pkg/PortfolioAnalytics/vignettes/portfolio_vignette.pdf =================================================================== (Binary files differ) From noreply at r-forge.r-project.org Mon Sep 23 01:48:44 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 01:48:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3164 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130922234844.C7F6218141A@r-forge.r-project.org> Author: peter_carl Date: 2013-09-23 01:48:44 +0200 (Mon, 23 Sep 2013) New Revision: 3164 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/README.md Log: - updated pandoc command Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/README.md =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/README.md 2013-09-22 22:50:59 UTC (rev 3163) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/README.md 2013-09-22 23:48:44 UTC (rev 3164) @@ -20,6 +20,7 @@ ./data/README should cover who downloaded what data from where and when ./cache contains processed data files and intermediary results to be processed ./results contains output, figures, or other generated files. 
Should be able to delete the contents and regenerate them +./docs contains documents with text discussing results ./logs: contains logging output ./src: contains non-R source code where needed ./bin: compiled binaries or scripts @@ -28,4 +29,4 @@ # HOWTO To create PDF of slides: -$ pandoc symposium-slides-2013.Rmd -t beamer -o symposium-slides-2013.pdf \ No newline at end of file +$ pandoc symposium-slides-2013.Rmd -t beamer -o symposium-slides-2013.pdf --template=beamer.template \ No newline at end of file From noreply at r-forge.r-project.org Mon Sep 23 01:49:45 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 01:49:45 +0200 (CEST) Subject: [Returnanalytics-commits] r3165 - pkg/PortfolioAnalytics/sandbox/symposium2013/docs Message-ID: <20130922234945.7E45818141A@r-forge.r-project.org> Author: peter_carl Date: 2013-09-23 01:49:45 +0200 (Mon, 23 Sep 2013) New Revision: 3165 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd Log: - minor changes to commentary Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-22 23:48:44 UTC (rev 3164) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-22 23:49:45 UTC (rev 3165) @@ -117,7 +117,7 @@ * maximizes return, * with per-asset position limits, -* with a specific univariate portfolio risk limit, +* with a specific univariate portfolio risk limit or target, * defines risk as losses, * considers the effects of skewness and kurtosis, and * either limits contribution of risk for constituents or @@ -125,6 +125,10 @@ + + # Risk budgeting * Used to allocate the "risk" of a portfolio * Decomposes the total portfolio risk into the risk contribution of each component position @@ -134,6 +138,9 @@ # Return distributions 
\includegraphics[width=1.0\textwidth]{../results/EDHEC-Distributions.png} +# Return distributions +* Split chart into two for readability + # Measuring risk, not volatility Measure risk with Conditional Value-at-Risk (CVaR) @@ -175,7 +182,8 @@ * A high positive %CmETL indicates the position has a large loss when the portfolio also has a large loss # Contribution to downside risk @@ -390,7 +398,6 @@ ## _PortfolioAnalytics_ -- Provides numerical solutions to portfolios with complex constraints and objectives comprised of any function - Unifies the interface across different closed-form optimizers and several analytical solvers - Implements three methods for generating Random Portfolios, including 'sample', 'simplex', and 'grid' - Preserves the flexibility to define any kind of objective and constraint @@ -399,7 +406,7 @@ ## _PerformanceAnalytics_ * Returns-based analysis of performance and risk for financial instruments and portfolios, available on CRAN -# Other packages +# Packages for Mathematical Programming Solvers ## _ROI_ * Infrastructure package by K. Hornik, D. Meyer, and S. Theussl for optimization that facilitates use of different solvers... @@ -409,7 +416,7 @@ ## quadprog * ... or this one, used for solving quadratic programming problems -# Other packages +# Packages for Generalized Continuous Solvers ## _DEoptim_ * Implements Differential Evolution, a very powerful, elegant, population based stochastic function minimizer @@ -419,9 +426,9 @@ ## _pso_ * An implementation of Partical Swarm Optimization consistent with the standard PSO 2007/2011 by Maurice Clerc, _et al._ -# Other packages +# Packages for more iron ## _foreach_ -* Steve Weston's remarkable parallel computing framework, which maps functions to data and aggregates results in parallel across multiple CPU cores and computers... +* Steve Weston's parallel computing framework, which maps functions to data and aggregates results in parallel across multiple CPU cores and computers. 
## _doRedis_ * A companion package to _foreach_ by Bryan Lewis that implements a simple but very flexible parallel back end to Redis, making it to run parallel jobs across multiple R sessions. @@ -429,15 +436,15 @@ ## _doMPI_ * Another companion to _foreach_ that provides a parallel backend across cores using the _parallel_ package -## _xts_ - * Time series package specifically for finance by Jeff Ryan and Josh Ulrich + - # Thanks -* Brian Peterson -* Kris Boudt -* Doug Martin -* Ross Bennett +* Brian Peterson - Trading Partner at DV Trading, Chicago +* Kris Boudt - Faculty of Business and Economics, KU Leuven and VU University Amsterdam +* Doug Martin - Professor and Director of Computational Finance, University of Washington +* Ross Bennett - Student in the University of Washington's MS-CFRM program and GSOC participant # References Figure out bibtex links in markup From noreply at r-forge.r-project.org Mon Sep 23 01:50:37 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 01:50:37 +0200 (CEST) Subject: [Returnanalytics-commits] r3166 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130922235037.5AA4718141A@r-forge.r-project.org> Author: peter_carl Date: 2013-09-23 01:50:37 +0200 (Mon, 23 Sep 2013) New Revision: 3166 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R Log: - optimization step for multiple objectives Added: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-22 23:50:37 UTC (rev 3166) @@ -0,0 +1,302 @@ +### For Presentation at FactSet's 2013 US Investment Process Symposium +# November 10 - 12 , 2013 +# Peter Carl + +### Load the necessary packages +# Include optimizer packages +require(PortfolioAnalytics) +require(quantmod) 
+library(DEoptim) +library(ROI) +require(ROI.plugin.quadprog) +require(ROI.plugin.glpk) +# ... and multi-core packages +require(foreach) +require(doMC) +registerDoMC(3) + +# Available on r-forge +# require(FactorAnalytics) # development version > build + +### Set script constants +runname='historical.moments' + +# Select a rebalance period +rebalance_period = 'quarters' # uses endpoints identifiers from xts; how to do semi-annual? +clean = "boudt" #"none" +permutations = 1000 +p=1-1/12 # set confidence for VaR/mETL for monthly data + +### Description + +# Seven assets, in this case hedge fund indexes representing different styles and two 'types': +# Convertible Arbitrage | Non-directional +# Equity Market Neutral | Non-directional +# Fixed Income Arbitrage | Non-directional +# Event Driven | Non-directional +# CTA Global | Directional +# Global Macro | Directional +# Long/Short Equity | Directional + +# see analyze.HFindexes.R for more detail + +# Set up seven objectives as buoy portfolios: +# - Equal contribution to... +# 1 Weight +# 2 Variance +# 3 Risk (mETL) +# - Reward to Risk ratio of... +# 4 Mean-Variance +# 5 Mean-mETL +# - Minimum... 
+# 6 Variance +# 7 Risk (mETL) + +# Add constraints +# - Box constraints - 5% to 30% +# - Group constraints - Non-directional constrained to 20-70%; Directional between 10-50% +# - Rebalancing period - quarterly +# - Turnover constraints - TBD + +#------------------------------------------------------------------------ +# Set up an initial portfolio object with constraints and objectives using +# v2 specification + +# Create initial portfolio object used to initialize ALL the bouy portfolios +init.portf <- portfolio.spec(assets=colnames(R), + weight_seq=generatesequence(by=0.005) +) +# Add leverage constraint +init.portf <- add.constraint(portfolio=init.portf, + type="leverage", + min_sum=1, + max_sum=1 +) +# Add box constraint +init.portf <- add.constraint(portfolio=init.portf, + type="box", + min=0.05, + max=0.3 +) +# Add group constraint +init.portf <- add.constraint(portfolio=init.portf, type="group", + groups=list(c(1:4), + c(5:7)), + group_min=c(0.25,.05), + group_max=c(0.85,0.75) +) + +# print(init.portf) +# summary(init.portf) + +#------------------------------------------------------------------------ +### Construct BUOY 1: Constrained Mean-StdDev Portfolio - using ROI +MeanSD.portf <- init.portf +# Add the return and sd objectives to the constraints created above +MeanSD.portf <- add.objective(portfolio=init.portf, + type="return", # the kind of objective this is + name="mean", # name of the function + ) +MeanSD.portf <- add.objective(portfolio=MeanSD.portf, + type="risk", # the kind of objective this is + name="var", # name of the function + ) + +### Construct BUOY 2: Constrained Mean-mETL Portfolio - using ROI +# Add the return and mETL objectives +MeanmETL.portf <- add.objective(portfolio=init.portf, + type="return", # the kind of objective this is + name="mean", # name of the function + ) +MeanmETL.portf <- add.objective(portfolio=MeanmETL.portf, + type="risk", # the kind of objective this is + name="ES", # the function to minimize + 
arguments=list(p=p) + ) + +### Construct BUOY 3: Constrained Minimum Variance Portfolio - using ROI +# Add the variance objective +MinSD.portf <- add.objective(portfolio=init.portf, + type="risk", # the kind of objective this is + name="var", # name of the function + ) + +### Construct BUOY 4: Constrained Minimum mETL Portfolio - using ROI +# Add the mETL objective +MinmETL.portf <- add.objective(portfolio=init.portf, + type="risk", # the kind of objective this is + name="ES", # the function to minimize + arguments=list(p=p) + ) + +### Construct BUOY 5: Constrained Equal Variance Contribution Portfolio - using RP +EqSD.portf <- add.objective(portfolio=init.portf, + type="risk_budget", + name="StdDev", + enabled=TRUE, + min_concentration=TRUE, + arguments = list(p=(1-1/12), clean=clean + ) +# Without a sub-objective, we get a somewhat undefined result, since there are (potentially) many Equal SD contribution portfolios. +EqSD.portf <- add.objective(portfolio=init.portf, + type="risk_budget", + name="StdDev" + ) +EqSD.portf$constraints[[1]]$min_sum = 0.99 # set to speed up RP +EqSD.portf$constraints[[1]]$max_sum = 1.01 + +### Construct BUOY 6: Constrained Equal mETL Contribution Portfolio - using RP +EqmETL.portf <- add.objective(init.portf, + type="risk_budget", + name="ES", + min_concentration=TRUE, + arguments = list(p=(1-1/12), clean=clean) +) +# Without a sub-objective, we get a somewhat undefined result, since there are (potentially) many Equal SD contribution portfolios. +EqSD.portf <- add.objective(portfolio=EqSD.portf, + type="risk", + name="var" +) +EqmETL.portf$constraints[[1]]$min_sum = 0.99 # set to speed up RP +EqmETL.portf$constraints[[1]]$max_sum = 1.01 + +### Construct BUOY 7: Equal Weight Portfolio +# There's only one, so construct weights for it. Rebalance the equal-weight portfolio at the same frequency as the others. 
+# dates=index(R[endpoints(R, on=rebalance_period)]) +# weights = xts(matrix(rep(1/NCOL(R),length(dates)*NCOL(R)), ncol=NCOL(R)), order.by=dates) +# colnames(weights)= colnames(R) + +### Construct RISK BUDGET Portfolio +RiskBudget.portf <- portfolio.spec(assets=colnames(R), + weight_seq=generatesequence(by=0.005) +) +# Add leverage constraint +RiskBudget.portf <- add.constraint(portfolio=RiskBudget.portf, + type="leverage", + min_sum=0.99, # set to speed up RP + max_sum=1.01 +) +# Establish position bounds +RiskBudget.portf <- add.constraint(portfolio=RiskBudget.portf, + type="box", + min=0.05, + max=1.0 +) +# Maximize mean return +RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, + type="return", + name="mean" + ) +# Add a risk measure +RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, + type="risk", + name="ETL", + arguments = list(p=(1-1/12), clean=clean) + ) +# Set risk budget limits +RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, + type="risk_budget", + name="ETL", + max_prisk=0.4, + arguments = list(p=(1-1/12), clean=clean) + ) + +#------------------------------------------------------------------------ +### Evaluate portfolio objective objects +# Generate a single set of random portfolios to evaluate against all constraint set +print(paste('constructing random portfolios at',Sys.time())) +rp = random_portfolios(portfolio=init.portf, permutations=permutations) +print(paste('done constructing random portfolios at',Sys.time())) + +start_time<-Sys.time() +print(paste('Starting optimization at',Sys.time())) + +### Evaluate BUOY 1: Constrained Mean-StdDev Portfolio - with ROI +MeanSD.ROI<-optimize.portfolio(R=R, + portfolio=MeanSD.portf, + optimize_method='ROI', + trace=TRUE +) +plot(MeanSD.ROI, risk.col="StdDev", return.col="mean", rp=permutations, chart.assets=TRUE, main="Mean-Volatility Portfolio") +save(MeanSD.ROI,file=paste(resultsdir, 'MeanSD-', Sys.Date(), '-', runname, '.rda',sep='')) # Save the results 
+print(paste('Completed meanSD optimization at',Sys.time(),'moving on to meanmETL')) + +### Evaluate BUOY 2: Constrained Mean-mETL Portfolio - with ROI +MeanmETL.ROI<-optimize.portfolio(R=R, + portfolio=MeanmETL.portf, + optimize_method='ROI', + trace=TRUE, verbose=TRUE + ) +plot(MeanmETL.ROI, risk.col="StdDev", return.col="mean", rp=permutations, chart.assets=TRUE, main="Mean-mETL Portfolio") +plot(MeanmETL.ROI, risk.col="ES", return.col="mean", rp=permutations, chart.assets=TRUE, main="Mean-mETL Portfolio") +save(MeanmETL.ROI,file=paste(resultsdir, 'MeanETL-', Sys.Date(), '-', runname, '.rda',sep='')) +print(paste('Completed meanmETL optimization at',Sys.time(),'moving on to MinSD')) + +### Evaluate BUOY 3: Constrained Minimum Variance Portfolio - with ROI +MinSD.ROI<-optimize.portfolio(R=R, + portfolio=MinSD.portf, + optimize_method='ROI', + trace=TRUE, verbose=TRUE + ) # +plot(MinSD.ROI, risk.col="StdDev", return.col="mean", rp=permutations, chart.assets=TRUE, main="Minimum Volatility Portfolio") +save(MinSD.ROI,file=paste(resultsdir, 'MinSD-', Sys.Date(), '-', runname, '.rda',sep='')) +print(paste('Completed MinSD optimization at',Sys.time(),'moving on to MinmETL')) + +### Evaluate BUOY 4: Constrained Minimum mETL Portfolio - with ROI +MinmETL.ROI<-optimize.portfolio(R=R, + portfolio=MinmETL.portf, + optimize_method='ROI', + trace=TRUE, verbose=TRUE, + ) +plot(MinmETL.ROI, risk.col="StdDev", return.col="mean", rp=permutations, chart.assets=TRUE, main="Minimum mETL Portfolio") +plot(MinmETL.ROI, risk.col="ES", return.col="mean", rp=permutations, chart.assets=TRUE, main="Minimum mETL Portfolio") +save(MinmETL.ROI,file=paste(resultsdir, 'MinmETL-', Sys.Date(), '-', runname, '.rda',sep='')) +print(paste('Completed MinmETL optimization at',Sys.time(),'moving on to EqSD')) + +### Evaluate BUOY 5: Constrained Equal Variance Contribution Portfolio - with RP +EqSD.RND<-optimize.portfolio(R=R, + portfolio=EqSD.portf, + optimize_method='random', + search_size=1000, 
trace=TRUE, verbose=TRUE, + rp=rp) # use the same random portfolios generated above +plot(EqSD.RND, risk.col="StdDev", return.col="mean", rp=permutations, chart.assets=TRUE, main="Equal Volatility Contribution Portfolio") +chart.RiskBudget(EqSD.RND, neighbors=25) +save(EqSD.RND,file=paste(resultsdir, 'EqSD-', Sys.Date(), '-', runname, '.rda',sep='')) +print(paste('Completed EqSD optimization at',Sys.time(),'moving on to EqmETL')) + +### Evaluate BUOY 6: Constrained Equal mETL Contribution Portfolio - with RP +EqmETL.RND<-optimize.portfolio(R=R, + portfolio=EqmETL.portf, + optimize_method='random', + search_size=1000, trace=TRUE + ) # +plot(EqmETL.RND, risk.col="StdDev", return.col="mean", rp=permutations, chart.assets=TRUE, main="Equal mETL Contribution Portfolio") +plot(EqmETL.RND, risk.col="ES", return.col="mean", rp=permutations, chart.assets=TRUE, main="Equal mETL Contribution Portfolio") +chart.RiskBudget(EqmETL.RND, neighbors=25) +save(EqmETL.RND,file=paste(resultsdir, 'EqmETL-', Sys.Date(), '-', runname, '.rda',sep='')) +print(paste('Completed EqmETL optimization at',Sys.time(),'moving on to RiskBudget')) + +### Evaluate BUOY 7: Equal Weight Portfolio +# There's only one, so calculate it. + +### Evaluate Risk Budget Portfolio - with DE +RiskBudget.DE<-optimize.portfolio(R=R, + portfolio=RiskBudget.portf, + optimize_method='DEoptim', + search_size=1000, trace=TRUE, verbose=TRUE + ) # use the same random portfolios generated above +plot(RiskBudget.DE, risk.col="StdDev", return.col="mean") +save(RiskBudget.DE,file=paste(resultsdir, 'RiskBudget-', Sys.Date(), '-', runname, '.rda',sep='')) +print(paste('Completed Risk Budget optimization at',Sys.time(),'. Done with optimizations.')) + +buoys <- combine.optimizations(list(MeanSD=MeanSD.ROI, MeanmETL=MeanmETL.ROI, MinSD=MinSD.ROI, MinmETL=MinmETL.ROI, EqSD=EqSD.RND, EqmETL=EqmETL.RND, RB=RiskBudget.DE)) + # how to add an EqWgt to this list? 
+end_time<-Sys.time() +end_time-start_time + + +######################################################################### +# Optimization ends here +######################################################################### + + From noreply at r-forge.r-project.org Mon Sep 23 01:51:44 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 01:51:44 +0200 (CEST) Subject: [Returnanalytics-commits] r3167 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130922235144.7642A18141A@r-forge.r-project.org> Author: peter_carl Date: 2013-09-23 01:51:44 +0200 (Mon, 23 Sep 2013) New Revision: 3167 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R Log: - added rolling correlation slide Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-22 23:50:37 UTC (rev 3166) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-22 23:51:44 UTC (rev 3167) @@ -48,11 +48,12 @@ ######################################################################## # Load data ######################################################################## -## Just load the data from packages -### See parse.EDHEC.R +# Load the data objects from downloaded and parsed files +## See 'parse.EDHEC.R' and 'download.SP500TR.R' # Load data from cache -load("./cache/edhec.Rdata") +load("./cache/edhec.RData") +load("./cache/SP500TR.RData") # Drop some indexes and reorder R = edhec[,c("Convertible Arbitrage", "Equity Market Neutral","Fixed Income Arbitrage", "Event Driven", "CTA Global", "Global Macro", "Long/Short Equity")] @@ -159,11 +160,17 @@ # @TODO: Add 12M rolling correlation to S&P500 +# -------------------------------------------------------------------- +# Rolling Correlation to S&P500 TR +# 
-------------------------------------------------------------------- +png(filename=paste(resultsdir, dataname, "-RollCorr.png", sep=""), units="in", height=5.5, width=9, res=96) +chart.RollingCorrelation(R,SP500.TR, width=24, legend.loc="bottomleft", colorset=rainbow8equal, main="Rolling 24-Month Correlations") +dev.off() + # -------------------------------------------------------------------- ## Autocorrelation # -------------------------------------------------------------------- -# @TODO: This is frosting, do it last # require(Hmisc) AC.stats = t(table.Autocorrelation(R=R)) From noreply at r-forge.r-project.org Mon Sep 23 01:52:27 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 01:52:27 +0200 (CEST) Subject: [Returnanalytics-commits] r3168 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130922235227.DD40718141A@r-forge.r-project.org> Author: peter_carl Date: 2013-09-23 01:52:27 +0200 (Mon, 23 Sep 2013) New Revision: 3168 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/download.SP500TR.R Log: - taken from parser directory in FinancialInstrument to be used in this analysis Added: pkg/PortfolioAnalytics/sandbox/symposium2013/download.SP500TR.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/download.SP500TR.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/download.SP500TR.R 2013-09-22 23:52:27 UTC (rev 3168) @@ -0,0 +1,64 @@ +# Script for downloading and parsing a monthly total return series from +# http://www.standardandpoors.com/ +# +# Peter Carl + +# Load needed packages: +require(xts) +require(gdata) + +### Constants +filename = "EDHEC-index-history.csv" +objectname = "SP500TR" +datadir = "./data" +cachedir = "./cache" + +# Download the first sheet in the xls workbook directly from the web site: +x = read.xls("http://www.spindices.com/documents/additional-material/monthly.xlsx?force_download=true") + +# That 
gives us something like the following: +# > head(x) +# STANDARD...POOR.S.INDEX.SERVICES X X.1 X.2 X.3 X.4 +# 1 S&P 500 MONTHLY RETURNS +# 2 +# 3 MONTH OF PRICE PRICE 1 MONTH 3 MONTH 6 MONTH +# 4 CLOSE CHANGE % CHANGE % CHANGE % CHANGE +# 5 10/2009 1036.19 -20.88 -1.98% 4.93% 18.72% +# 6 09/2009 1057.08 36.45 3.57% 14.98% 32.49% +# X.5 X.6 X.7 X.8 X.9 X.10 X.11 X.12 X.13 +# 1 NA NA +# 2 1 MONTH 12 MONTH NA NA +# 3 1 YEAR 2 YEAR 3 YEAR 5 YEARS 10 YEARS TOTAL TOTAL NA NA +# 4 % CHANGE % CHANGE % CHANGE % CHANGE % CHANGE RETURN RETURN NA NA +# 5 6.96% -33.12% -24.80% -8.32% -23.97% -1.86% 9.80% NA NA +# 6 -9.37% -30.76% -20.87% -5.16% -17.59% 3.73% -6.91% NA NA +# X.14 X.15 +# 1 NA NA +# 2 NA NA +# 3 NA NA +# 4 NA NA +# 5 NA NA +# 6 NA NA + +# So we only really care about column 1 for dates and column 12 (X.10) for +# total returns. The first four rows are headers, and can be discarded. +rawdates = x[-1:-4,1] +rawreturns = x[-1:-4,12] +# Data goes back to 12/1988. + +# First we convert the dates to something we can use. Note that frac=1 sets +# the day to the last day of the month. That should be close enough for +# monthly data. 
+ISOdates = as.Date(as.yearmon(rawdates, "%m/%Y"), frac=1) + +# Now we convert the rawreturns strings into numbers +tr = as.numeric(as.character((sub("%", "", rawreturns, fixed=TRUE))))/100 + +# Now construct an xts object with the two columns +SP500TR.R=na.omit(as.xts(tr, order.by=ISOdates)) +colnames(SP500TR.R)="SP500TR" + +# Clean up +rm(list=c("tr", "ISOdates", "rawdates", "rawreturns")) +### Save data into cache +save(SP500TR.R, file=paste(cachedir, "/", objectname, ".RData", sep="")) \ No newline at end of file From noreply at r-forge.r-project.org Mon Sep 23 02:36:18 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 02:36:18 +0200 (CEST) Subject: [Returnanalytics-commits] r3169 - pkg/PortfolioAnalytics/man Message-ID: <20130923003618.25F47185F86@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-23 02:36:17 +0200 (Mon, 23 Sep 2013) New Revision: 3169 Added: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd Log: Adding first draft of PortfolioAnalytics-package file. Added: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd =================================================================== --- pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-23 00:36:17 UTC (rev 3169) @@ -0,0 +1,62 @@ +\name{PortfolioAnalytics-package} +\alias{PortfolioAnalytics-package} +\alias{PortfolioAnalytics} +\docType{package} +\title{ +Numeric methods for optimization of portfolios +} + +\description{ +\kbd{PortfolioAnalytics} is an \R package that provides numerical solutions for portfolio problems with complex constraints and objective sets. The goal of the package is to aid practitioners and researchers in solving portfolio optimization problems with complex constraints and objectives that mirror real-world applications.
+ +One of the goals of the package is to provide a common interface to specify constraints and objectives that can be solved by any supported solver (i.e. optimization method). Currently supported optimization methods include random portfolios, differential evolution, particle swarm optimization, generalized simulated annealing, and linear and quadratic programming routines. Additional information on random portfolios is provided below. The differential evolution algorithm is implemented via the \kbd{DEoptim} package, the particle swarm optimization algorithm via the \kbd{pso} package, the generalized simulated annealing via the \kbd{GenSA} package, and linear and quadratic programming are implemented via the \kbd{ROI} package which acts as an interface to the \kbd{Rglpk} and \kbd{quadprog} packages. + +A key strength of \kbd{PortfolioAnalytics} is the generalization of constraints and objectives that can be solved by any available optimization methods. The quadratic and linear programming solvers can solve a limited type of convex optimization problems. +\itemize{ + \item Maximize portfolio return subject to leverage, box, group, position limit, target mean return, and/or factor exposure constraints on weights. + \item Minimize portfolio variance subject to leverage, box, group, turnover, and/or factor exposure constraints (otherwise known as global minimum variance portfolio). + \item Minimize portfolio variance subject to leverage, box, group, and/or factor exposure constraints and a desired portfolio return. + \item Maximize quadratic utility subject to leverage, box, group, target mean return, turnover, and/or factor exposure constraints and risk aversion parameter. + \item Minimize ETL subject to leverage, box, group, position limit, target mean return, and/or factor exposure constraints and target portfolio return.
+} + +Many real-world portfolio optimization problems are 'global optimization' problems, and therefore are not suitable for linear or quadratic programming routines. \kbd{PortfolioAnalytics} provides a random portfolio optimization method, and also utilizes the \R packages DEoptim, pso, and GenSA for solving non-convex global optimization problems. \kbd{PortfolioAnalytics} supports three methods of generating random portfolios. + +\itemize{ + \item The 'sample' method to generate random portfolios is based on an idea by Pat Burns. This is the most flexible method, but also the slowest, and can generate portfolios to satisfy leverage, box, group, and position limit constraints. + \item The 'simplex' method to generate random portfolios is based on a paper by W. T. Shaw. The simplex method is useful to generate random portfolios with the full investment constraint, where the sum of the weights is equal to 1, and min box constraints. Values for min_sum and max_sum of the leverage constraint will be ignored, the sum of weights will equal 1. All other constraints such as the box constraint max, group and position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. Another key point to note is that the solution may not be along the vertexes depending on the objective. For example, a risk budget objective will likely place the portfolio somewhere on the interior. + \item The 'grid' method to generate random portfolios is based on the \code{gridSearch} function in package \kbd{NMOF}. The grid search method only satisfies the min and max box constraints. The min_sum and max_sum leverage constraint will likely be violated and the weights in the random portfolios should be normalized. Normalization may cause the box constraints to be violated and will be penalized in constrained_objective.
+} + +\kbd{PortfolioAnalytics} leverages the \kbd{PerformanceAnalytics} package for many common objective functions. The objective types in \kbd{PortfolioAnalytics} are designed to be used with \kbd{PerformanceAnalytics} functions, but any user supplied valid R function can be used as an objective. + +This summary attempts to provide an overview of how to construct a portfolio object with constraints and objectives, and then run the optimization. +} + +\section{Optimization} +The portfolio object is instantiated with the \code{\link{portfolio.spec}} function. The main argument to \code{\link{portfolio.spec}} is \code{assets}. The \code{assets} argument can be a scalar value for the number of assets, a character vector of fund names, or a named vector of initial weights. + +Adding constraints to the portfolio object is done with \code{\link{add.constraint}}. The \code{\link{add.constraint}} function is the main interface for adding and/or updating constraints to the portfolio object. This function allows the user to specify the portfolio to add the constraints to, the type of constraints, arguments for the constraint, and whether or not to enable the constraint. If updating an existing constraint, the indexnum argument can be specified. + +Objectives can be added to the portfolio object with \code{\link{add.objective}}. The \code{\link{add.objective}} function is the main function for adding and/or updating objectives to the portfolio object. This function allows the user to specify the portfolio to add the objectives to, the type, name of the objective function, arguments to the objective function, and whether or not to enable the objective. If updating an existing constraint, the indexnum argument can be specified. + +With the constraints and objectives specified in the portfolio object, the portfolio object can be passed to \code{\link{optimize.portfolio}} or \code{\link{optimize.portfolio.rebalancing}} to run the optimization. 
Arguments to \code{\link{optimize.portfolio}} include asset returns, the portfolio object specifying constraints and objectives, optimization method, and other parameters specific to the solver. \code{\link{optimize.portfolio.rebalancing}} adds support for backtesting portfolio optimization through time with rebalancing or rolling periods. + +\section{Charts and Graphs} +TODO + +\section{Further Work} +TODO + +\section{Acknowledgements} +TODO + +\section{References} +TODO +W.T. Shaw Paper +Modern Portfolio Optimization +Large-scale portfolio optimization with DEoptim +http://cran.r-project.org/web/packages/DEoptim/vignettes/DEoptimPortfolioOptimization.pdf + +\section{See Also} +TODO \ No newline at end of file From noreply at r-forge.r-project.org Mon Sep 23 03:20:57 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 03:20:57 +0200 (CEST) Subject: [Returnanalytics-commits] r3170 - pkg/PortfolioAnalytics/man Message-ID: <20130923012057.79788183A2B@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-23 03:20:56 +0200 (Mon, 23 Sep 2013) New Revision: 3170 Modified: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd Log: Fixing errors in PortfolioAnalytics-package.Rd Modified: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd =================================================================== --- pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-23 00:36:17 UTC (rev 3169) +++ pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-23 01:20:56 UTC (rev 3170) @@ -33,7 +33,7 @@ This summary attempts to provide an overview of how to construct a portfolio object with constraints and objectives, and then run the optimization. } -\section{Optimization} +\section{Optimization}{ The portfolio object is instantiated with the \code{\link{portfolio.spec}} function. The main argument to \code{\link{portfolio.spec}} is \code{assets}.
The \code{assets} argument can be a scalar value for the number of assets, a character vector of fund names, or a named vector of initial weights. Adding constraints to the portfolio object is done with \code{\link{add.constraint}}. The \code{\link{add.constraint}} function is the main interface for adding and/or updating constraints to the portfolio object. This function allows the user to specify the portfolio to add the constraints to, the type of constraints, arguments for the constraint, and whether or not to enable the constraint. If updating an existing constraint, the indexnum argument can be specified. @@ -41,22 +41,28 @@ Objectives can be added to the portfolio object with \code{\link{add.objective}}. The \code{\link{add.objective}} function is the main function for adding and/or updating objectives to the portfolio object. This function allows the user to specify the portfolio to add the objectives to, the type, name of the objective function, arguments to the objective function, and whether or not to enable the objective. If updating an existing constraint, the indexnum argument can be specified. With the constraints and objectives specified in the portfolio object, the portfolio object can be passed to \code{\link{optimize.portfolio}} or \code{\link{optimize.portfolio.rebalancing}} to run the optimization. Arguments to \code{\link{optimize.portfolio}} include asset returns, the portfolio object specifying constraints and objectives, optimization method, and other parameters specific to the solver. \code{\link{optimize.portfolio.rebalancing}} adds support for backtesting portfolio optimization through time with rebalancing or rolling periods. +} -\section{Charts and Graphs} +\section{Charts and Graphs}{ TODO +} -\section{Further Work} +\section{Further Work}{ TODO +} -\section{Acknowledgements} +\section{Acknowledgements}{ TODO +} -\section{References} +\section{References}{ TODO W.T.
Shaw Paper Modern Portfolio Optimization Large-scale portfolio optimization with DEoptim http://cran.r-project.org/web/packages/DEoptim/vignettes/DEoptimPortfolioOptimization.pdf +} -\section{See Also} -TODO \ No newline at end of file +\section{See Also}{ +TODO +} \ No newline at end of file From noreply at r-forge.r-project.org Mon Sep 23 14:58:05 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 14:58:05 +0200 (CEST) Subject: [Returnanalytics-commits] r3171 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130923125805.BA57D185D28@r-forge.r-project.org> Author: peter_carl Date: 2013-09-23 14:58:05 +0200 (Mon, 23 Sep 2013) New Revision: 3171 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R Log: - adjusted for Ross' feedback Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-23 01:20:56 UTC (rev 3170) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-23 12:58:05 UTC (rev 3171) @@ -13,7 +13,7 @@ # ... 
and multi-core packages require(foreach) require(doMC) -registerDoMC(3) +registerDoMC(5) # Available on r-forge # require(FactorAnalytics) # development version > build @@ -95,18 +95,18 @@ # Add the return and sd objectives to the constraints created above MeanSD.portf <- add.objective(portfolio=init.portf, type="return", # the kind of objective this is - name="mean", # name of the function + name="mean" # name of the function ) MeanSD.portf <- add.objective(portfolio=MeanSD.portf, type="risk", # the kind of objective this is - name="var", # name of the function + name="var" # name of the function ) ### Construct BUOY 2: Constrained Mean-mETL Portfolio - using ROI # Add the return and mETL objectives MeanmETL.portf <- add.objective(portfolio=init.portf, type="return", # the kind of objective this is - name="mean", # name of the function + name="mean" # name of the function ) MeanmETL.portf <- add.objective(portfolio=MeanmETL.portf, type="risk", # the kind of objective this is @@ -133,15 +133,18 @@ EqSD.portf <- add.objective(portfolio=init.portf, type="risk_budget", name="StdDev", - enabled=TRUE, min_concentration=TRUE, - arguments = list(p=(1-1/12), clean=clean + arguments = list(clean=clean) ) # Without a sub-objective, we get a somewhat undefined result, since there are (potentially) many Equal SD contribution portfolios. 
-EqSD.portf <- add.objective(portfolio=init.portf, - type="risk_budget", +EqSD.portf <- add.objective(portfolio=EqSD.portf, + type="risk", name="StdDev" - ) + ) # OR +EqSD.portf <- add.objective(portfolio=EqSD.portf, + type="return", + name="mean" +) EqSD.portf$constraints[[1]]$min_sum = 0.99 # set to speed up RP EqSD.portf$constraints[[1]]$max_sum = 1.01 @@ -156,6 +159,10 @@ EqSD.portf <- add.objective(portfolio=EqSD.portf, type="risk", name="var" +) # OR +EqSD.portf <- add.objective(portfolio=EqSD.portf, + type="return", + name="mean" ) EqmETL.portf$constraints[[1]]$min_sum = 0.99 # set to speed up RP EqmETL.portf$constraints[[1]]$max_sum = 1.01 @@ -257,20 +264,29 @@ EqSD.RND<-optimize.portfolio(R=R, portfolio=EqSD.portf, optimize_method='random', - search_size=1000, trace=TRUE, verbose=TRUE, - rp=rp) # use the same random portfolios generated above -plot(EqSD.RND, risk.col="StdDev", return.col="mean", rp=permutations, chart.assets=TRUE, main="Equal Volatility Contribution Portfolio") -chart.RiskBudget(EqSD.RND, neighbors=25) + search_size=1000, trace=TRUE + ) +plot(EqSD.RND, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Equal Volatility Contribution Portfolio") +chart.RiskBudget(EqSD.RND, risk.type="percentage") save(EqSD.RND,file=paste(resultsdir, 'EqSD-', Sys.Date(), '-', runname, '.rda',sep='')) print(paste('Completed EqSD optimization at',Sys.time(),'moving on to EqmETL')) +EqSD.DE<-optimize.portfolio(R=R, + portfolio=EqSD.portf, + optimize_method='DEoptim', + search_size=1000, trace=TRUE, verbose=TRUE +) +plot(EqSD.DE, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Equal Volatility Contribution Portfolio") +chart.RiskBudget(EqSD.DE, risk.type="percentage") + + ### Evaluate BUOY 6: Constrained Equal mETL Contribution Portfolio - with RP EqmETL.RND<-optimize.portfolio(R=R, portfolio=EqmETL.portf, optimize_method='random', search_size=1000, trace=TRUE ) # -plot(EqmETL.RND, risk.col="StdDev", return.col="mean", rp=permutations, 
chart.assets=TRUE, main="Equal mETL Contribution Portfolio") +plot(EqmETL.RND, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Equal mETL Contribution Portfolio") plot(EqmETL.RND, risk.col="ES", return.col="mean", rp=permutations, chart.assets=TRUE, main="Equal mETL Contribution Portfolio") chart.RiskBudget(EqmETL.RND, neighbors=25) save(EqmETL.RND,file=paste(resultsdir, 'EqmETL-', Sys.Date(), '-', runname, '.rda',sep='')) From noreply at r-forge.r-project.org Mon Sep 23 23:32:51 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 23:32:51 +0200 (CEST) Subject: [Returnanalytics-commits] r3172 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130923213251.65274185C0E@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-23 23:32:51 +0200 (Mon, 23 Sep 2013) New Revision: 3172 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R Log: Making minor revisions and adding comments to symposiom script. Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-23 12:58:05 UTC (rev 3171) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-23 21:32:51 UTC (rev 3172) @@ -103,6 +103,8 @@ ) ### Construct BUOY 2: Constrained Mean-mETL Portfolio - using ROI +#@ Cannot maximize mean return per unit ETL with ROI, consider using +#@ random portfolios or DEoptim. - RB # Add the return and mETL objectives MeanmETL.portf <- add.objective(portfolio=init.portf, type="return", # the kind of objective this is @@ -130,40 +132,47 @@ ) ### Construct BUOY 5: Constrained Equal Variance Contribution Portfolio - using RP -EqSD.portf <- add.objective(portfolio=init.portf, - type="risk_budget", - name="StdDev", - min_concentration=TRUE, - arguments = list(clean=clean) - ) +#@ - Add the sub-objectives first. 
Adding these 3 objectives means that we are +#@ maximizing mean per unit StdDev with equal volatility contribution portfolios. - RB # Without a sub-objective, we get a somewhat undefined result, since there are (potentially) many Equal SD contribution portfolios. -EqSD.portf <- add.objective(portfolio=EqSD.portf, +EqSD.portf <- add.objective(portfolio=init.portf, type="risk", name="StdDev" - ) # OR +) # OR EqSD.portf <- add.objective(portfolio=EqSD.portf, type="return", name="mean" ) +EqSD.portf <- add.objective(portfolio=EqSD.portf, + type="risk_budget", + name="StdDev", + min_concentration=TRUE, + arguments = list(clean=clean) + ) + EqSD.portf$constraints[[1]]$min_sum = 0.99 # set to speed up RP EqSD.portf$constraints[[1]]$max_sum = 1.01 ### Construct BUOY 6: Constrained Equal mETL Contribution Portfolio - using RP -EqmETL.portf <- add.objective(init.portf, - type="risk_budget", - name="ES", - min_concentration=TRUE, - arguments = list(p=(1-1/12), clean=clean) -) +#@ Add the sub-objectives first. These should be added to the EqmETL portfolio. +#@ All objectives below mean that we are maximizing mean return per unit ES with +#@ equal ES contribution. - RB # Without a sub-objective, we get a somewhat undefined result, since there are (potentially) many Equal SD contribution portfolios. 
-EqSD.portf <- add.objective(portfolio=EqSD.portf, +EqmETL.portf <- add.objective(portfolio=init.portf, type="risk", - name="var" + name="ES" ) # OR -EqSD.portf <- add.objective(portfolio=EqSD.portf, +EqmETL.portf <- add.objective(portfolio=EqmETL.portf, type="return", name="mean" ) +EqmETL.portf <- add.objective(EqmETL.portf, + type="risk_budget", + name="ES", + min_concentration=TRUE, + arguments = list(p=(1-1/12), clean=clean) +) + EqmETL.portf$constraints[[1]]$min_sum = 0.99 # set to speed up RP EqmETL.portf$constraints[[1]]$max_sum = 1.01 @@ -195,15 +204,16 @@ name="mean" ) # Add a risk measure +#@ Use ETL to be consistent with risk measures in other BUOY portfolios RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, type="risk", - name="ETL", + name="ES", arguments = list(p=(1-1/12), clean=clean) ) # Set risk budget limits RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, type="risk_budget", - name="ETL", + name="ES", max_prisk=0.4, arguments = list(p=(1-1/12), clean=clean) ) @@ -267,7 +277,7 @@ search_size=1000, trace=TRUE ) plot(EqSD.RND, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Equal Volatility Contribution Portfolio") -chart.RiskBudget(EqSD.RND, risk.type="percentage") +chart.RiskBudget(EqSD.RND, risk.type="percentage", neighbors=25) save(EqSD.RND,file=paste(resultsdir, 'EqSD-', Sys.Date(), '-', runname, '.rda',sep='')) print(paste('Completed EqSD optimization at',Sys.time(),'moving on to EqmETL')) @@ -287,7 +297,7 @@ search_size=1000, trace=TRUE ) # plot(EqmETL.RND, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Equal mETL Contribution Portfolio") -plot(EqmETL.RND, risk.col="ES", return.col="mean", rp=permutations, chart.assets=TRUE, main="Equal mETL Contribution Portfolio") +plot(EqmETL.RND, risk.col="ES", return.col="mean", chart.assets=TRUE, main="Equal mETL Contribution Portfolio") chart.RiskBudget(EqmETL.RND, neighbors=25) save(EqmETL.RND,file=paste(resultsdir, 'EqmETL-', Sys.Date(), '-', 
runname, '.rda',sep='')) print(paste('Completed EqmETL optimization at',Sys.time(),'moving on to RiskBudget')) @@ -295,6 +305,16 @@ ### Evaluate BUOY 7: Equal Weight Portfolio # There's only one, so calculate it. +#@ Create a portfolio object with all the objectives we want calculated. - RB +EqWt.portf <- portfolio.spec(assets=colnames(R)) +EqWt.portf <- add.constraint(portfolio=EqWt.portf, type="leverage", min_sum=0.99, max_sum=1.01) +EqWt.portf <- add.objective(portfolio=EqWt.portf, type="return", name="mean") +EqWt.portf <- add.objective(portfolio=EqWt.portf, type="risk_budget", name="ES", arguments=list(p=p, clean=clean)) +EqWt.portf <- add.objective(portfolio=EqWt.portf, type="risk_budget", name="StdDev", arguments=list(clean=clean)) + +#@ Calculate the objective measures for the equal weight portfolio - RB +EqWt.opt <- equal.weight(R=R, portfolio=EqWt.portf) + ### Evaluate Risk Budget Portfolio - with DE RiskBudget.DE<-optimize.portfolio(R=R, portfolio=RiskBudget.portf, @@ -302,11 +322,32 @@ search_size=1000, trace=TRUE, verbose=TRUE ) # use the same random portfolios generated above plot(RiskBudget.DE, risk.col="StdDev", return.col="mean") +plot(RiskBudget.DE, risk.col="ES", return.col="mean") # several outlier portfolios save(RiskBudget.DE,file=paste(resultsdir, 'RiskBudget-', Sys.Date(), '-', runname, '.rda',sep='')) print(paste('Completed Risk Budget optimization at',Sys.time(),'. Done with optimizations.')) -buoys <- combine.optimizations(list(MeanSD=MeanSD.ROI, MeanmETL=MeanmETL.ROI, MinSD=MinSD.ROI, MinmETL=MinmETL.ROI, EqSD=EqSD.RND, EqmETL=EqmETL.RND, RB=RiskBudget.DE)) - # how to add an EqWgt to this list? +buoys <- combine.optimizations(list(MeanSD=MeanSD.ROI, MeanmETL=MeanmETL.ROI, MinSD=MinSD.ROI, MinmETL=MinmETL.ROI, EqSD=EqSD.RND, EqmETL=EqmETL.RND, RB=RiskBudget.DE, EqWt=EqWt.opt)) +# how to add an EqWgt to this list? 
+#@ The elements of this list need to be optimize.portfolio objects, so unfortunately we +#@ can't do this unless we created an optimize.portfolio object for an equal weight +#@ portfolio. I'll add this. - RB +chart.Weights(buoys, plot.type="bar", ylim=c(0,1)) + +#@ Chart the portfolios that have mean and ES as objective measures. - RB +chart.RiskReward(buoys, risk.col="ES") +#@ Chart the portfolios that have mean and StdDev as objective measures. - RB +chart.RiskReward(buoys, risk.col="StdDev") + +#@ The EqmETL and RB optimizations would be good to compare because they are +#@ similar in that they both include component ES as an objective. - RB +buoyETL <- combine.optimizations(list(EqmETL=EqmETL.RND, RB=RiskBudget.DE, EqWt=EqWt.opt)) +chart.RiskBudget(buoyETL, match.col="ES", risk.type="percentage", legend.loc="topright") + +#@ Compare the equal weight portfolio and the equal SD contribution portfolio. - RB +buoyStdDev <- combine.optimizations(list(EqSD=EqSD.RND, EqWt=EqWt.opt)) +chart.RiskBudget(buoyStdDev, match.col="StdDev", risk.type="absolute", legend.loc="topleft") + + end_time<-Sys.time() end_time-start_time From noreply at r-forge.r-project.org Mon Sep 23 23:37:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 23:37:13 +0200 (CEST) Subject: [Returnanalytics-commits] r3173 - in pkg/PortfolioAnalytics: . 
R man Message-ID: <20130923213713.E7B01185286@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-23 23:37:13 +0200 (Mon, 23 Sep 2013) New Revision: 3173 Added: pkg/PortfolioAnalytics/R/equal.weight.R pkg/PortfolioAnalytics/man/equal.weight.Rd Modified: pkg/PortfolioAnalytics/DESCRIPTION pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/man/plot.Rd Log: Adding function calculate objective_measures for an equal weight portfolio Modified: pkg/PortfolioAnalytics/DESCRIPTION =================================================================== --- pkg/PortfolioAnalytics/DESCRIPTION 2013-09-23 21:32:51 UTC (rev 3172) +++ pkg/PortfolioAnalytics/DESCRIPTION 2013-09-23 21:37:13 UTC (rev 3173) @@ -59,3 +59,4 @@ 'charts.groups.R' 'charts.multiple.R' 'utility.combine.R' + 'equal.weight.R' Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-23 21:32:51 UTC (rev 3172) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-23 21:37:13 UTC (rev 3173) @@ -18,6 +18,7 @@ export(create.EfficientFrontier) export(diversification_constraint) export(diversification) +export(equal.weight) export(extract.efficient.frontier) export(extractEfficientFrontier) export(extractGroups) Added: pkg/PortfolioAnalytics/R/equal.weight.R =================================================================== --- pkg/PortfolioAnalytics/R/equal.weight.R (rev 0) +++ pkg/PortfolioAnalytics/R/equal.weight.R 2013-09-23 21:37:13 UTC (rev 3173) @@ -0,0 +1,50 @@ + + +#' Create an equal weight portfolio +#' +#' This function calculates objective measures for an equal weight portfolio. +#' +#' @details +#' This function is simply a wrapper around \code{\link{constrained_objective}} +#' to calculate the objective measures in the given \code{portfolio} object of +#' an equal weight portfolio. The portfolio object should include all objectives +#' to be calculated. 
+#' +#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns +#' @param portfolio an object of type "portfolio" specifying the constraints and objectives for the optimization +#' @param \dots any other passthru parameters to \code{constrained_objective} +#' @return a list containing the returns, weights, objective measures, call, and portfolio object +#' @author Ross Bennett +#' @export +equal.weight <- function(R, portfolio, ...){ + # Check for portfolio object passed in + if(!is.portfolio(portfolio)) stop("portfolio object passed in must be of class 'portfolio'") + + # get asset information for equal weight portfolio + assets <- portfolio$assets + nassets <- length(assets) + weights <- rep(1 / nassets, nassets) + names(weights) <- names(assets) + + # make sure the number of columns in R matches the number of assets + if(ncol(R) != nassets){ + if(ncol(R) > nassets){ + R <- R[, 1:nassets] + warning("number of assets is less than number of columns in returns object, subsetting returns object.") + } else { + stop("number of assets is greater than number of columns in returns object") + } + } + + out <- constrained_objective(w=weights, R=R, portfolio=portfolio, trace=TRUE, ...)$objective_measures + return(structure(list( + R=R, + weights=weights, + objective_measures=out, + call=match.call(), + portfolio=portfolio), + class=c("optimize.portfolio.eqwt", "optimize.portfolio")) + ) +} + + Added: pkg/PortfolioAnalytics/man/equal.weight.Rd =================================================================== --- pkg/PortfolioAnalytics/man/equal.weight.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/equal.weight.Rd 2013-09-23 21:37:13 UTC (rev 3173) @@ -0,0 +1,35 @@ +\name{equal.weight} +\alias{equal.weight} +\title{Create an equal weight portfolio} +\usage{ + equal.weight(R, portfolio, ...) 
+} +\arguments{ + \item{R}{an xts, vector, matrix, data frame, timeSeries + or zoo object of asset returns} + + \item{portfolio}{an object of type "portfolio" specifying + the constraints and objectives for the optimization} + + \item{\dots}{any other passthru parameters to + \code{constrained_objective}} +} +\value{ + a list containing the returns, weights, objective + measures, call, and portfolio object +} +\description{ + This function calculates objective measures for an equal + weight portfolio. +} +\details{ + This function is simply a wrapper around + \code{\link{constrained_objective}} to calculate the + objective measures in the given \code{portfolio} object + of an equal weight portfolio. The portfolio object should + include all objectives to be calculated. +} +\author{ + Ross Bennett +} + Modified: pkg/PortfolioAnalytics/man/plot.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.Rd 2013-09-23 21:32:51 UTC (rev 3172) +++ pkg/PortfolioAnalytics/man/plot.Rd 2013-09-23 21:37:13 UTC (rev 3173) @@ -1,4 +1,4 @@ -\name{plot} +\name{plot.optimize} \alias{plot.optimize.portfolio} \alias{plot.optimize.portfolio.DEoptim} \alias{plot.optimize.portfolio.GenSA} From noreply at r-forge.r-project.org Mon Sep 23 23:56:22 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Mon, 23 Sep 2013 23:56:22 +0200 (CEST) Subject: [Returnanalytics-commits] r3174 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130923215622.BE1BB1859CD@r-forge.r-project.org> Author: peter_carl Date: 2013-09-23 23:56:22 +0200 (Mon, 23 Sep 2013) New Revision: 3174 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R Log: - cleaned up conflict with local copy - thanks for the comments and additions, Ross! 
Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-23 21:37:13 UTC (rev 3173) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-23 21:56:22 UTC (rev 3174) @@ -204,12 +204,14 @@ name="mean" ) # Add a risk measure -#@ Use ETL to be consistent with risk measures in other BUOY portfolios + +# Use ES to be consistent with risk measures in other BUOY portfolios RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, type="risk", name="ES", arguments = list(p=(1-1/12), clean=clean) ) + # Set risk budget limits RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, type="risk_budget", @@ -217,6 +219,14 @@ max_prisk=0.4, arguments = list(p=(1-1/12), clean=clean) ) +# Calculate portfolio variance, but don't use it in the objective; used only for plots +RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, + type="risk", # the kind of objective this is + name="StdDev", # the function to minimize + enabled=TRUE, # enable or disable the objective + multiplier=0, # calculate it but don't use it in the objective + arguments=list(clean=clean) +) #------------------------------------------------------------------------ ### Evaluate portfolio objective objects @@ -304,7 +314,6 @@ ### Evaluate BUOY 7: Equal Weight Portfolio # There's only one, so calculate it. - #@ Create a portfolio object with all the objectives we want calculated. 
- RB EqWt.portf <- portfolio.spec(assets=colnames(R)) EqWt.portf <- add.constraint(portfolio=EqWt.portf, type="leverage", min_sum=0.99, max_sum=1.01) @@ -315,17 +324,25 @@ #@ Calculate the objective measures for the equal weight portfolio - RB EqWt.opt <- equal.weight(R=R, portfolio=EqWt.portf) + ### Evaluate Risk Budget Portfolio - with DE +registerDoSEQ() # turn off parallelization to keep the trace data RiskBudget.DE<-optimize.portfolio(R=R, portfolio=RiskBudget.portf, optimize_method='DEoptim', - search_size=1000, trace=TRUE, verbose=TRUE + search_size=1000, trace=TRUE ) # use the same random portfolios generated above plot(RiskBudget.DE, risk.col="StdDev", return.col="mean") plot(RiskBudget.DE, risk.col="ES", return.col="mean") # several outlier portfolios +chart.RiskBudget(RiskBudget.DE) +chart.RiskBudget(RiskBudget.DE, risk.type="percentage") + save(RiskBudget.DE,file=paste(resultsdir, 'RiskBudget-', Sys.Date(), '-', runname, '.rda',sep='')) -print(paste('Completed Risk Budget optimization at',Sys.time(),'. Done with optimizations.')) +print(RiskBudget.DE$elapsed_time) +print('Done with optimizations.') + +### Combine optimization objects buoys <- combine.optimizations(list(MeanSD=MeanSD.ROI, MeanmETL=MeanmETL.ROI, MinSD=MinSD.ROI, MinmETL=MinmETL.ROI, EqSD=EqSD.RND, EqmETL=EqmETL.RND, RB=RiskBudget.DE, EqWt=EqWt.opt)) # how to add an EqWgt to this list? 
#@ The elements of this list need to be optimize.portfolio objects, so unfortunately we From noreply at r-forge.r-project.org Tue Sep 24 02:40:14 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 02:40:14 +0200 (CEST) Subject: [Returnanalytics-commits] r3175 - pkg/PortfolioAnalytics/sandbox/symposium2013/R Message-ID: <20130924004014.AAAC41850E0@r-forge.r-project.org> Author: peter_carl Date: 2013-09-24 02:40:14 +0200 (Tue, 24 Sep 2013) New Revision: 3175 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.UnStackedBar.R Log: - this will find its way into PerformanceAnalytics at some point - slightly modified from the sandbox function in PerfA for this use Added: pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.UnStackedBar.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.UnStackedBar.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.UnStackedBar.R 2013-09-24 00:40:14 UTC (rev 3175) @@ -0,0 +1,64 @@ +chart.UnStackedBar <- function(w, colorset=1:NROW(w), rotate=c("vertical", "horizontal"), yaxis=TRUE, equal.line=FALSE) + { + # Weights should come in as: + # Convertible Arbitrage CTA Global Distressed Securities + # 2000-01-01 0.02500000 0.14601749 0.0250000 + # 2001-01-01 0.15785710 0.19577551 0.0250000 + # 2002-01-01 0.24431295 0.02500000 0.0250000 + # 2003-01-01 0.21955470 0.06590151 0.0250000 + + # if (wrap) + # row.names = sapply(rownames(object), function(x) paste(strwrap(x, wrap.rownames), collapse = "\n"), USE.NAMES = FALSE) + rotate = rotate[1] + row.names = sapply(rownames(w), function(x) paste(strwrap(x,10), collapse = "\n"), USE.NAMES=FALSE) + if(rotate=="vertical"){ + par(oma = c(4,8,2,1), mar=c(0,0,0,1)) # c(bottom, left, top, right) + layout(matrix(c(1:NCOL(w)), nr = 1, byrow = TRUE)) + for(i in 1:NCOL(w)){ + if(i==1){ + barplot(w[,i], col=colorset[i], horiz=TRUE, xlim=c(0,max(w)), axes=FALSE, 
names.arg=row.names, las=2, cex.names=1.5) + abline(v=0, col="darkgray") + if(equal.line) + abline(v=1/NROW(w), col="darkgray", lty=2) + axis(1, cex.axis = 1, col = "darkgray", las=1) + mtext(colnames(w)[i], side= 3, cex=1, adj=0.5) + } + else{ + barplot(w[,i], col=colorset[i], horiz=TRUE, xlim=c(0,max(w)), axes=FALSE, names.arg="", ylab=colnames(w)[i]) + abline(v=0, col="darkgray") + if(equal.line) + abline(v=1/NROW(w), col="darkgray", lty=2) + if(yaxis) + axis(1, cex.axis = 1, col = "darkgray", las=1) + mtext(colnames(w)[i], side= 3, cex=1, adj=0.5) + } + } + } + else { # rotation is horizontal (zero line is horizontal) + par(oma = c(8,4,2,1), mar=c(1,0,1,1)) # c(bottom, left, top, right) + layout(matrix(c(1:NCOL(w)), nr = NCOL(w), byrow = FALSE)) + for(i in 1:NCOL(w)){ + if(i==NCOL(w)){ + barplot(w[,i], col=colorset[i], horiz=FALSE, ylim=c(0,max(w)), axes=FALSE, names.arg=row.names, las=2, cex.names=1.5) + abline(h=0, col="darkgray") + if(equal.line) + abline(h=1/NROW(w), col="darkgray", lty=2) + axis(2, cex.axis = 1, col = "darkgray", las=1) + mtext(colnames(w)[i], side= 3, cex=1, adj=0) + } + else{ + barplot(w[,i], col=colorset[i], horiz=FALSE, ylim=c(0,max(w)), axes=FALSE, names.arg="", ylab=colnames(w)[i]) + abline(h=0, col="darkgray") + if(equal.line) + abline(h=1/NROW(w), col="darkgray", lty=2) + if(yaxis) + axis(2, cex.axis = 1, col = "darkgray", las=1) + mtext(colnames(w)[i], side= 3, cex=1, adj=0) + } + } + } + par(op) +} + +# Another way, this without layout +# http://timotheepoisot.fr/2013/02/17/stacked-barcharts/ \ No newline at end of file From noreply at r-forge.r-project.org Tue Sep 24 02:56:00 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 02:56:00 +0200 (CEST) Subject: [Returnanalytics-commits] r3176 - pkg/PortfolioAnalytics/sandbox/symposium2013/R Message-ID: <20130924005600.EB78B185E64@r-forge.r-project.org> Author: peter_carl Date: 2013-09-24 02:56:00 +0200 (Tue, 24 Sep 2013) New 
Revision: 3176 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.UnStackedBar.R Log: - not fully functionalized, tweaking parameters Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.UnStackedBar.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.UnStackedBar.R 2013-09-24 00:40:14 UTC (rev 3175) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.UnStackedBar.R 2013-09-24 00:56:00 UTC (rev 3176) @@ -12,16 +12,16 @@ rotate = rotate[1] row.names = sapply(rownames(w), function(x) paste(strwrap(x,10), collapse = "\n"), USE.NAMES=FALSE) if(rotate=="vertical"){ - par(oma = c(4,8,2,1), mar=c(0,0,0,1)) # c(bottom, left, top, right) + par(oma = c(4,8,2,1), mar=c(0,1,0,1)) # c(bottom, left, top, right) layout(matrix(c(1:NCOL(w)), nr = 1, byrow = TRUE)) for(i in 1:NCOL(w)){ if(i==1){ - barplot(w[,i], col=colorset[i], horiz=TRUE, xlim=c(0,max(w)), axes=FALSE, names.arg=row.names, las=2, cex.names=1.5) + barplot(w[,i], col=colorset[i], horiz=TRUE, xlim=c(0,max(w)), axes=FALSE, names.arg=row.names, las=2, cex.names=1) abline(v=0, col="darkgray") if(equal.line) abline(v=1/NROW(w), col="darkgray", lty=2) axis(1, cex.axis = 1, col = "darkgray", las=1) - mtext(colnames(w)[i], side= 3, cex=1, adj=0.5) + mtext(colnames(w)[i], side= 3, cex=0.8, adj=0.5) } else{ barplot(w[,i], col=colorset[i], horiz=TRUE, xlim=c(0,max(w)), axes=FALSE, names.arg="", ylab=colnames(w)[i]) @@ -30,7 +30,7 @@ abline(v=1/NROW(w), col="darkgray", lty=2) if(yaxis) axis(1, cex.axis = 1, col = "darkgray", las=1) - mtext(colnames(w)[i], side= 3, cex=1, adj=0.5) + mtext(colnames(w)[i], side= 3, cex=0.8, adj=0.5) } } } From noreply at r-forge.r-project.org Tue Sep 24 03:30:20 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 03:30:20 +0200 (CEST) Subject: [Returnanalytics-commits] r3177 - pkg/PortfolioAnalytics/man Message-ID: 
<20130924013020.7EC201848F5@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-24 03:30:19 +0200 (Tue, 24 Sep 2013) New Revision: 3177 Modified: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd Log: Adding content to PortfolioAnalytics-package file. Modified: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd =================================================================== --- pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-24 00:56:00 UTC (rev 3176) +++ pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-24 01:30:19 UTC (rev 3177) @@ -11,7 +11,7 @@ One of the goals of the packages is to provide a common interface to specify constraints and objectives that can be solved by any supported solver (i.e. optimization method). Currently supported optimization methods include random portfolios, differential evolution, particle swarm optimization, generalized simulated annealing, and linear and quadratic programming routines. Additional information on random portfolios is provided below. The differential evolution algorithm is implemented via the \kbd{DEoptim} package, the particle swarm optimization algorithm via the \kbd{pso} package, the generalized simulated annealing via the \kbd{GenSA} package, and linear and quadratic programming are implemented via the \kbd{ROI} package which acts as an interface to the \kbd{Rglpk} and \kbd{quadprog} packages. -A key strength of \kbd{PortfolioAnalytics} is the generalization of constraints and objectives that can be solved by any available optimization methods. The quadratic and linear programming solvers can solve a limited type of convex optimization problems. +A key strength of \kbd{PortfolioAnalytics} is the generalization of constraints and objectives that can be solved. The quadratic and linear programming solvers can solve a limited type of convex optimization problems. 
\itemize{ \item Maxmimize portfolio return subject leverage, box, group, position limit, target mean return, and/or factor exposure constraints on weights. \item Minimize portfolio variance subject to leverage, box, group, turnover, and/or factor exposure constraints (otherwise known as global minimum variance portfolio). @@ -25,12 +25,12 @@ \itemize{ \item The ?sample? method to generate random portfolios is based on an idea by Pat Burns. This is the most flexible method, but also the slowest, and can generate portfolios to satisfy leverage, box, group, and position limit constraints. \item The ?simplex? method to generate random portfolios is based on a paper by W. T. Shaw. The simplex method is useful to generate random portfolios with the full investment constraint, where the sum of the weights is equal to 1, and min box constraints. Values for min_sum and max_sum of the leverage constraint will be ignored, the sum of weights will equal 1. All other constraints such as the box constraint max, group and position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. Another key point to note is that the solution may not be along the vertexes depending on the objective. For example, a risk budget objective will likely place the portfolio somewhere on the interior. - \item The ?grid? method to generate random portfolios is based on the \code{gridSearch} function in package \kbd{NMOF}. The grid search method only satisfies the min and max box constraints. The min_sum and max_sum leverage constraint will likely be violated and the weights in the random portfolios should be normalized. Normalization may cause the box constraints to be violated and will be penalized in constrained_objective. + \item The ?grid? method to generate random portfolios is based on the \code{gridSearch} function in package \kbd{NMOF}. 
The grid search method only satisfies the min and max box constraints. The min_sum and max_sum leverage constraint will likely be violated and the weights in the random portfolios should be normalized. Normalization may cause the box constraints to be violated and will be penalized in \code{constrained_objective}. } \kbd{PortfolioAnalytics} leverages the \kbd{PerformanceAnalytics} package for many common objective functions. The objective types in \kbd{PortfolioAnalytics} are designed to be used with \kbd{PerformanceAnalytics} functions, but any user supplied valid R function can be used as an objective. -This summary attempts to provide an overview of how to construct a portfolio object with constraints and objectives, and then run the optimization. +This summary attempts to provide an overview of how to construct a portfolio object with constraints and objectives, run the optimization, and chart the results. } \section{Optimization}{ @@ -44,25 +44,48 @@ } \section{Charts and Graphs}{ -TODO +Intuition into the optimization can be aided through visualization. The goal of creating the charts is to provide visualization tools for optimal portfolios regardless of the chosen optimization method. + +\code{\link{chart.Weights}} plots the weights of the optimal portfolio. \code{\link{chart.RiskReward}} plots the optimal portfolio in risk-reward space. The random portfolios, DEoptim, and pso solvers will return trace portfolio information at each iteration when \code{\link{optimize.portfolio}} is run with \code{trace=TRUE}. If this is the case, \code{\link{chart.RiskReward}} will plot these portfolios so that the feasible space can be easily visualized. Although the GenSA and ROI solvers do not return trace portfolio information, random portfolios can be be generated with the argument \code{rp=TRUE} in \code{\link{chart.RiskReward}}. A \code{plot} function is provided that will plot the weights and risk-reward scatter chart. 
The component risk contribution can be charted for portfolio optimization problems with risk budget objectives with \code{\link{chart.RiskBudget}}. Neighbor portfolios can be plotted in \code{\link{chart.RiskBudget}}, \code{\link{chart.Weights}}, and \code{\link{chart.RiskReward}}. + +Efficient frontiers can be extracted from \code{optimize.portfolio} objects or created from a \code{portfolio} object. The efficient frontier can be charted in risk-reward space with \code{\link{chart.EfficientFrontier}}. The weights along the efficient frontier can be charted with \code{\link{chart.Weights.EF}}. + +Multiple objects created via \code{\link{optimize.portfolio}} can be combined with \code{\link{combine.optimizations}} for visual comparison. The weights of the optimal portfolios can be plotted with \code{\link{chart.Weights}}. The optimal portfolios can be compared in risk-reward space with \code{\link{chart.RiskReward}}. The portfolio component risk contributions of the multiple optimal portfolios can be plotted with \code{\link{chart.RiskBudget}}. } \section{Further Work}{ -TODO +Continued work to improved charts and graphs. + +Continued work to improve features to combine and compare multiple optimal portfolio objects. + +Support for more solvers. + +Comments, suggestions, and/or code patches are welcome. } -\section{Acknowledgements}{ -TODO +\author{ +Kris Boudt \cr +Peter Carl \cr +Brian G. Peterson \cr + +Maintainer: Brian G. Peterson \email{brian at braverock.com} } -\section{References}{ +\references{ +Shaw, William Thornton, \emph{Portfolio Optimization for VAR, CVaR, Omega and Utility with General Return Distributions: A Monte Carlo Approach for Long-Only and Bounded Short Portfolios with Optional Robustness and a Simplified Approach to Covariance Matching} (June 1, 2011). Available at SSRN: http://ssrn.com/abstract=1856476 or http://dx.doi.org/10.2139/ssrn.1856476 \cr + +Scherer, B. and Martin, D. \emph{Modern Portfolio Optimization}. Springer. 2005. 
\cr + +} + +\section{Acknowledgements}{ TODO -W.T. Shaw Paper -Modern Portfolio Optimization -Large-scale portfolio optimization with DEoptim -http://cran.r-project.org/web/packages/DEoptim/vignettes/DEoptimPortfolioOptimization.pdf } -\section{See Also}{ -TODO +\seealso{ +CRAN task view on Empirical Finance \cr \url{http://cran.r-project.org/src/contrib/Views/Econometrics.html} + +CRAN task view on Optimization \cr \url{http://cran.r-project.org/web/views/Optimization.html} + +Large-scale portfolio optimization with DEoptim \cr \url{http://cran.r-project.org/web/packages/DEoptim/vignettes/DEoptimPortfolioOptimization.pdf} } \ No newline at end of file From noreply at r-forge.r-project.org Tue Sep 24 03:37:54 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 03:37:54 +0200 (CEST) Subject: [Returnanalytics-commits] r3178 - in pkg/PortfolioAnalytics: . R man Message-ID: <20130924013754.96FAD1848F5@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-24 03:37:53 +0200 (Tue, 24 Sep 2013) New Revision: 3178 Removed: pkg/PortfolioAnalytics/man/extract.efficient.frontier.Rd Modified: pkg/PortfolioAnalytics/NAMESPACE pkg/PortfolioAnalytics/R/extract.efficient.frontier.R pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd pkg/PortfolioAnalytics/man/extractEfficientFrontier.Rd pkg/PortfolioAnalytics/man/plot.Rd Log: Updating documentation Modified: pkg/PortfolioAnalytics/NAMESPACE =================================================================== --- pkg/PortfolioAnalytics/NAMESPACE 2013-09-24 01:30:19 UTC (rev 3177) +++ pkg/PortfolioAnalytics/NAMESPACE 2013-09-24 01:37:53 UTC (rev 3178) @@ -19,7 +19,6 @@ export(diversification_constraint) export(diversification) export(equal.weight) -export(extract.efficient.frontier) export(extractEfficientFrontier) export(extractGroups) export(extractObjectiveMeasures) Modified: pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 
=================================================================== --- pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 2013-09-24 01:30:19 UTC (rev 3177) +++ pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 2013-09-24 01:37:53 UTC (rev 3178) @@ -10,32 +10,7 @@ # ############################################################################### -#' Extract the efficient frontier of portfolios that meet your objectives over a range of risks -#' -#' The efficient frontier is extracted from the set of portfolios created by -#' \code{optimize.portfolio} with \code{trace=TRUE}. -#' -#' If you do not have an optimal portfolio object created by -#' \code{\link{optimize.portfolio}}, you can pass in a portfolio object and an -#' optimization will be run via \code{\link{optimize.portfolio}} -#' -#' @note -#' Note that this function will be extremely sensitive to the objectives in your -#' \code{\link{portfolio}} object. It will be especially obvious if you -#' are looking at a risk budget objective and your return preference is not set high enough. 
-#' -#' -#' @param object optimial portfolio object as created by \code{\link{optimize.portfolio}} -#' @param match.col string name of column to use for risk (horizontal axis) -#' @param from minimum value of the sequence -#' @param to maximum value of the sequence -#' @param by number to increment the sequence by -#' @param n.portfolios number of portfolios along the efficient frontier to extract -#' @param \dots any other passthru parameters to \code{optimize.portfolio} -#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns -#' @param portfolio an object of type "portfolio" specifying the constraints and objectives for the optimization, see \code{\link{portfolio.spec}} -#' @param optimize_method one of "DEoptim", "random", "ROI", "pso", or "GenSA" -#' @export + extract.efficient.frontier <- function (object=NULL, match.col='ES', from=NULL, to=NULL, by=0.005, n.portfolios=NULL, ..., R=NULL, portfolio=NULL, optimize_method='random') { #TODO add a threshold argument for how close it has to be to count @@ -383,7 +358,7 @@ #' For objects created by \code{optimize.portfolo} with the DEoptim, random, or #' pso solvers, the efficient frontier will be extracted from the object via #' \code{extract.efficient.frontier}. This means that \code{optimize.portfolio} must -#' be run with \code{trace=TRUE} +#' be run with \code{trace=TRUE}. #' #' @param object an optimal portfolio object created by \code{optimize.portfolio} #' @param match.col string name of column to use for risk (horizontal axis). 
Modified: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd =================================================================== --- pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-24 01:30:19 UTC (rev 3177) +++ pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-24 01:37:53 UTC (rev 3178) @@ -7,7 +7,7 @@ } \description{ -\kbd{PortfolioAnalytics} provides an\R packaged to provide numerical solutions for portfolio problems with complex constraints and objective sets. The goal of the package is to aid practicioners and researchers in solving portfolio optimization problems with complex constraints and objectives that mirror real-world applications. +\kbd{PortfolioAnalytics} provides an \R packaged to provide numerical solutions for portfolio problems with complex constraints and objective sets. The goal of the package is to aid practicioners and researchers in solving portfolio optimization problems with complex constraints and objectives that mirror real-world applications. One of the goals of the packages is to provide a common interface to specify constraints and objectives that can be solved by any supported solver (i.e. optimization method). Currently supported optimization methods include random portfolios, differential evolution, particle swarm optimization, generalized simulated annealing, and linear and quadratic programming routines. Additional information on random portfolios is provided below. The differential evolution algorithm is implemented via the \kbd{DEoptim} package, the particle swarm optimization algorithm via the \kbd{pso} package, the generalized simulated annealing via the \kbd{GenSA} package, and linear and quadratic programming are implemented via the \kbd{ROI} package which acts as an interface to the \kbd{Rglpk} and \kbd{quadprog} packages. 
Deleted: pkg/PortfolioAnalytics/man/extract.efficient.frontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/extract.efficient.frontier.Rd 2013-09-24 01:30:19 UTC (rev 3177) +++ pkg/PortfolioAnalytics/man/extract.efficient.frontier.Rd 2013-09-24 01:37:53 UTC (rev 3178) @@ -1,57 +0,0 @@ -\name{extract.efficient.frontier} -\alias{extract.efficient.frontier} -\title{Extract the efficient frontier of portfolios that meet your objectives over a range of risks} -\usage{ - extract.efficient.frontier(object = NULL, - match.col = "ES", from = NULL, to = NULL, by = 0.005, - n.portfolios = NULL, ..., R = NULL, portfolio = NULL, - optimize_method = "random") -} -\arguments{ - \item{object}{optimial portfolio object as created by - \code{\link{optimize.portfolio}}} - - \item{match.col}{string name of column to use for risk - (horizontal axis)} - - \item{from}{minimum value of the sequence} - - \item{to}{maximum value of the sequence} - - \item{by}{number to increment the sequence by} - - \item{n.portfolios}{number of portfolios along the - efficient frontier to extract} - - \item{\dots}{any other passthru parameters to - \code{optimize.portfolio}} - - \item{R}{an xts, vector, matrix, data frame, timeSeries - or zoo object of asset returns} - - \item{portfolio}{an object of type "portfolio" specifying - the constraints and objectives for the optimization, see - \code{\link{portfolio.spec}}} - - \item{optimize_method}{one of "DEoptim", "random", "ROI", - "pso", or "GenSA"} -} -\description{ - The efficient frontier is extracted from the set of - portfolios created by \code{optimize.portfolio} with - \code{trace=TRUE}. 
-} -\details{ - If you do not have an optimal portfolio object created by - \code{\link{optimize.portfolio}}, you can pass in a - portfolio object and an optimization will be run via - \code{\link{optimize.portfolio}} -} -\note{ - Note that this function will be extremely sensitive to - the objectives in your \code{\link{portfolio}} object. - It will be especially obvious if you are looking at a - risk budget objective and your return preference is not - set high enough. -} - Modified: pkg/PortfolioAnalytics/man/extractEfficientFrontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/extractEfficientFrontier.Rd 2013-09-24 01:30:19 UTC (rev 3177) +++ pkg/PortfolioAnalytics/man/extractEfficientFrontier.Rd 2013-09-24 01:37:53 UTC (rev 3178) @@ -50,7 +50,7 @@ will be extracted from the object via \code{extract.efficient.frontier}. This means that \code{optimize.portfolio} must be run with - \code{trace=TRUE} + \code{trace=TRUE}. } \author{ Ross Bennett Modified: pkg/PortfolioAnalytics/man/plot.Rd =================================================================== --- pkg/PortfolioAnalytics/man/plot.Rd 2013-09-24 01:30:19 UTC (rev 3177) +++ pkg/PortfolioAnalytics/man/plot.Rd 2013-09-24 01:37:53 UTC (rev 3178) @@ -1,4 +1,4 @@ -\name{plot.optimize} +\name{plot} \alias{plot.optimize.portfolio} \alias{plot.optimize.portfolio.DEoptim} \alias{plot.optimize.portfolio.GenSA} From noreply at r-forge.r-project.org Tue Sep 24 03:55:37 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 03:55:37 +0200 (CEST) Subject: [Returnanalytics-commits] r3179 - in pkg/PortfolioAnalytics: R man Message-ID: <20130924015537.6E6DB1848F5@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-24 03:55:36 +0200 (Tue, 24 Sep 2013) New Revision: 3179 Removed: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd Modified: pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 
pkg/PortfolioAnalytics/man/create.EfficientFrontier.Rd Log: Updating documentation Modified: pkg/PortfolioAnalytics/R/extract.efficient.frontier.R =================================================================== --- pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 2013-09-24 01:37:53 UTC (rev 3178) +++ pkg/PortfolioAnalytics/R/extract.efficient.frontier.R 2013-09-24 01:55:36 UTC (rev 3179) @@ -266,12 +266,12 @@ #' than the simple mean-var and mean-ETL cases. For this type, we actually #' call \code{\link{optimize.portfolio}} with \code{optimize_method="DEoptim"} #' and then extract the efficient frontier with -#' \code{\link{extract.efficient.frontier}}.} +#' \code{extract.efficient.frontier}.} #' \item{"random":}{ This can handle more complex constraints and objectives #' than the simple mean-var and mean-ETL cases. For this type, we actually #' call \code{\link{optimize.portfolio}} with \code{optimize_method="random"} #' and then extract the efficient frontier with -#' \code{\link{extract.efficient.frontier}}.} +#' \code{extract.efficient.frontier}.} #' } #' #' @param R xts object of asset returns @@ -290,8 +290,7 @@ #' @seealso \code{\link{optimize.portfolio}}, #' \code{\link{portfolio.spec}}, #' \code{\link{meanvar.efficient.frontier}}, -#' \code{\link{meanetl.efficient.frontier}}, -#' \code{\link{extract.efficient.frontier}} +#' \code{\link{meanetl.efficient.frontier}} #' @export create.EfficientFrontier <- function(R, portfolio, type, n.portfolios=25, risk_aversion=NULL, match.col="ES", search_size=2000, ...){ # This is just a wrapper around a few functions to easily create efficient frontiers Deleted: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd =================================================================== --- pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-24 01:37:53 UTC (rev 3178) +++ pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-24 01:55:36 UTC (rev 3179) @@ -1,91 +0,0 @@ 
-\name{PortfolioAnalytics-package} -\alias{PortfolioAnalytics-package} -\alias{PortfolioAnalytics} -\docType{package} -\title{ -Numeric methods for optimization of portfolios -} - -\description{ -\kbd{PortfolioAnalytics} provides an \R packaged to provide numerical solutions for portfolio problems with complex constraints and objective sets. The goal of the package is to aid practicioners and researchers in solving portfolio optimization problems with complex constraints and objectives that mirror real-world applications. - -One of the goals of the packages is to provide a common interface to specify constraints and objectives that can be solved by any supported solver (i.e. optimization method). Currently supported optimization methods include random portfolios, differential evolution, particle swarm optimization, generalized simulated annealing, and linear and quadratic programming routines. Additional information on random portfolios is provided below. The differential evolution algorithm is implemented via the \kbd{DEoptim} package, the particle swarm optimization algorithm via the \kbd{pso} package, the generalized simulated annealing via the \kbd{GenSA} package, and linear and quadratic programming are implemented via the \kbd{ROI} package which acts as an interface to the \kbd{Rglpk} and \kbd{quadprog} packages. - -A key strength of \kbd{PortfolioAnalytics} is the generalization of constraints and objectives that can be solved. The quadratic and linear programming solvers can solve a limited type of convex optimization problems. -\itemize{ - \item Maxmimize portfolio return subject leverage, box, group, position limit, target mean return, and/or factor exposure constraints on weights. - \item Minimize portfolio variance subject to leverage, box, group, turnover, and/or factor exposure constraints (otherwise known as global minimum variance portfolio). 
- \item Minimize portfolio variance subject to leverage, box, group, and/or factor exposure constraints and a desired portfolio return. - \item Maximize quadratic utility subject to leverage, box, group, target mean return, turnover, and/or factor exposure constraints and risk aversion parameter. - \item Minimize ETL subject to leverage, box, group, position limit, target mean return, and/or factor exposure constraints and target portfolio return. -} - -Many real-world portfolio optimization problems are 'global optimization' problems, and therefore are not suitable for linear or quadratic programming routines. \kbd{PortfolioAnalytics} provides a random portfolio optimization method, and also utilizes the \R packages DEoptim, pso, and GenSA for solving non-convex global optimization problems. \kbd{PortfolioAnalytics} supports three methods of generating random portfolios. - -\itemize{ - \item The ?sample? method to generate random portfolios is based on an idea by Pat Burns. This is the most flexible method, but also the slowest, and can generate portfolios to satisfy leverage, box, group, and position limit constraints. - \item The ?simplex? method to generate random portfolios is based on a paper by W. T. Shaw. The simplex method is useful to generate random portfolios with the full investment constraint, where the sum of the weights is equal to 1, and min box constraints. Values for min_sum and max_sum of the leverage constraint will be ignored, the sum of weights will equal 1. All other constraints such as the box constraint max, group and position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. Another key point to note is that the solution may not be along the vertexes depending on the objective. For example, a risk budget objective will likely place the portfolio somewhere on the interior. - \item The ?grid? 
method to generate random portfolios is based on the \code{gridSearch} function in package \kbd{NMOF}. The grid search method only satisfies the min and max box constraints. The min_sum and max_sum leverage constraint will likely be violated and the weights in the random portfolios should be normalized. Normalization may cause the box constraints to be violated and will be penalized in \code{constrained_objective}. -} - -\kbd{PortfolioAnalytics} leverages the \kbd{PerformanceAnalytics} package for many common objective functions. The objective types in \kbd{PortfolioAnalytics} are designed to be used with \kbd{PerformanceAnalytics} functions, but any user supplied valid R function can be used as an objective. - -This summary attempts to provide an overview of how to construct a portfolio object with constraints and objectives, run the optimization, and chart the results. -} - -\section{Optimization}{ -The portfolio object is instantiated with the \code{\link{portfolio.spec}} function. The main argument to \code{\link{portfolio.spec}} is \code{assets}. The \code{assets} argument can be a scalar value for the number of assets, a character vector of fund names, or a named vector of initial weights. - -Adding constraints to the portfolio object is done with \code{\link{add.constraint}}. The \code{\link{add.constraint}} function is the main interface for adding and/or updating constraints to the portfolio object. This function allows the user to specify the portfolio to add the constraints to, the type of constraints, arguments for the constraint, and whether or not to enable the constraint. If updating an existing constraint, the indexnum argument can be specified. - -Objectives can be added to the portfolio object with \code{\link{add.objective}}. The \code{\link{add.objective}} function is the main function for adding and/or updating objectives to the portfolio object. 
This function allows the user to specify the portfolio to add the objectives to, the type, name of the objective function, arguments to the objective function, and whether or not to enable the objective. If updating an existing constraint, the indexnum argument can be specified. - -With the constraints and objectives specified in the portfolio object, the portfolio object can be passed to \code{\link{optimize.portfolio}} or \code{\link{optimize.portfolio.rebalancing}} to run the optimization. Arguments to \code{\link{optimize.portfolio}} include asset returns, the portfolio obect specifying constraints and objectives, optimization method, and other parameters specific to the solver. \code{\link{optimize.portfolio.rebalancing}} adds support for backtesting portfolio optimization through time with rebalancing or rolling periods. -} - -\section{Charts and Graphs}{ -Intuition into the optimization can be aided through visualization. The goal of creating the charts is to provide visualization tools for optimal portfolios regardless of the chosen optimization method. - -\code{\link{chart.Weights}} plots the weights of the optimal portfolio. \code{\link{chart.RiskReward}} plots the optimal portfolio in risk-reward space. The random portfolios, DEoptim, and pso solvers will return trace portfolio information at each iteration when \code{\link{optimize.portfolio}} is run with \code{trace=TRUE}. If this is the case, \code{\link{chart.RiskReward}} will plot these portfolios so that the feasible space can be easily visualized. Although the GenSA and ROI solvers do not return trace portfolio information, random portfolios can be be generated with the argument \code{rp=TRUE} in \code{\link{chart.RiskReward}}. A \code{plot} function is provided that will plot the weights and risk-reward scatter chart. The component risk contribution can be charted for portfolio optimization problems with risk budget objectives with \code{\link{chart.RiskBudget}}. 
Neighbor portfolios can be plotted in \code{\link{chart.RiskBudget}}, \code{\link{chart.Weights}}, and \code{\link{chart.RiskReward}}. - -Efficient frontiers can be extracted from \code{optimize.portfolio} objects or created from a \code{portfolio} object. The efficient frontier can be charted in risk-reward space with \code{\link{chart.EfficientFrontier}}. The weights along the efficient frontier can be charted with \code{\link{chart.Weights.EF}}. - -Multiple objects created via \code{\link{optimize.portfolio}} can be combined with \code{\link{combine.optimizations}} for visual comparison. The weights of the optimal portfolios can be plotted with \code{\link{chart.Weights}}. The optimal portfolios can be compared in risk-reward space with \code{\link{chart.RiskReward}}. The portfolio component risk contributions of the multiple optimal portfolios can be plotted with \code{\link{chart.RiskBudget}}. -} - -\section{Further Work}{ -Continued work to improved charts and graphs. - -Continued work to improve features to combine and compare multiple optimal portfolio objects. - -Support for more solvers. - -Comments, suggestions, and/or code patches are welcome. -} - -\author{ -Kris Boudt \cr -Peter Carl \cr -Brian G. Peterson \cr - -Maintainer: Brian G. Peterson \email{brian at braverock.com} -} - -\references{ -Shaw, William Thornton, \emph{Portfolio Optimization for VAR, CVaR, Omega and Utility with General Return Distributions: A Monte Carlo Approach for Long-Only and Bounded Short Portfolios with Optional Robustness and a Simplified Approach to Covariance Matching} (June 1, 2011). Available at SSRN: http://ssrn.com/abstract=1856476 or http://dx.doi.org/10.2139/ssrn.1856476 \cr - -Scherer, B. and Martin, D. \emph{Modern Portfolio Optimization}. Springer. 2005. 
\cr - -} - -\section{Acknowledgements}{ -TODO -} - -\seealso{ -CRAN task view on Empirical Finance \cr \url{http://cran.r-project.org/src/contrib/Views/Econometrics.html} - -CRAN task view on Optimization \cr \url{http://cran.r-project.org/web/views/Optimization.html} - -Large-scale portfolio optimization with DEoptim \cr \url{http://cran.r-project.org/web/packages/DEoptim/vignettes/DEoptimPortfolioOptimization.pdf} -} \ No newline at end of file Modified: pkg/PortfolioAnalytics/man/create.EfficientFrontier.Rd =================================================================== --- pkg/PortfolioAnalytics/man/create.EfficientFrontier.Rd 2013-09-24 01:37:53 UTC (rev 3178) +++ pkg/PortfolioAnalytics/man/create.EfficientFrontier.Rd 2013-09-24 01:55:36 UTC (rev 3179) @@ -63,14 +63,13 @@ \code{\link{optimize.portfolio}} with \code{optimize_method="DEoptim"} and then extract the efficient frontier with - \code{\link{extract.efficient.frontier}}.} - \item{"random":}{ This can handle more complex - constraints and objectives than the simple mean-var and - mean-ETL cases. For this type, we actually call - \code{\link{optimize.portfolio}} with - \code{optimize_method="random"} and then extract the + \code{extract.efficient.frontier}.} \item{"random":}{ + This can handle more complex constraints and objectives + than the simple mean-var and mean-ETL cases. 
For this + type, we actually call \code{\link{optimize.portfolio}} + with \code{optimize_method="random"} and then extract the efficient frontier with - \code{\link{extract.efficient.frontier}}.} } + \code{extract.efficient.frontier}.} } } \author{ Ross Bennett @@ -79,7 +78,6 @@ \code{\link{optimize.portfolio}}, \code{\link{portfolio.spec}}, \code{\link{meanvar.efficient.frontier}}, - \code{\link{meanetl.efficient.frontier}}, - \code{\link{extract.efficient.frontier}} + \code{\link{meanetl.efficient.frontier}} } From noreply at r-forge.r-project.org Tue Sep 24 03:56:43 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 03:56:43 +0200 (CEST) Subject: [Returnanalytics-commits] r3180 - pkg/PortfolioAnalytics/man Message-ID: <20130924015643.A152C1848F5@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-24 03:56:43 +0200 (Tue, 24 Sep 2013) New Revision: 3180 Added: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd Log: Adding new PortfolioAnalytics-package.Rd file with no non-ASCII input Added: pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd =================================================================== --- pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd (rev 0) +++ pkg/PortfolioAnalytics/man/PortfolioAnalytics-package.Rd 2013-09-24 01:56:43 UTC (rev 3180) @@ -0,0 +1,91 @@ +\name{PortfolioAnalytics-package} +\alias{PortfolioAnalytics-package} +\alias{PortfolioAnalytics} +\docType{package} +\title{ +Numeric methods for optimization of portfolios +} + +\description{ +\kbd{PortfolioAnalytics} provides an \R packaged to provide numerical solutions for portfolio problems with complex constraints and objective sets. The goal of the package is to aid practicioners and researchers in solving portfolio optimization problems with complex constraints and objectives that mirror real-world applications. 
+ +One of the goals of the packages is to provide a common interface to specify constraints and objectives that can be solved by any supported solver (i.e. optimization method). Currently supported optimization methods include random portfolios, differential evolution, particle swarm optimization, generalized simulated annealing, and linear and quadratic programming routines. Additional information on random portfolios is provided below. The differential evolution algorithm is implemented via the \kbd{DEoptim} package, the particle swarm optimization algorithm via the \kbd{pso} package, the generalized simulated annealing via the \kbd{GenSA} package, and linear and quadratic programming are implemented via the \kbd{ROI} package which acts as an interface to the \kbd{Rglpk} and \kbd{quadprog} packages. + +A key strength of \kbd{PortfolioAnalytics} is the generalization of constraints and objectives that can be solved. The quadratic and linear programming solvers can solve a limited type of convex optimization problems. +\itemize{ + \item Maximize portfolio return subject to leverage, box, group, position limit, target mean return, and/or factor exposure constraints on weights. + \item Minimize portfolio variance subject to leverage, box, group, turnover, and/or factor exposure constraints (otherwise known as global minimum variance portfolio). + \item Minimize portfolio variance subject to leverage, box, group, and/or factor exposure constraints and a desired portfolio return. + \item Maximize quadratic utility subject to leverage, box, group, target mean return, turnover, and/or factor exposure constraints and risk aversion parameter. + \item Minimize ETL subject to leverage, box, group, position limit, target mean return, and/or factor exposure constraints and target portfolio return. +} + +Many real-world portfolio optimization problems are global optimization problems, and therefore are not suitable for linear or quadratic programming routines.
\kbd{PortfolioAnalytics} provides a random portfolio optimization method, and also utilizes the \R packages DEoptim, pso, and GenSA for solving non-convex global optimization problems. \kbd{PortfolioAnalytics} supports three methods of generating random portfolios. + +\itemize{ + \item The sample method to generate random portfolios is based on an idea by Pat Burns. This is the most flexible method, but also the slowest, and can generate portfolios to satisfy leverage, box, group, and position limit constraints. + \item The simplex method to generate random portfolios is based on a paper by W. T. Shaw. The simplex method is useful to generate random portfolios with the full investment constraint, where the sum of the weights is equal to 1, and min box constraints. Values for min_sum and max_sum of the leverage constraint will be ignored, the sum of weights will equal 1. All other constraints such as the box constraint max, group and position limit constraints will be handled by elimination. If the constraints are very restrictive, this may result in very few feasible portfolios remaining. Another key point to note is that the solution may not be along the vertexes depending on the objective. For example, a risk budget objective will likely place the portfolio somewhere on the interior. + \item The grid method to generate random portfolios is based on the \code{gridSearch} function in package \kbd{NMOF}. The grid search method only satisfies the min and max box constraints. The min_sum and max_sum leverage constraint will likely be violated and the weights in the random portfolios should be normalized. Normalization may cause the box constraints to be violated and will be penalized in \code{constrained_objective}. +} + +\kbd{PortfolioAnalytics} leverages the \kbd{PerformanceAnalytics} package for many common objective functions. 
The objective types in \kbd{PortfolioAnalytics} are designed to be used with \kbd{PerformanceAnalytics} functions, but any user supplied valid R function can be used as an objective. + +This summary attempts to provide an overview of how to construct a portfolio object with constraints and objectives, run the optimization, and chart the results. +} + +\section{Optimization}{ +The portfolio object is instantiated with the \code{\link{portfolio.spec}} function. The main argument to \code{\link{portfolio.spec}} is \code{assets}. The \code{assets} argument can be a scalar value for the number of assets, a character vector of fund names, or a named vector of initial weights. + +Adding constraints to the portfolio object is done with \code{\link{add.constraint}}. The \code{\link{add.constraint}} function is the main interface for adding and/or updating constraints to the portfolio object. This function allows the user to specify the portfolio to add the constraints to, the type of constraints, arguments for the constraint, and whether or not to enable the constraint. If updating an existing constraint, the indexnum argument can be specified. + +Objectives can be added to the portfolio object with \code{\link{add.objective}}. The \code{\link{add.objective}} function is the main function for adding and/or updating objectives to the portfolio object. This function allows the user to specify the portfolio to add the objectives to, the type, name of the objective function, arguments to the objective function, and whether or not to enable the objective. If updating an existing constraint, the indexnum argument can be specified. + +With the constraints and objectives specified in the portfolio object, the portfolio object can be passed to \code{\link{optimize.portfolio}} or \code{\link{optimize.portfolio.rebalancing}} to run the optimization. 
Arguments to \code{\link{optimize.portfolio}} include asset returns, the portfolio object specifying constraints and objectives, optimization method, and other parameters specific to the solver. \code{\link{optimize.portfolio.rebalancing}} adds support for backtesting portfolio optimization through time with rebalancing or rolling periods. +} + +\section{Charts and Graphs}{ +Intuition into the optimization can be aided through visualization. The goal of creating the charts is to provide visualization tools for optimal portfolios regardless of the chosen optimization method. + +\code{\link{chart.Weights}} plots the weights of the optimal portfolio. \code{\link{chart.RiskReward}} plots the optimal portfolio in risk-reward space. The random portfolios, DEoptim, and pso solvers will return trace portfolio information at each iteration when \code{\link{optimize.portfolio}} is run with \code{trace=TRUE}. If this is the case, \code{\link{chart.RiskReward}} will plot these portfolios so that the feasible space can be easily visualized. Although the GenSA and ROI solvers do not return trace portfolio information, random portfolios can be generated with the argument \code{rp=TRUE} in \code{\link{chart.RiskReward}}. A \code{plot} function is provided that will plot the weights and risk-reward scatter chart. The component risk contribution can be charted for portfolio optimization problems with risk budget objectives with \code{\link{chart.RiskBudget}}. Neighbor portfolios can be plotted in \code{\link{chart.RiskBudget}}, \code{\link{chart.Weights}}, and \code{\link{chart.RiskReward}}. + +Efficient frontiers can be extracted from \code{optimize.portfolio} objects or created from a \code{portfolio} object. The efficient frontier can be charted in risk-reward space with \code{\link{chart.EfficientFrontier}}. The weights along the efficient frontier can be charted with \code{\link{chart.Weights.EF}}.
+ +Multiple objects created via \code{\link{optimize.portfolio}} can be combined with \code{\link{combine.optimizations}} for visual comparison. The weights of the optimal portfolios can be plotted with \code{\link{chart.Weights}}. The optimal portfolios can be compared in risk-reward space with \code{\link{chart.RiskReward}}. The portfolio component risk contributions of the multiple optimal portfolios can be plotted with \code{\link{chart.RiskBudget}}. +} + +\section{Further Work}{ +Continued work to improve charts and graphs. + +Continued work to improve features to combine and compare multiple optimal portfolio objects. + +Support for more solvers. + +Comments, suggestions, and/or code patches are welcome. +} + +\author{ +Kris Boudt \cr +Peter Carl \cr +Brian G. Peterson \cr + +Maintainer: Brian G. Peterson \email{brian at braverock.com} +} + +\references{ +Shaw, William Thornton, \emph{Portfolio Optimization for VAR, CVaR, Omega and Utility with General Return Distributions: A Monte Carlo Approach for Long-Only and Bounded Short Portfolios with Optional Robustness and a Simplified Approach to Covariance Matching} (June 1, 2011). Available at SSRN: http://ssrn.com/abstract=1856476 or http://dx.doi.org/10.2139/ssrn.1856476 \cr + +Scherer, B. and Martin, D. \emph{Modern Portfolio Optimization}. Springer. 2005.
\cr + +} + +\section{Acknowledgements}{ +TODO +} + +\seealso{ +CRAN task view on Empirical Finance \cr \url{http://cran.r-project.org/src/contrib/Views/Econometrics.html} + +CRAN task view on Optimization \cr \url{http://cran.r-project.org/web/views/Optimization.html} + +Large-scale portfolio optimization with DEoptim \cr \url{http://cran.r-project.org/web/packages/DEoptim/vignettes/DEoptimPortfolioOptimization.pdf} +} \ No newline at end of file From noreply at r-forge.r-project.org Tue Sep 24 04:07:58 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 04:07:58 +0200 (CEST) Subject: [Returnanalytics-commits] r3181 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130924020759.1AF5D1848F5@r-forge.r-project.org> Author: peter_carl Date: 2013-09-24 04:07:57 +0200 (Tue, 24 Sep 2013) New Revision: 3181 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R Log: - extracting portfolio measures won't work across all buoys, so re-calculating them based on weights and returns Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-24 01:56:43 UTC (rev 3180) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-24 02:07:57 UTC (rev 3181) @@ -204,11 +204,11 @@ name="mean" ) # Add a risk measure - # Use ES to be consistent with risk measures in other BUOY portfolios RiskBudget.portf <- add.objective(portfolio=RiskBudget.portf, type="risk", name="ES", + multiplier=1, arguments = list(p=(1-1/12), clean=clean) ) @@ -257,8 +257,18 @@ plot(MeanmETL.ROI, risk.col="StdDev", return.col="mean", rp=permutations, chart.assets=TRUE, main="Mean-mETL Portfolio") plot(MeanmETL.ROI, risk.col="ES", return.col="mean", rp=permutations, chart.assets=TRUE, main="Mean-mETL Portfolio") save(MeanmETL.ROI,file=paste(resultsdir, 
'MeanETL-', Sys.Date(), '-', runname, '.rda',sep='')) +chart.EfficientFrontier(MeanmETL.RND) print(paste('Completed meanmETL optimization at',Sys.time(),'moving on to MinSD')) +MeanmETL.RND<-optimize.portfolio(R=R, + portfolio=MeanmETL.portf, + optimize_method='random', + search_size=10000, + trace=TRUE +) +plot(MeanmETL.RND, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Mean-mETL Portfolio") +plot(MeanmETL.RND, risk.col="ES", return.col="mean", chart.assets=TRUE, main="Mean-mETL Portfolio") + ### Evaluate BUOY 3: Constrained Minimum Variance Portfolio - with ROI MinSD.ROI<-optimize.portfolio(R=R, portfolio=MinSD.portf, @@ -364,7 +374,21 @@ buoyStdDev <- combine.optimizations(list(EqSD=EqSD.RND, EqWt=EqWt.opt)) chart.RiskBudget(buoyStdDev, match.col="StdDev", risk.type="absolute", legend.loc="topleft") +Wgts = extractWeights(buoys) +# Extract portfolio measures from each objective +## We can't just extract them, because they aren't all calculated +## so fill them in... +portfmeas=NULL +for(i in 1:NROW(Wgts)){ + mean = sum(colMeans(R)*Wgts[i,]) + sd = StdDev(R, weights=Wgts[i,]) + es = ES(R, weights=Wgts[i,], method="modified", portfolio_method="component", p=p) + portfmeas=rbind(portfmeas, c(mean, sd[1], es[1])) +} +colnames(portfmeas)=c("Mean", "StdDev", "mETL") +rownames(portfmeas)=rownames(Wgts) + end_time<-Sys.time() end_time-start_time From noreply at r-forge.r-project.org Tue Sep 24 11:51:01 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 11:51:01 +0200 (CEST) Subject: [Returnanalytics-commits] r3182 - pkg/PortfolioAnalytics Message-ID: <20130924095101.6A169185AB6@r-forge.r-project.org> Author: braverock Date: 2013-09-24 11:51:01 +0200 (Tue, 24 Sep 2013) New Revision: 3182 Modified: pkg/PortfolioAnalytics/DESCRIPTION Log: - add Ross Bennett as author, rearrange - add Doug Martin as Contributor Modified: pkg/PortfolioAnalytics/DESCRIPTION 
=================================================================== --- pkg/PortfolioAnalytics/DESCRIPTION 2013-09-24 02:07:57 UTC (rev 3181) +++ pkg/PortfolioAnalytics/DESCRIPTION 2013-09-24 09:51:01 UTC (rev 3182) @@ -4,8 +4,8 @@ of Portfolios Version: 0.8.3 Date: $Date$ -Author: Kris Boudt, Peter Carl, Brian G. Peterson -Contributors: Hezky Varon, Guy Yollin +Author: Brian G. Peterson, Peter Carl, Ross Bennett, Kris Boudt +Contributors: R. Douglas Martin, Guy Yollin, Hezky Varon Maintainer: Brian G. Peterson Description: Portfolio optimization and analysis routines and graphics. Depends: From noreply at r-forge.r-project.org Tue Sep 24 18:43:49 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 18:43:49 +0200 (CEST) Subject: [Returnanalytics-commits] r3183 - in pkg/PortfolioAnalytics: . R man Message-ID: <20130924164349.E3E31185E5C@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-24 18:43:49 +0200 (Tue, 24 Sep 2013) New Revision: 3183 Modified: pkg/PortfolioAnalytics/DESCRIPTION pkg/PortfolioAnalytics/R/random_portfolios.R pkg/PortfolioAnalytics/man/random_portfolios.Rd Log: Adding content to random_portfolios description. Modified: pkg/PortfolioAnalytics/DESCRIPTION =================================================================== --- pkg/PortfolioAnalytics/DESCRIPTION 2013-09-24 09:51:01 UTC (rev 3182) +++ pkg/PortfolioAnalytics/DESCRIPTION 2013-09-24 16:43:49 UTC (rev 3183) @@ -4,7 +4,7 @@ of Portfolios Version: 0.8.3 Date: $Date$ -Author: Brian G. Peterson, Peter Carl, Ross Bennett, Kris Boudt +Author: Brian G. Peterson, Peter Carl, Ross Bennett, Kris Boudt Contributors: R. Douglas Martin, Guy Yollin, Hezky Varon Maintainer: Brian G. Peterson Description: Portfolio optimization and analysis routines and graphics. 
Modified: pkg/PortfolioAnalytics/R/random_portfolios.R =================================================================== --- pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-24 09:51:01 UTC (rev 3182) +++ pkg/PortfolioAnalytics/R/random_portfolios.R 2013-09-24 16:43:49 UTC (rev 3183) @@ -349,8 +349,16 @@ #' } #' #' The constraint types checked are leverage, box, group, and position limit. Any -#' portfolio that does not satisfy all these constraints will be eliminated. +#' portfolio that does not satisfy all these constraints will be eliminated. This +#' function is particularly sensitive to \code{min_sum} and \code{max_sum} +#' leverage constraints. For the sample method, there should be some +#' "wiggle room" between \code{min_sum} and \code{max_sum} in order to generate +#' a sufficient number of feasible portfolios. For example, \code{min_sum=0.99} +#' and \code{max_sum=1.01} is recommended instead of \code{min_sum=1} +#' and \code{max_sum=1}. If \code{min_sum=1} and \code{max_sum=1}, the number of +#' feasible portfolios may be 1/3 or less depending on the other constraints. #' +#' #' @param portfolio an object of type "portfolio" specifying the constraints for the optimization, see \code{\link{constraint}} #' @param permutations integer: number of unique constrained random portfolios to generate #' @param \dots any other passthru parameters Modified: pkg/PortfolioAnalytics/man/random_portfolios.Rd =================================================================== --- pkg/PortfolioAnalytics/man/random_portfolios.Rd 2013-09-24 09:51:01 UTC (rev 3182) +++ pkg/PortfolioAnalytics/man/random_portfolios.Rd 2013-09-24 16:43:49 UTC (rev 3183) @@ -60,7 +60,17 @@ The constraint types checked are leverage, box, group, and position limit. Any portfolio that does not satisfy - all these constraints will be eliminated. + all these constraints will be eliminated. This function + is particularly sensitive to \code{min_sum} and + \code{max_sum} leverage constraints. 
For the sample + method, there should be some "wiggle room" between + \code{min_sum} and \code{max_sum} in order to generate a + sufficient number of feasible portfolios. For example, + \code{min_sum=0.99} and \code{max_sum=1.01} is + recommended instead of \code{min_sum=1} and + \code{max_sum=1}. If \code{min_sum=1} and + \code{max_sum=1}, the number of feasible portfolios may + be 1/3 or less depending on the other constraints. } \author{ Peter Carl, Brian G. Peterson, Ross Bennett (based on an From noreply at r-forge.r-project.org Tue Sep 24 19:05:52 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Tue, 24 Sep 2013 19:05:52 +0200 (CEST) Subject: [Returnanalytics-commits] r3184 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130924170552.748C41859AC@r-forge.r-project.org> Author: peter_carl Date: 2013-09-24 19:05:52 +0200 (Tue, 24 Sep 2013) New Revision: 3184 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R Log: - adding single set of RP portfolios for evaluation and charts Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-24 16:43:49 UTC (rev 3183) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-24 17:05:52 UTC (rev 3184) @@ -68,9 +68,7 @@ ) # Add leverage constraint init.portf <- add.constraint(portfolio=init.portf, - type="leverage", - min_sum=1, - max_sum=1 + type="full_investment" ) # Add box constraint init.portf <- add.constraint(portfolio=init.portf, @@ -177,10 +175,12 @@ EqmETL.portf$constraints[[1]]$max_sum = 1.01 ### Construct BUOY 7: Equal Weight Portfolio -# There's only one, so construct weights for it. Rebalance the equal-weight portfolio at the same frequency as the others. 
-# dates=index(R[endpoints(R, on=rebalance_period)]) -# weights = xts(matrix(rep(1/NCOL(R),length(dates)*NCOL(R)), ncol=NCOL(R)), order.by=dates) -# colnames(weights)= colnames(R) +# There's only one, so create a portfolio object with all the objectives we want calculated. +EqWt.portf <- portfolio.spec(assets=colnames(R)) +EqWt.portf <- add.constraint(portfolio=EqWt.portf, type="leverage", min_sum=0.99, max_sum=1.01) +EqWt.portf <- add.objective(portfolio=EqWt.portf, type="return", name="mean") +EqWt.portf <- add.objective(portfolio=EqWt.portf, type="risk_budget", name="ES", arguments=list(p=p, clean=clean)) +EqWt.portf <- add.objective(portfolio=EqWt.portf, type="risk_budget", name="StdDev", arguments=list(clean=clean)) ### Construct RISK BUDGET Portfolio RiskBudget.portf <- portfolio.spec(assets=colnames(R), @@ -230,8 +230,13 @@ #------------------------------------------------------------------------ ### Evaluate portfolio objective objects -# Generate a single set of random portfolios to evaluate against all constraint set +# Generate a single set of random portfolios to evaluate against all RP constraint sets print(paste('constructing random portfolios at',Sys.time())) + +# Modify the init.portf specification to get RP running +rp.portf <- init.portf +rp.portf$constraints[[1]]$min_sum = 0.99 # set to speed up RP +rp.portf$constraints[[1]]$max_sum = 1.01 rp = random_portfolios(portfolio=init.portf, permutations=permutations) print(paste('done constructing random portfolios at',Sys.time())) @@ -263,7 +268,7 @@ MeanmETL.RND<-optimize.portfolio(R=R, portfolio=MeanmETL.portf, optimize_method='random', - search_size=10000, + rp=rp, trace=TRUE ) plot(MeanmETL.RND, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Mean-mETL Portfolio") @@ -294,27 +299,33 @@ EqSD.RND<-optimize.portfolio(R=R, portfolio=EqSD.portf, optimize_method='random', - search_size=1000, trace=TRUE + rp=rp, + trace=TRUE ) plot(EqSD.RND, risk.col="StdDev", return.col="mean", 
chart.assets=TRUE, main="Equal Volatility Contribution Portfolio") chart.RiskBudget(EqSD.RND, risk.type="percentage", neighbors=25) -save(EqSD.RND,file=paste(resultsdir, 'EqSD-', Sys.Date(), '-', runname, '.rda',sep='')) -print(paste('Completed EqSD optimization at',Sys.time(),'moving on to EqmETL')) +save(EqSD.RND,file=paste(resultsdir, 'EqSD.RND-', Sys.Date(), '-', runname, '.rda',sep='')) + +# or with DE EqSD.DE<-optimize.portfolio(R=R, - portfolio=EqSD.portf, - optimize_method='DEoptim', - search_size=1000, trace=TRUE, verbose=TRUE + portfolio=EqSD.portf, + optimize_method='DEoptim', + search_size=1000, + trace=TRUE, verbose=TRUE ) plot(EqSD.DE, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Equal Volatility Contribution Portfolio") chart.RiskBudget(EqSD.DE, risk.type="percentage") +save(EqSD.DE,file=paste(resultsdir, 'EqSD.DE-', Sys.Date(), '-', runname, '.rda',sep='')) +print(paste('Completed EqSD optimization at',Sys.time(),'moving on to EqmETL')) ### Evaluate BUOY 6: Constrained Equal mETL Contribution Portfolio - with RP EqmETL.RND<-optimize.portfolio(R=R, portfolio=EqmETL.portf, optimize_method='random', - search_size=1000, trace=TRUE + rp=rp, + trace=TRUE ) # plot(EqmETL.RND, risk.col="StdDev", return.col="mean", chart.assets=TRUE, main="Equal mETL Contribution Portfolio") plot(EqmETL.RND, risk.col="ES", return.col="mean", chart.assets=TRUE, main="Equal mETL Contribution Portfolio") @@ -323,15 +334,7 @@ print(paste('Completed EqmETL optimization at',Sys.time(),'moving on to RiskBudget')) ### Evaluate BUOY 7: Equal Weight Portfolio -# There's only one, so calculate it. -#@ Create a portfolio object with all the objectives we want calculated. 
- RB -EqWt.portf <- portfolio.spec(assets=colnames(R)) -EqWt.portf <- add.constraint(portfolio=EqWt.portf, type="leverage", min_sum=0.99, max_sum=1.01) -EqWt.portf <- add.objective(portfolio=EqWt.portf, type="return", name="mean") -EqWt.portf <- add.objective(portfolio=EqWt.portf, type="risk_budget", name="ES", arguments=list(p=p, clean=clean)) -EqWt.portf <- add.objective(portfolio=EqWt.portf, type="risk_budget", name="StdDev", arguments=list(clean=clean)) - -#@ Calculate the objective measures for the equal weight portfolio - RB +# Calculate the objective measures for the equal weight portfolio EqWt.opt <- equal.weight(R=R, portfolio=EqWt.portf) From noreply at r-forge.r-project.org Wed Sep 25 05:01:13 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 25 Sep 2013 05:01:13 +0200 (CEST) Subject: [Returnanalytics-commits] r3185 - pkg/PortfolioAnalytics/R Message-ID: <20130925030114.81CAC1855B0@r-forge.r-project.org> Author: rossbennett34 Date: 2013-09-25 05:01:12 +0200 (Wed, 25 Sep 2013) New Revision: 3185 Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R Log: Adding message to optimize.portfolio for random and DEoptim if min_sum and max_sum are too restrictive. Modified: pkg/PortfolioAnalytics/R/optimize.portfolio.R =================================================================== --- pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-24 17:05:52 UTC (rev 3184) +++ pkg/PortfolioAnalytics/R/optimize.portfolio.R 2013-09-25 03:01:12 UTC (rev 3185) @@ -610,6 +610,12 @@ upper <- constraints$max lower <- constraints$min + # issue message if min_sum and max_sum are restrictive + if((constraints$max_sum - constraints$min_sum) < 0.02){ + message("Leverage constraint min_sum and max_sum are restrictive, + consider relaxing. e.g. 
'full_investment' constraint should be min_sum=0.99 and max_sum=1.01") + } + if(hasArg(rpseed)){ seed <- match.call(expand.dots=TRUE)$rpseed DEcformals$initialpop <- seed @@ -656,6 +662,12 @@ # case for random portfolios optimization method if(optimize_method=="random"){ + # issue message if min_sum and max_sum are too restrictive + if((constraints$max_sum - constraints$min_sum) < 0.02){ + message("Leverage constraint min_sum and max_sum are restrictive, + consider relaxing. e.g. 'full_investment' constraint should be min_sum=0.99 and max_sum=1.01") + } + #' call random_portfolios() with portfolio and search_size to create matrix of portfolios if(missing(rp) | is.null(rp)){ if(hasArg(rp_method)) rp_method=match.call(expand.dots=TRUE)$rp_method else rp_method="sample" From noreply at r-forge.r-project.org Wed Sep 25 17:17:08 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 25 Sep 2013 17:17:08 +0200 (CEST) Subject: [Returnanalytics-commits] r3186 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130925151708.84405185A01@r-forge.r-project.org> Author: peter_carl Date: 2013-09-25 17:17:08 +0200 (Wed, 25 Sep 2013) New Revision: 3186 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R Log: - initial commit of slide graphics based on optimization results Added: pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R 2013-09-25 15:17:08 UTC (rev 3186) @@ -0,0 +1,225 @@ +# Presentation of results from optimization scripts run prior to this script + +op <- par(no.readonly=TRUE) + +xtract = extractStats(EqmETL.RND) # get the RP portfolios with risk and return pre-calculated +# columnnames = colnames(xtract) +results.names=rownames(portfmeas) + +# 
-------------------------------------------------------------------- +# Plot Ex Ante scatter of RP and ONLY Equal Weight portfolio in StdDev space +# -------------------------------------------------------------------- +# Done +png(filename="RP-EqWgt-MeanSD-ExAnte.png", units="in", height=5.5, width=9, res=96) +par(mar=c(5, 4, 1, 2) + 0.1) #c(bottom, left, top, right) +# Calculate chart bounds to unify with the charts below +xlim.StdDev=c(min(c(xtract[,"StdDev"], unlist(portfmeas[,"StdDev"]))), max(c(xtract[,"StdDev"], unlist(portfmeas[,"StdDev"])))) +ylim.StdDev=c(min(c(xtract[,"mean"], unlist(portfmeas[,"Mean"]))), max(c(xtract[,"mean"], unlist(portfmeas[,"Mean"])))) + +plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.StdDev, ylim=ylim.StdDev) +grid(col = "darkgray") +abline(h = 0, col = "darkgray") +# Overplot the equal weight portfolio +points(as.numeric(portfmeas[8,"StdDev"]),as.numeric(portfmeas[8,"Mean"]), col=tol8qualitative[8], pch=16, cex=1.5) # watch the order in portfmeas +axis(1, cex.axis = 0.8, col = "darkgray") +axis(2, cex.axis = 0.8, col = "darkgray") +box(col = "darkgray") +legend("bottomright",legend=results.names[8], col=tol8qualitative[8], pch=16, ncol=1, border.col="darkgray", y.intersp=1.2, cex=0.8, inset=.02) +par(op) +dev.off() + +# -------------------------------------------------------------------- +# Plot Ex Ante scatter of RP and ASSET portfolios in StdDev space +# -------------------------------------------------------------------- +# @TODO: add the assets to this chart +png(filename="RP-EqWgt-MeanSD-ExAnte.png", units="in", height=5.5, width=9, res=96) +par(mar=c(5, 4, 1, 2) + 0.1) #c(bottom, left, top, right) +# Revise the chart bounds to include the asssets +plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7) +grid(col = "darkgray") +abline(h = 0, col = "darkgray") +# 
Overplot the equal weight portfolio +points(as.numeric(portfmeas[8,"StdDev"]),as.numeric(portfmeas[8,"Mean"]), col=tol8qualitative[8], pch=16, cex=1.5) # watch the order in portfmeas +axis(1, cex.axis = 0.8, col = "darkgray") +axis(2, cex.axis = 0.8, col = "darkgray") +box(col = "darkgray") +legend("bottomright",legend=results.names[8], col=tol8qualitative[8], pch=16, ncol=1, border.col="darkgray", y.intersp=1.2, cex=0.8, inset=.02) +par(op) +dev.off() + +# -------------------------------------------------------------------- +# Plot Ex Ante scatter of RP and BUOY portfolios in StdDev space +# -------------------------------------------------------------------- +# Done +png(filename="RP-BUOY-MeanSD-ExAnte.png", units="in", height=5.5, width=9, res=96) +par(mar=c(5, 4, 1, 2) + 0.1) #c(bottom, left, top, right) +plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.StdDev, ylim=ylim.StdDev) +grid(col = "darkgray") +abline(h = 0, col = "darkgray") +# Overplot the buoy portfolios +points(as.numeric(portfmeas[,"StdDev"]),as.numeric(portfmeas[,"Mean"]), col=tol8qualitative, pch=16, cex=1.5) # watch the order in portfmeas +axis(1, cex.axis = 0.8, col = "darkgray") +axis(2, cex.axis = 0.8, col = "darkgray") +box(col = "darkgray") +legend("bottomright",legend=results.names, col=tol8qualitative, pch=16, ncol=1, border.col="darkgray", y.intersp=1.2, cex=0.8, inset=.02) +par(op) +dev.off() + +# -------------------------------------------------------------------- +# Plot Ex Ante scatter of RP and BUOY portfolios in mETL space +# -------------------------------------------------------------------- +# @TODO: Recalc chart limits for ES +png(filename="RP-BUOYS-mETL-ExAnte.png", units="in", height=5.5, width=9, res=96) +par(mar=c(5, 4, 1, 2) + 0.1) #c(bottom, left, top, right) +plot(xtract[,"ES"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7) 
+grid(col = "darkgray") +abline(h = 0, col = "darkgray") +# Overplot the buoy portfolios +points(as.numeric(portfmeas[,"ES"]),as.numeric(portfmeas[,"mean"]), col=tol8qualitative[8], pch=16, cex=1.5) # watch the order in portfmeas +axis(1, cex.axis = 0.8, col = "darkgray") +axis(2, cex.axis = 0.8, col = "darkgray") +box(col = "darkgray") +legend("bottomright",legend=results.names[1], col=tol7qualitative, pch=16, ncol=1, border.col="darkgray", y.intersp=1.2, cex=0.8, inset=.02) +par(op) +dev.off() + +# -------------------------------------------------------------------- +# Plot weights of Buoy portfolios +# -------------------------------------------------------------------- +source('./R/chart.UnStackedBar.R') +Wgts = extractWeights(buoys) +png(filename=paste(resultsdir, "Weights-Buoys.png", sep=""), units="in", height=5.5, width=9, res=96) +chart.UnStackedBar(t(Wgts), colorset=tol8qualitative, equal.line=TRUE) +dev.off() + +# -------------------------------------------------------------------- +# Plot contribution to risk of Buoy portfolios +# -------------------------------------------------------------------- +# @TODO: revise for this result set +# @TODO: add contribution to risk to portfmeas +source('./R/chart.UnStackedBar.R') +png(filename=paste(resultsdir, "Weights-Buoys.png", sep=""), units="in", height=5.5, width=9, res=96) +chart.UnStackedBar(t(Wgts), colorset=tol8qualitative, equal.line=TRUE) +dev.off() +# Alternatively, use table function for ES + +# -------------------------------------------------------------------- +# Plot Ex Post scatter of buoy portfolios? 
+# -------------------------------------------------------------------- +# @TODO: revise for this result set + +# Calculate ex post results +xpost.ret=Return.cumulative(BHportfs["2008-07::2008-09"]) +xpost.sd=StdDev(BHportfs["2008-07::2008-09"])*sqrt(3) +xante.ret=xtract[,"pamean.pamean"]/3 +xante.sd=xtract[,"pasd.pasd"]/sqrt(3) + +xpost.obj=NA +for(i in 1:NROW(RND.weights)){ + x = Return.portfolio(R=edhec.R["2008-07::2008-09"], weights=RND.weights[i,]) + y=c(Return.cumulative(x), StdDev(x)*sqrt(3)) + if(is.na(xpost.obj)) + xpost.obj=y + else + xpost.obj=rbind(xpost.obj,y) +} +rownames(xpost.obj)=rownames(RND.weights) +colnames(xpost.obj)=c("Realized Returns","Realized SD") +xmin=min(c(xpost.sd,xante.sd)) +xmax=max(c(xpost.sd,xante.sd)) +ymin=min(c(xpost.ret,xante.ret)) +ymax=max(c(xpost.ret,xante.ret)) + +png(filename="Scatter-ExPost-2008-06-30.png", units="in", height=5.5, width=9, res=96) +par(mar=c(5, 4, 1, 2) + 0.1) #c(bottom, left, top, right) +plot(xpost.sd,xpost.ret, xlab="StdDev", ylab="Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=c(xmin,xmax), ylim=c(ymin,ymax)) +grid(col = "darkgray") +points(xpost.obj[,2],xpost.obj[,1], col=tol7qualitative, pch=16, cex=1.5) +points(xante.sd,xante.ret, col="lightgray", cex=.7) +points(unlist(RND.objectives[,2])/sqrt(3),unlist(RND.objectives[,1])/3, col=tol7qualitative, pch=16, cex=1.5) +abline(h = 0, col = "darkgray") +axis(1, cex.axis = 0.8, col = "darkgray") +axis(2, cex.axis = 0.8, col = "darkgray") +box(col = "darkgray") +legend("topright",legend=rownames(RND.weights), col=tol7qualitative, pch=16, ncol=1, border.col="darkgray", y.intersp=1.2, inset=.02) +dev.off() + + + +# -------------------------------------------------------------------- +# Ex Post Results Through Time? 
+# -------------------------------------------------------------------- +# @TODO: revise for this result set +buoys.R=cbind(EqWgt,MeanSD, MeanmETL,MinSD,MinmETL,EqSD,EqmETL) +png(filename="Buoy-Cumulative-Returns.png", units="in", height=5.5, width=9, res=96) +op <- par(no.readonly = TRUE) +layout(matrix(c(1, 2)), height = c(2, 1.3), width = 1) +par(mar = c(1, 4, 1, 2)) # c(bottom, left, top, right) +chart.CumReturns(buoys.R["2000::",], main = "", xaxis = FALSE, legend.loc = "topleft", ylab = "Cumulative Return", colorset= tol7qualitative, ylog=TRUE, wealth.index=TRUE, cex.legend=.7, cex.axis=.6, cex.lab=.7) +par(mar = c(4, 4, 0, 2)) +chart.Drawdown(buoys.R["2000::",], main = "", ylab = "Drawdown", colorset = tol7qualitative, cex.axis=.6, cex.lab=.7) +par(op) +dev.off() + + +### APPENDIX SLIDES: + +# -------------------------------------------------------------------- +# Show turnover of the RP portfolios relative to the EqWgt portfolio +# -------------------------------------------------------------------- +turnover = function(w1,w2) {sum(abs(w1-w2))/length(w1)} +# Calculate the turnover matrix for the random portfolio set: +to.matrix<-matrix(nrow=NROW(rp),ncol=NROW(rp)) +for(x in 1:NROW(rp)){ + for(y in 1:NROW(rp)) { + to.matrix[x,y]<-turnover(rp[x,],rp[y,]) + } +} + +png(filename="Turnover-2008-06-30.png", units="in", height=5.5, width=9, res=96) +# postscript(file="TurnoverOf20101231.eps", height=6, width=5, paper="special", horizontal=FALSE, onefile=FALSE) +op <- par(no.readonly=TRUE) +layout(matrix(c(1,2)),height=c(4,1.25),width=1) +par(mar=c(4,4,1,2)+.1, cex=1) # c(bottom, left, top, right) +seq.col = heat.colors(11) +## Draw the Scatter chart of combined results +### Get the random portfolios from one of the result sets +x=apply(rp, MARGIN=1,FUN=turnover,w2=rp[1,]) +plot(xtract[,"pasd.pasd"],xtract[,"pamean.pamean"], xlab="Predicted StdDev", ylab="Predicted Mean", col=seq.col[ceiling(x*100)], axes=FALSE, main="", cex=.7, pch=16) +grid(col = "darkgray") 
+points(RND.objectives[1,2],RND.objectives[1,1], col="blue", pch=19, cex=1.5) +axis(1, cex.axis = 0.8, col = "darkgray") +axis(2, cex.axis = 0.8, col = "darkgray") +box(col = "darkgray") + +# Add legend to bottom panel +par(mar=c(5,5.5,1,3)+.1, cex=0.7) +## Create a histogramed legend for sequential colorsets +## this next bit of code is based on heatmap.2 in gplots package +x=ceiling(x*100) +scale01 <- function(x, low = min(x), high = max(x)) { + return((x - low)/(high - low)) +} +breaks <- seq(min(x, na.rm = TRUE), max(x, na.rm = TRUE), length = length(seq.col)+1) +min.raw <- min(x, na.rm = TRUE) +max.raw <- max(x, na.rm = TRUE) +z <- seq(min.raw, max.raw, length = length(seq.col)) +image(z = matrix(z, ncol = 1), col = seq.col, breaks = breaks, xaxt = "n", yaxt = "n") +par(usr = c(0, 1, 0, 1)) # needed to draw the histogram correctly +lv <- pretty(breaks) +xv <- scale01(as.numeric(lv), min.raw, max.raw) +axis(1, at = xv, labels=sprintf("%s%%", pretty(lv))) +h <- hist(x, plot = FALSE, breaks=breaks) +hx <- scale01(breaks, min(x), max(x)) +hy <- c(h$counts, h$counts[length(h$counts)]) +lines(hx, hy/max(hy)*.95, lwd = 2, type = "s", col = "blue") +axis(2, at = pretty(hy)/max(hy)*.95, pretty(hy)) +title(ylab="Count") +title(xlab="Degree of Turnover from Equal Weight Portfolio") +par(op) +dev.off() + +# -------------------------------------------------------------------- +# Show CONCENTRATION of the RP portfolios? 
+# -------------------------------------------------------------------- +# Basically the same chart as above but use HHI instead of turnover calc \ No newline at end of file From noreply at r-forge.r-project.org Wed Sep 25 17:28:00 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Wed, 25 Sep 2013 17:28:00 +0200 (CEST) Subject: [Returnanalytics-commits] r3187 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130925152800.9CCA11859A1@r-forge.r-project.org> Author: peter_carl Date: 2013-09-25 17:28:00 +0200 (Wed, 25 Sep 2013) New Revision: 3187 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R Log: - finished chart in ES space w buoys Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R 2013-09-25 15:17:08 UTC (rev 3186) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R 2013-09-25 15:28:00 UTC (rev 3187) @@ -14,9 +14,9 @@ par(mar=c(5, 4, 1, 2) + 0.1) #c(bottom, left, top, right) # Calculate chart bounds to unify with the charts below xlim.StdDev=c(min(c(xtract[,"StdDev"], unlist(portfmeas[,"StdDev"]))), max(c(xtract[,"StdDev"], unlist(portfmeas[,"StdDev"])))) -ylim.StdDev=c(min(c(xtract[,"mean"], unlist(portfmeas[,"Mean"]))), max(c(xtract[,"mean"], unlist(portfmeas[,"Mean"])))) +ylim.mean=c(min(c(xtract[,"mean"], unlist(portfmeas[,"Mean"]))), max(c(xtract[,"mean"], unlist(portfmeas[,"Mean"])))) -plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.StdDev, ylim=ylim.StdDev) +plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.StdDev, ylim=ylim.mean) grid(col = "darkgray") abline(h = 0, col = "darkgray") # Overplot the equal weight portfolio @@ -53,7 +53,7 
@@ # Done png(filename="RP-BUOY-MeanSD-ExAnte.png", units="in", height=5.5, width=9, res=96) par(mar=c(5, 4, 1, 2) + 0.1) #c(bottom, left, top, right) -plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.StdDev, ylim=ylim.StdDev) +plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.StdDev, ylim=ylim.mean) grid(col = "darkgray") abline(h = 0, col = "darkgray") # Overplot the buoy portfolios @@ -68,24 +68,26 @@ # -------------------------------------------------------------------- # Plot Ex Ante scatter of RP and BUOY portfolios in mETL space # -------------------------------------------------------------------- -# @TODO: Recalc chart limits for ES +# Done png(filename="RP-BUOYS-mETL-ExAnte.png", units="in", height=5.5, width=9, res=96) par(mar=c(5, 4, 1, 2) + 0.1) #c(bottom, left, top, right) -plot(xtract[,"ES"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7) +xlim.ES=c(min(c(xtract[,"ES"], unlist(portfmeas[,"mETL"]))), max(c(xtract[,"ES"], unlist(portfmeas[,"mETL"])))) +plot(xtract[,"ES"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.ES, ylim=ylim.mean) grid(col = "darkgray") abline(h = 0, col = "darkgray") # Overplot the buoy portfolios -points(as.numeric(portfmeas[,"ES"]),as.numeric(portfmeas[,"mean"]), col=tol8qualitative[8], pch=16, cex=1.5) # watch the order in portfmeas +points(as.numeric(portfmeas[,"mETL"]),as.numeric(portfmeas[,"Mean"]), col=tol8qualitative, pch=16, cex=1.5) # watch the order in portfmeas axis(1, cex.axis = 0.8, col = "darkgray") axis(2, cex.axis = 0.8, col = "darkgray") box(col = "darkgray") -legend("bottomright",legend=results.names[1], col=tol7qualitative, pch=16, ncol=1, border.col="darkgray", y.intersp=1.2, cex=0.8, inset=.02) 
+legend("bottomright",legend=results.names, col=tol8qualitative, pch=16, ncol=1, border.col="darkgray", y.intersp=1.2, cex=0.8, inset=.02) par(op) dev.off() # -------------------------------------------------------------------- # Plot weights of Buoy portfolios # -------------------------------------------------------------------- +# Done source('./R/chart.UnStackedBar.R') Wgts = extractWeights(buoys) png(filename=paste(resultsdir, "Weights-Buoys.png", sep=""), units="in", height=5.5, width=9, res=96) From noreply at r-forge.r-project.org Thu Sep 26 15:03:09 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 26 Sep 2013 15:03:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3188 - in pkg/PortfolioAnalytics/sandbox/symposium2013: . docs Message-ID: <20130926130309.499B018515B@r-forge.r-project.org> Author: peter_carl Date: 2013-09-26 15:03:08 +0200 (Thu, 26 Sep 2013) New Revision: 3188 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R Log: - minor updates Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-25 15:28:00 UTC (rev 3187) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-26 13:03:08 UTC (rev 3188) @@ -169,7 +169,7 @@ dev.off() # -------------------------------------------------------------------- -## Autocorrelation +# Autocorrelation # -------------------------------------------------------------------- # require(Hmisc) @@ -184,4 +184,9 @@ png(filename=paste(resultsdir, dataname, "-ACStackedBars.png", sep=""), units="in", height=5.5, width=9, res=96) rownames(AC.stats)= sapply(colnames(R), 
function(x) paste(strwrap(x,10), collapse = "\n"), USE.NAMES=FALSE) chart.StackedBar(as.matrix(AC.stats[,1:6]), colorset=bluemono, main="Observed Autocorrelation") -dev.off() \ No newline at end of file +dev.off() + +# -------------------------------------------------------------------- +# ETL parameterization charts +# -------------------------------------------------------------------- + Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-25 15:28:00 UTC (rev 3187) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-26 13:03:08 UTC (rev 3188) @@ -3,6 +3,12 @@ % November 11, 2013 + # Return distributions \includegraphics[width=1.0\textwidth]{../results/EDHEC-Distributions.png} @@ -159,7 +166,7 @@ Modified ETL demonstrates a better fit for historical CVaR at lower confidence levels, and can break down at higher confidence levels *Insert chart or charts* - + # _Ex ante_, not _ex post_ _Ex post_ analysis of risk contribution has been around for a while @@ -211,16 +218,23 @@ * Group constraints * Rebalancing quarterly -# Estimate +# Estimates +Table of Return, Volatility, Skew, Kurt, and Correlations by asset + + @@ -335,23 +349,21 @@ --> # Random Portfolios -It is what it sounds like +From a portfolio seed, generate random permutations of weights that meet your constraints -* From a portfolio seed, generate random permutations of weights that meet your constraints * Several methods: [Burns (2009)](http://www.portfolioprobe.com/blog/), Shaw (2010), and Gilli, _et al_ (2011) - -Sampling can help provide insight into the goals and constraints of the optimization - * Covers the 'edge case' (min/max) constraints well * Covers the 'interior' portfolios * Useful for finding the search space for an optimizer * Allows arbitrary number of samples * Allows 
massively parallel execution +Sampling can help provide insight into the goals and constraints of the optimization + Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-25 15:28:00 UTC (rev 3187) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-26 13:03:08 UTC (rev 3188) @@ -170,7 +170,14 @@ min_concentration=TRUE, arguments = list(p=(1-1/12), clean=clean) ) - +# Calculate portfolio variance, but don't use it in the objective; used only for plots +EqmETL.portf <- add.objective(portfolio=EqmETL.portf, + type="risk", # the kind of objective this is + name="StdDev", # the function to minimize + enabled=TRUE, # enable or disable the objective + multiplier=0, # calculate it but don't use it in the objective + arguments=list(clean=clean) +) EqmETL.portf$constraints[[1]]$min_sum = 0.99 # set to speed up RP EqmETL.portf$constraints[[1]]$max_sum = 1.01 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R 2013-09-25 15:28:00 UTC (rev 3187) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R 2013-09-26 13:03:08 UTC (rev 3188) @@ -16,7 +16,7 @@ xlim.StdDev=c(min(c(xtract[,"StdDev"], unlist(portfmeas[,"StdDev"]))), max(c(xtract[,"StdDev"], unlist(portfmeas[,"StdDev"])))) ylim.mean=c(min(c(xtract[,"mean"], unlist(portfmeas[,"Mean"]))), max(c(xtract[,"mean"], unlist(portfmeas[,"Mean"])))) -plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante mETL", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.StdDev, ylim=ylim.mean) +plot(xtract[,"StdDev"],xtract[,"mean"], xlab="Ex Ante Std Dev", ylab="Ex Ante Mean", col="darkgray", axes=FALSE, main="", cex=.7, xlim=xlim.StdDev, ylim=ylim.mean) 
grid(col = "darkgray") abline(h = 0, col = "darkgray") # Overplot the equal weight portfolio @@ -106,6 +106,21 @@ # Alternatively, use table function for ES # -------------------------------------------------------------------- +# Plot efficient frontier of mean-sd? +# -------------------------------------------------------------------- + + +# -------------------------------------------------------------------- +# Plot efficient frontier of mean-mETL? +# -------------------------------------------------------------------- + + +# -------------------------------------------------------------------- +# Plot efficient frontier of Equal Risk +# -------------------------------------------------------------------- + + +# -------------------------------------------------------------------- # Plot Ex Post scatter of buoy portfolios? # -------------------------------------------------------------------- # @TODO: revise for this result set From noreply at r-forge.r-project.org Thu Sep 26 17:21:47 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 26 Sep 2013 17:21:47 +0200 (CEST) Subject: [Returnanalytics-commits] r3189 - in pkg/PortfolioAnalytics/sandbox/symposium2013: . 
docs Message-ID: <20130926152147.7AC9B18514D@r-forge.r-project.org> Author: peter_carl Date: 2013-09-26 17:21:47 +0200 (Thu, 26 Sep 2013) New Revision: 3189 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd Log: - added ETL sensitivity graphic Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-26 13:03:08 UTC (rev 3188) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-26 15:21:47 UTC (rev 3189) @@ -57,8 +57,8 @@ # Drop some indexes and reorder R = edhec[,c("Convertible Arbitrage", "Equity Market Neutral","Fixed Income Arbitrage", "Event Driven", "CTA Global", "Global Macro", "Long/Short Equity")] +R.names = colnames(R) - ######################################################################## # Returns-based performance analysis ######################################################################## @@ -137,8 +137,6 @@ library(gplots) # Generate some color choices for the scale skewedWB20 = c(colorpanel(16, "#008566","#E1E56D"), colorpanel(5, "#E1E56D", "#742414")[-1]) -skewedGnYeRd10 = c(colorpanel(8, "darkgreen", "yellow"),colorpanel(3, "yellow", "darkred")[-1]) -skewedGnYeRd20 = c(colorpanel(16, "darkgreen", "yellow"),colorpanel(5, "yellow", "darkred")[-1]) M <- cor(R) colnames(M) = rownames(M) @@ -158,8 +156,12 @@ corrRect.hclust(M36.hc2, k=3, method="complete", col="blue") dev.off() -# @TODO: Add 12M rolling correlation to S&P500 +# -------------------------------------------------------------------- +# Table of Return correlations +# -------------------------------------------------------------------- +write.csv(M, file=paste(resultsdir, dataname, "-inception-cor.csv", sep="")) + # -------------------------------------------------------------------- # Rolling 
Correlation to S&P500 TR # -------------------------------------------------------------------- @@ -189,4 +191,17 @@ # -------------------------------------------------------------------- # ETL parameterization charts # -------------------------------------------------------------------- - + # @TODO: make these y-axes match? +# source('~/devel/R/returnanalytics/pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R') +png(filename=paste(resultsdir, dataname, "-ETL-sensitivity.png", sep=""), units="in", height=5.5, width=9, res=96) +op <- par(no.readonly = TRUE) +layout(matrix(c(1:8), nrow=2)) +par(mar = c(4, 4, 5, 2)+0.1) #c(bottom, left, top, right) +for(i in 1:NCOL(R)){ + chart.VaRSensitivity(R[,i], methods=c("ModifiedES","HistoricalES", "GaussianES"), legend.loc=NULL, clean=clean, colorset=c("orange", "black", "darkgray"), lty=c(3,1,2), lwd=3, main=R.names[i], ylim=c(-0.09,0), ylab="Expected Tail Loss") + abline(v = 1-1/12, col = "red", lty = 2, lwd=1) +} + plot.new() + legend("center", legend=c("Modified \nETL","Historical \nETL", "Gaussian \nETL"), lty=c(3,1,2), lwd=3, col=c("orange", "black", "darkgray"), cex=1.2, y.intersp=2) +par(op) +dev.off() \ No newline at end of file Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-26 13:03:08 UTC (rev 3188) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-26 15:21:47 UTC (rev 3189) @@ -6,7 +6,6 @@ # TODO - Add a slide comparing traditional risk budgeting to equalized risk portfolios with limits - Move as much text off slides and into comments, add more graphics - - Add autocorrelation charts? 
--> + +# Return autocorrelation +\includegraphics[width=1.0\textwidth]{../results/EDHEC-ACStackedBars.png} + # Measuring risk, not volatility Measure risk with Conditional Value-at-Risk (CVaR) @@ -163,11 +178,13 @@ Split graphic into two pages so it's readable # ETL sensitivity +\includegraphics[width=1.0\textwidth]{../results/EDHEC-ETL-sensitivity.png} + + - - # _Ex ante_, not _ex post_ _Ex post_ analysis of risk contribution has been around for a while @@ -189,7 +206,7 @@ * A high positive %CmETL indicates the position has a large loss when the portfolio also has a large loss From noreply at r-forge.r-project.org Thu Sep 26 17:40:34 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 26 Sep 2013 17:40:34 +0200 (CEST) Subject: [Returnanalytics-commits] r3190 - pkg/PerformanceAnalytics/R Message-ID: <20130926154035.0C241185616@r-forge.r-project.org> Author: peter_carl Date: 2013-09-26 17:40:34 +0200 (Thu, 26 Sep 2013) New Revision: 3190 Modified: pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R Log: - added ylim so that axis can be adjusted Modified: pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R =================================================================== --- pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R 2013-09-26 15:21:47 UTC (rev 3189) +++ pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R 2013-09-26 15:40:34 UTC (rev 3190) @@ -47,7 +47,7 @@ #' #' @export chart.VaRSensitivity <- -function (R, methods = c("GaussianVaR", "ModifiedVaR", "HistoricalVaR","GaussianES", "ModifiedES", "HistoricalES"), clean=c("none", "boudt", "geltner"), elementcolor="darkgray", reference.grid=TRUE, xlab = "Confidence Level", ylab="Value at Risk", type = "l", lty = c(1,2,4), lwd = 1, colorset = (1:12), pch = (1:12), legend.loc = "bottomleft", cex.legend = 0.8, main=NULL,...) 
+function (R, methods = c("GaussianVaR", "ModifiedVaR", "HistoricalVaR","GaussianES", "ModifiedES", "HistoricalES"), clean=c("none", "boudt", "geltner"), elementcolor="darkgray", reference.grid=TRUE, xlab = "Confidence Level", ylab="Value at Risk", type = "l", lty = c(1,2,4), lwd = 1, colorset = (1:12), pch = (1:12), legend.loc = "bottomleft", cex.legend = 0.8, main=NULL, ylim=NULL, ...) { # @author Peter Carl R = checkData(R) @@ -106,7 +106,8 @@ # print(risk) risk.columns = ncol(risk) - ylim=c(min(risk),max(risk)) + if(is.null(ylim)) + ylim=c(min(risk),max(risk)) xlim=c(min(p), max(p)) if(is.null(main)) main=paste("Risk Confidence Sensitivity of ", columnnames[1], sep="") From noreply at r-forge.r-project.org Thu Sep 26 17:42:07 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 26 Sep 2013 17:42:07 +0200 (CEST) Subject: [Returnanalytics-commits] r3191 - pkg/PerformanceAnalytics/R Message-ID: <20130926154207.2FF0B18106D@r-forge.r-project.org> Author: peter_carl Date: 2013-09-26 17:42:06 +0200 (Thu, 26 Sep 2013) New Revision: 3191 Modified: pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R Log: - added ylim to documentation Modified: pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R =================================================================== --- pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R 2013-09-26 15:40:34 UTC (rev 3190) +++ pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R 2013-09-26 15:42:06 UTC (rev 3191) @@ -20,6 +20,7 @@ #' and y axes #' @param ylab set the y-axis label, same as in \code{\link{plot}} #' @param xlab set the x-axis label, same as in \code{\link{plot}} +#' @param ylim set the y-axis dimensions, same as in \code{\link{plot}} #' @param type set the chart type, same as in \code{\link{plot}} #' @param lty set the line type, same as in \code{\link{plot}} #' @param lwd set the line width, same as in \code{\link{plot}} From noreply at r-forge.r-project.org Thu Sep 26 17:45:09 2013 From: noreply at 
r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 26 Sep 2013 17:45:09 +0200 (CEST) Subject: [Returnanalytics-commits] r3192 - pkg/PortfolioAnalytics/sandbox/symposium2013/R Message-ID: <20130926154510.0AA7C185AC9@r-forge.r-project.org> Author: peter_carl Date: 2013-09-26 17:45:09 +0200 (Thu, 26 Sep 2013) New Revision: 3192 Added: pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.VaRSensitivity.R Log: - revised chart ALREADY IN PerfA but copied here so that the package does not need to be updated Added: pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.VaRSensitivity.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.VaRSensitivity.R (rev 0) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/R/chart.VaRSensitivity.R 2013-09-26 15:45:09 UTC (rev 3192) @@ -0,0 +1,161 @@ +#' show the sensitivity of Value-at-Risk or Expected Shortfall estimates +#' +#' Creates a chart of Value-at-Risk and/or Expected Shortfall estimates by +#' confidence interval for multiple methods. +#' +#' This chart shows estimated VaR along a series of confidence intervals for +#' selected calculation methods. Useful for comparing a method to the +#' historical VaR calculation. +#' +#' @param R an xts, vector, matrix, data frame, timeSeries or zoo object of +#' asset returns +#' @param methods one or more calculation methods indicated "GaussianVaR", +#' "ModifiedVaR", "HistoricalVaR", "GaussianES", "ModifiedES", "HistoricalES". +#' See \code{\link{VaR}} or \code{\link{ES}} for more detail. +#' @param clean method for data cleaning through \code{\link{Return.clean}}. +#' Current options are "none" or "boudt" or "geltner". +#' @param elementcolor the color used to draw chart elements. 
The default is +#' "darkgray" +#' @param reference.grid if true, draws a grid aligned with the points on the x +#' and y axes +#' @param ylab set the y-axis label, same as in \code{\link{plot}} +#' @param xlab set the x-axis label, same as in \code{\link{plot}} +#' @param ylim set the y-axis dimensions, same as in \code{\link{plot}} +#' @param type set the chart type, same as in \code{\link{plot}} +#' @param lty set the line type, same as in \code{\link{plot}} +#' @param lwd set the line width, same as in \code{\link{plot}} +#' @param colorset color palette to use, set by default to rational choices +#' @param pch symbols to use, see also \code{\link{plot}} +#' @param legend.loc places a legend into one of nine locations on the chart: +#' bottomright, bottom, bottomleft, left, topleft, top, topright, right, or +#' center. +#' @param cex.legend The magnification to be used for sizing the legend +#' relative to the current setting of 'cex'. +#' @param main set the chart title, same as in \code{\link{plot}} +#' @param \dots any other passthru parameters +#' @author Peter Carl +#' @seealso \code{\link{VaR}} \cr \code{\link{ES}} +#' @references Boudt, K., Peterson, B. G., Croux, C., 2008. Estimation and +#' Decomposition of Downside Risk for Portfolios with Non-Normal Returns. +#' Journal of Risk, forthcoming. +#' @keywords ts multivariate distribution +#' @examples +#' +#' data(managers) +#' chart.VaRSensitivity(managers[,1,drop=FALSE], +#' methods=c("HistoricalVaR", "ModifiedVaR", "GaussianVaR"), +#' colorset=bluefocus, lwd=2) +#' +#' @export +chart.VaRSensitivity <- +function (R, methods = c("GaussianVaR", "ModifiedVaR", "HistoricalVaR","GaussianES", "ModifiedES", "HistoricalES"), clean=c("none", "boudt", "geltner"), elementcolor="darkgray", reference.grid=TRUE, xlab = "Confidence Level", ylab="Value at Risk", type = "l", lty = c(1,2,4), lwd = 1, colorset = (1:12), pch = (1:12), legend.loc = "bottomleft", cex.legend = 0.8, main=NULL, ylim=NULL, ...) 
+{ # @author Peter Carl + + R = checkData(R) + columnnames = colnames(R) + clean = clean[1] + legend.txt = NULL + if(length(methods) > 1){ + columns=1 + } + p = seq(0.99,0.89,by=-0.005) + risk = matrix(nrow=length(p), ncol=length(methods),dimnames=list(p,methods)) + + for(column in 1:columns) { + for(j in 1:length(methods)) { + for(i in 1:length(p)){ + switch(methods[j], + GaussianVaR = { + risk[i, j] = as.numeric(VaR(na.omit(R[,column,drop=FALSE]), p = p[i], method="gaussian", clean=clean)) + if(i==1) + legend.txt = c(legend.txt, "Gaussian VaR") + + }, + ModifiedVaR = { + risk[i, j] = as.numeric(VaR(na.omit(R[,column,drop=FALSE]), p = p[i], method="modified", clean=clean)) + if(i==1) + legend.txt = c(legend.txt, "Modified VaR") + }, + HistoricalVaR = { + risk[i,j] = as.numeric(VaR(na.omit(R[,column,drop=FALSE]), p = p[i], method="historical", clean=clean)) #hVaR = quantile(x,probs=.01) + if(i==1) + legend.txt = c(legend.txt, "Historical VaR") + + }, + GaussianES = { + risk[i, j] = as.numeric(ES(na.omit(R[,column,drop=FALSE]), p = p[i], method="gaussian", clean=clean)) + if(i==1) + legend.txt = c(legend.txt, "Gaussian ES") + + }, + ModifiedES = { + risk[i, j] = as.numeric(ES(na.omit(R[,column,drop=FALSE]), p = p[i], method="modified", clean=clean)) + if(i==1) + legend.txt = c(legend.txt, "Modified ES") + }, + HistoricalES = { + risk[i,j] = as.numeric(ES(na.omit(R[,column,drop=FALSE]), p = p[i], method="historical", clean=clean)) + if(i==1) + legend.txt = c(legend.txt, "Historical ES") + + } + ) # end switch + } + + } # end method loop + } # end column loop +# print(risk) + + risk.columns = ncol(risk) + if(is.null(ylim)) + ylim=c(min(risk),max(risk)) + xlim=c(min(p), max(p)) + if(is.null(main)) + main=paste("Risk Confidence Sensitivity of ", columnnames[1], sep="") + if(length(lwd) < risk.columns) + lwd = rep(lwd,risk.columns) + if(length(lty) < risk.columns) + lty = rep(lty,risk.columns) + if(length(pch) < risk.columns) + pch = rep(pch,risk.columns) + plot.new() + 
plot.window(xlim, ylim, xaxs = "r") + if (reference.grid) { + grid(col = elementcolor) + } + for(risk.column in risk.columns:1) { + lines(p,risk[,risk.column], col = colorset[risk.column], lwd = lwd[risk.column], pch = pch[risk.column], lty = lty[risk.column], type = type, ...) + } + + # draw x-axis + axis(1, labels=p, at=p, col = elementcolor) # at= 1/(1:length(p)) + title(xlab = xlab) + + # set up y-axis + axis(2, col=elementcolor) + box(col = elementcolor) + + if(!is.null(legend.loc)){ + # There's no good place to put this automatically, except under the graph. + # That requires a different solution, but here's the quick fix + legend(legend.loc, inset = 0.02, text.col = colorset, col = colorset, cex = cex.legend, border.col = elementcolor, lty = lty, lwd = 2, bg = "white", legend = legend.txt) + } + + # Add the other titles + if(is.null(main)) + main=columnnames[1] + title(ylab = ylab) + title(main = main) +} + +############################################################################### +# R (http://r-project.org/) Econometrics for Performance and Risk Analysis +# +# Copyright (c) 2004-2012 Peter Carl and Brian G. 
Peterson +# +# This R package is distributed under the terms of the GNU Public License (GPL) +# for full details see the file COPYING +# +# $Id: chart.VaRSensitivity.R 3191 2013-09-26 15:42:06Z peter_carl $ +# +############################################################################### From noreply at r-forge.r-project.org Thu Sep 26 22:52:57 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Thu, 26 Sep 2013 22:52:57 +0200 (CEST) Subject: [Returnanalytics-commits] r3193 - pkg/PortfolioAnalytics/sandbox/symposium2013 Message-ID: <20130926205257.D176E185953@r-forge.r-project.org> Author: peter_carl Date: 2013-09-26 22:52:57 +0200 (Thu, 26 Sep 2013) New Revision: 3193 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R Log: - creating more summary objects from results Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-26 15:45:09 UTC (rev 3192) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R 2013-09-26 20:52:57 UTC (rev 3193) @@ -361,8 +361,10 @@ print(RiskBudget.DE$elapsed_time) print('Done with optimizations.') +#------------------------------------------------------------------------ +### Extract data from optimizations for analysis -### Combine optimization objects +# Combine optimization objects buoys <- combine.optimizations(list(MeanSD=MeanSD.ROI, MeanmETL=MeanmETL.ROI, MinSD=MinSD.ROI, MinmETL=MinmETL.ROI, EqSD=EqSD.RND, EqmETL=EqmETL.RND, RB=RiskBudget.DE, EqWt=EqWt.opt)) # how to add an EqWgt to this list? #@ The elements of this list need to be optimize.portfolio objects, so unfortunately we @@ -386,19 +388,36 @@ Wgts = extractWeights(buoys) -# Extract portfolio measures from each objective -## We can't just extract them, because they aren't all calculated -## so fill them in... 
-portfmeas=NULL +### Extract portfolio measures from each objective +# We can't just extract them, because they aren't all calculated +# so fill them in... +buoys.portfmeas = buoys.contrib.sd = buoys.contrib.es = buoys.perc.sd = buoys.perc.es = NULL for(i in 1:NROW(Wgts)){ mean = sum(colMeans(R)*Wgts[i,]) - sd = StdDev(R, weights=Wgts[i,]) + sd = StdDev(R, weights=Wgts[i,], portfolio_method="component") es = ES(R, weights=Wgts[i,], method="modified", portfolio_method="component", p=p) - portfmeas=rbind(portfmeas, c(mean, sd[1], es[1])) + buoys.portfmeas=rbind(buoys.portfmeas, c(mean, sd[[1]][1], es[[1]][1])) + buoys.contrib.sd= rbind(buoys.contrib.sd,sd[[2]]) + buoys.contrib.es= rbind(buoys.contrib.es,es[[2]]) + buoys.perc.sd = rbind(buoys.perc.sd,sd[[3]]) + buoys.perc.es = rbind(buoys.perc.es,es[[3]]) } -colnames(portfmeas)=c("Mean", "StdDev", "mETL") -rownames(portfmeas)=rownames(Wgts) +colnames(buoys.portfmeas)=c("Mean", "StdDev", "mETL") +rownames(buoys.portfmeas)= +rownames(buoys.contrib.sd)= +rownames(buoys.contrib.es)= +rownames(buoys.perc.sd) = +rownames(buoys.perc.es) = rownames(Wgts) +colnames(buoys.contrib.sd)= +colnames(buoys.contrib.es)= +colnames(buoys.perc.sd) = +colnames(buoys.perc.es) = colnames(Wgts) +# get the RP portfolios with risk and return pre-calculated +xtract = extractStats(EqmETL.RND) +# columnnames = colnames(xtract) +results.names=rownames(buoys.portfmeas) + end_time<-Sys.time() end_time-start_time From noreply at r-forge.r-project.org Sat Sep 28 21:01:52 2013 From: noreply at r-forge.r-project.org (noreply at r-forge.r-project.org) Date: Sat, 28 Sep 2013 21:01:52 +0200 (CEST) Subject: [Returnanalytics-commits] r3194 - in pkg/PortfolioAnalytics/sandbox/symposium2013: . 
docs Message-ID: <20130928190152.6AF57183D86@r-forge.r-project.org> Author: peter_carl Date: 2013-09-28 21:01:51 +0200 (Sat, 28 Sep 2013) New Revision: 3194 Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd pkg/PortfolioAnalytics/sandbox/symposium2013/optimize.HFindexes.R pkg/PortfolioAnalytics/sandbox/symposium2013/results.HFindexes.R Log: - several new slides and graphs - modified RP to make more accurate portfolios - modified buoy constraints per Ross' suggestions - issue comparing RP to closed form solutions Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-26 20:52:57 UTC (rev 3193) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/analyze.HFindexes.R 2013-09-28 19:01:51 UTC (rev 3194) @@ -191,17 +191,19 @@ # -------------------------------------------------------------------- # ETL parameterization charts # -------------------------------------------------------------------- - # @TODO: make these y-axes match? 
-# source('~/devel/R/returnanalytics/pkg/PerformanceAnalytics/R/chart.VaRSensitivity.R') +# Requires a recent modification to the chart in PerformanceAnalytics to make the y-axes match; in revision 3191 +source('./R/chart.VaRSensitivity.R') png(filename=paste(resultsdir, dataname, "-ETL-sensitivity.png", sep=""), units="in", height=5.5, width=9, res=96) op <- par(no.readonly = TRUE) layout(matrix(c(1:8), nrow=2)) par(mar = c(4, 4, 5, 2)+0.1) #c(bottom, left, top, right) for(i in 1:NCOL(R)){ - chart.VaRSensitivity(R[,i], methods=c("ModifiedES","HistoricalES", "GaussianES"), legend.loc=NULL, clean=clean, colorset=c("orange", "black", "darkgray"), lty=c(3,1,2), lwd=3, main=R.names[i], ylim=c(-0.09,0), ylab="Expected Tail Loss") + chart.VaRSensitivity(R[,i], methods=c("ModifiedES","HistoricalES", "GaussianES"), legend.loc=NULL, clean=clean, colorset=c("orange", "black", "darkgray"), lty=c(2,1,2), lwd=3, main=R.names[i], ylim=c(-0.09,0), ylab="Expected Tail Loss") abline(v = 1-1/12, col = "red", lty = 2, lwd=1) } plot.new() - legend("center", legend=c("Modified \nETL","Historical \nETL", "Gaussian \nETL"), lty=c(3,1,2), lwd=3, col=c("orange", "black", "darkgray"), cex=1.2, y.intersp=2) + legend("center", legend=c("Modified \nETL","Historical \nETL", "Gaussian \nETL"), lty=c(2,1,2), lwd=3, col=c("orange", "black", "darkgray"), cex=1.2, y.intersp=2) par(op) -dev.off() \ No newline at end of file +dev.off() + + \ No newline at end of file Modified: pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd =================================================================== --- pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-26 20:52:57 UTC (rev 3193) +++ pkg/PortfolioAnalytics/sandbox/symposium2013/docs/symposium-slides-2013.Rmd 2013-09-28 19:01:51 UTC (rev 3194) @@ -181,7 +181,7 @@ \includegraphics[width=1.0\textwidth]{../results/EDHEC-ETL-sensitivity.png} @@ -239,18 +239,21 @@ Table of Return, Volatility, Skew, Kurt, 
and Correlations by asset