[Yuima-commits] r408 - pkg/yuima/R
noreply at r-forge.r-project.org
Wed Mar 2 13:37:49 CET 2016
Author: lorenzo
Date: 2016-03-02 13:37:49 +0100 (Wed, 02 Mar 2016)
New Revision: 408
Modified:
pkg/yuima/R/MM.COGARCH.R
pkg/yuima/R/PseudoLogLikCOGARCH.R
Log:
Summary COGARCH updated
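
This revision mainly touches the summary/show methods for COGARCH fits: pseudo-log-likelihood fits are now labelled "PML estimation" and report a "-2 PseudoLogLik" value, while moment-based fits keep the "GMM estimation" label. As a rough usage sketch of the estimator defined in MM.COGARCH.R below: `cog_yuima` and the starting values are hypothetical placeholders, and the argument names follow the gmm() signature visible in this file.

library(yuima)

## Hypothetical yuima object carrying a yuima.cogarch model and observed data;
## parameter names follow the ar.par/ma.par/loc.par convention used in this file.
fit <- gmm(yuima = cog_yuima,
           start = list(a1 = 0.04, b1 = 0.05, a0 = 0.01),  # placeholder starting values
           objFun = "L2",           # one of "L1", "L2", "L2CUE", "TWOSTEPS"
           Est.Incr = "Incr",       # also recover the estimated Levy increments
           equally.spaced = TRUE,   # observations aggregated on a unitary grid
           lag.max = 10)            # lags of the acf of squared returns to match

summary(fit)   # GMM fits print "GMM estimation"; after this commit,
               # PseudoLogLik fits print "PML estimation" and -2 PseudoLogLik
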
Modified: pkg/yuima/R/MM.COGARCH.R
===================================================================
--- pkg/yuima/R/MM.COGARCH.R 2016-02-25 03:30:09 UTC (rev 407)
+++ pkg/yuima/R/MM.COGARCH.R 2016-03-02 12:37:49 UTC (rev 408)
@@ -1,5 +1,5 @@
# In this function we consider different kinds of estimation procedures for the COGARCH(P,Q)
-# Model.
+# Model.
is.COGARCH <- function(obj){
if(is(obj,"yuima"))
return(is(obj@model, "yuima.cogarch"))
@@ -8,7 +8,7 @@
return(FALSE)
}
yuima.acf<-function(data,lag.max, forward=TRUE){
- if(forward==FALSE){
+ if(forward==FALSE){
burndata<-lag.max+1
dataused<-as.list(numeric(length=burndata+1))
Time<-length(data)
@@ -17,24 +17,24 @@
dataused[[2]]<-mean(dataformean^2)
mu<-mean(dataformean)
leng <-length(dataformean)
-
+
var1<-sum((dataformean-mu)*(dataformean-mu))/leng
-
-
+
+
res1<-numeric(length=lag.max)
elem<-matrix(0,lag.max,(Time-lag.max))
for (t in (lag.max+1):(Time)){
-
+
# h<-leng-lag.max
- # elem<-(data[(1+t):leng]-mu)*(data[1:h]-mu)/(leng*var1)
- # res1[t+1]<-sum(elem)
+ # elem<-(data[(1+t):leng]-mu)*(data[1:h]-mu)/(leng*var1)
+ # res1[t+1]<-sum(elem)
h <-c(lag.max:1)
elem[,(t-lag.max)]<-(data[(t-h)[order(t-h,decreasing=TRUE)]]-mu)*(data[t]-mu)/(var1)
}
for(h in 3:(lag.max+2)){
dataused[[h]]<-sum(elem[h-2,])/leng
- }
-
+ }
+
elem0<-rbind(t(as.matrix(dataformean)),elem)
# res1<-res1
# acfr<-res1[2:(lag.max+1)] #analogously to Matlab program
@@ -47,71 +47,71 @@
dataused[[2]]<-mean(dataformean^2)
mu<-mean(dataformean)
leng <-length(dataformean)
-
+
var1<-sum((dataformean-mu)*(dataformean-mu))/leng
-
-
+
+
res1<-numeric(length=lag.max)
elem<-matrix(0,lag.max,(Time-lag.max))
for (t in 1:(Time-(lag.max))){
-
+
# h<-leng-lag.max
- # elem<-(data[(1+t):leng]-mu)*(data[1:h]-mu)/(leng*var1)
- # res1[t+1]<-sum(elem)
+ # elem<-(data[(1+t):leng]-mu)*(data[1:h]-mu)/(leng*var1)
+ # res1[t+1]<-sum(elem)
h <-c(1:lag.max)
elem[,t]<-(data[(t+h)]-mu)*(data[t]-mu)/(var1)
}
for(h in 3:(lag.max+2)){
dataused[[h]]<-sum(elem[h-2,])/leng
- }
-
+ }
+
elem0<-rbind(t(as.matrix(dataformean)),elem)
}
return(list(dataused=dataused, elem=elem0, leng=leng))
}
-# The estimation procedures for COGARCH(p,q) implemented in this code are based on the
+# The estimation procedures for COGARCH(p,q) implemented in this code are based on the
# Chadraa PhD thesis.
-gmm<-function(yuima, data = NULL, start, method="BFGS", fixed = list(),
+gmm<-function(yuima, data = NULL, start, method="BFGS", fixed = list(),
lower, upper, lag.max = NULL, equally.spaced = FALSE, aggregation=TRUE,
Est.Incr = "NoIncr", objFun = "L2"){
print <- FALSE
aggr.G <- equally.spaced
call <- match.call()
-
+
if(objFun=="L1" && method!="Nelder-Mead"){
yuima.warn("Mean absolute error minimization is available only for 'method=Nelder-Mead'. yuima sets automatically 'method=Nelder-Mead' ")
method<-"Nelder.Mead"
}
-
+
if(objFun=="L1" && (length(fixed)!=0 || !missing(lower) || !missing(upper))){
yuima.stop("Constraints are not allow for the minimization of Mean absolute error")
}
-
+
codelist.objFun <- c("L1","L2","L2CUE", "TWOSTEPS")
if(any(is.na(match(objFun,codelist.objFun)))){
yuima.stop("Value of objFun not available. Please choose among L1, L2, L2CUE, TWOSTEPS")
}
-
+
codelist.Est.Incr <- c("NoIncr","Incr","IncrPar")
if(any(is.na(match(Est.Incr,codelist.Est.Incr)))){
yuima.stop("Value of Est.Incr not available. Please choose among NoIncr, Incr, IncrPar ")
}
if( missing(yuima))
yuima.stop("yuima object is missing.")
-
- if( missing(start) )
+
+ if( missing(start) )
yuima.stop("Starting values for the parameters are missing.")
-
+
if( !is.COGARCH(yuima) )
yuima.stop("The model is not an object of class yuima.")
if( !is(yuima,"yuima") && missing(data) )
yuima.stop("data are missing.")
-
-
+
+
if(is(yuima,"yuima")){
model<-yuima@model
if(is.null(data)){
@@ -126,11 +126,11 @@
observ<-data
}
}
-
+
if(!is(observ,"yuima.data")){
- yuima.stop("Data must be an object of class yuima.data-class")
- }
-
+ yuima.stop("Data must be an object of class yuima.data-class")
+ }
+
if( !missing(upper) && (method!="L-BFGS-B"||method!="brent")){
yuima.warn("The upper requires L-BFGS-B or brent methods. We change method in L-BFGS-B")
method <- "L-BFGS-B"
@@ -145,16 +145,16 @@
yuima.warn("The fixed constraints requires L-BFGS-B or brent methods. We change method in L-BFGS-B")
method <- "L-BFGS-B"
}
-
+
# We identify the model parameters
info <- model@info
-
+
numb.ar <- info@q
ar.name <- paste(info@ar.par,c(numb.ar:1),sep="")
-
+
numb.ma <- info@p
ma.name <- paste(info@ma.par,c(1:numb.ma),sep="")
-
+
loc.par <- info@loc.par
#xinit.name <- paste(info@Latent.var, c(1:numb.ar), sep = "")
@@ -163,14 +163,14 @@
xinit.name <- xinit.name0[-idx]
meas.par <- model@parameter@measure
-
+
if(length(meas.par)==0 && Est.Incr=="IncrPar"){
yuima.warn("The dimension of measure parameters is zero, yuima changes 'Est.Incr = IncrPar' into 'Est.Incr = Incr'")
Est.Incr <- "Incr"
}
-
-
-
+
+
+
fixed.name <- names(fixed)
if(info@q==1){
# nm <- c(names(start), "EL1", "phi1","phi2")
@@ -181,7 +181,7 @@
}
# We identify the index of parameters
if(length(meas.par)!=0){
- if(info@q==1){
+ if(info@q==1){
# fullcoeff <- c(ar.name, ma.name, loc.par, xinit.name, measure.par, "EL1", "phi1","phi2")
fullcoeff <- c(ar.name, ma.name, loc.par, xinit.name, meas.par, "EL1")
}else{
@@ -209,7 +209,7 @@
start <- c(start, EL1 = 1)
}
start <- start[order(oo)]
-
+
nm <- names(start)
ar.idx <- match(ar.name, fullcoeff)
@@ -223,18 +223,18 @@
# n <- length(observ)[1]
#n <- attr(observ@original.data,"tsp")[2]
-
+
n <- length(index(observ@original.data))
#Lag
assign("lag", lag.max, envir=env)
-
+
# Data
assign("Data", as.matrix(onezoo(observ)[,1]), envir=env)
assign("deltaData", n/index(observ at zoo.data[[1]])[n], envir=env)
assign("time.obs",length(env$Data),envir=env)
-
-
+
+
# Order
assign("p", info at p, envir=env)
assign("q", info at q, envir=env)
@@ -245,21 +245,21 @@
assign("loc.idx", loc.idx, envir=env)
assign("meas.idx", meas.idx, envir=env)
assign("EL1.idx", EL1.idx, envir=env)
-
+
objFunDummy <- NULL
if(objFun=="TWOSTEPS"||objFun=="L2CUE"){
objFunDummy <- objFun
objFun <- "L2"
-
+
}
assign("objFun",objFun, envir=env)
-
+
if(aggr.G==TRUE){
if(floor(n/index(observ@zoo.data[[1]])[n])!=env$deltaData){
yuima.stop("the n/Terminal in sampling information is not an integer. equally.spaced=FALSE is recommended")
}
}
-
+
if(aggr.G==TRUE){
# Aggregate returns G
#dt<-round(deltat(onezoo(observ)[,1])*10^5)/10^5
@@ -270,8 +270,8 @@
dummydata<-index(onezoo(observ)[,1])
unitarytime<-floor(dummydata)
index<-!duplicated(unitarytime)
- G_i <- diff(env$Data[index])
-
+ G_i <- diff(env$Data[index])
+
r <- 1
}
d <- min(floor(sqrt(length(G_i))),env$lag)
@@ -279,14 +279,14 @@
typeacf <- "correlation"
assign("typeacf", typeacf, envir=env)
# CovQuad <- acf(G_i^2,plot=FALSE,lag.max=d,type=typeacf)$acf[-1]
-
+
example<-yuima.acf(data=G_i^2,lag.max=d)
dummyEmpiricalMoM<-as.numeric(example$dataused)
CovQuad <- as.numeric(example$dataused)[-c(1:2)]
-#
-
-
-
+#
+
+
+
assign("G_i", G_i, envir=env)
assign("r", r, envir=env)
#mu_G2 <- as.numeric(example$dataused)[1]
@@ -306,108 +306,108 @@
names(mycoef) <- nm[-c(fixed.idx,meas.idx)] ## SMI 2/9/14
}else{
names(mycoef) <- nm
- }
+ }
ErrTerm(yuima=yuima, param=mycoef, print=print, env)
}
if(method!="L-BFGS-B"&&method!="brent"){
out<- optim(start, objectiveFun, method = method, env=env, hessian = TRUE)
}else{
if(length(fixed)!=0 && !missing(upper) && !missing(lower)){
- out<- optim(start, objectiveFun, method = method,
- fixed=as.numeric(fixed),
- lower=as.numeric(lower),
+ out<- optim(start, objectiveFun, method = method,
+ fixed=as.numeric(fixed),
+ lower=as.numeric(lower),
upper=as.numeric(upper), env=env)
}else{
if(!missing(upper) && !missing(lower)){
- out<- optim(start, objectiveFun, method = method,
- lower=as.numeric(lower),
+ out<- optim(start, objectiveFun, method = method,
+ lower=as.numeric(lower),
upper=as.numeric(upper), env=env)
}
if(length(fixed)!=0 && !missing(lower)){
- out<- optim(start, objectiveFun, method = method,
- fixed=as.numeric(fixed),
+ out<- optim(start, objectiveFun, method = method,
+ fixed=as.numeric(fixed),
lower=as.numeric(lower), env=env)
}
if(!missing(upper) && length(fixed)!=0){
- out<- optim(start, objectiveFun, method = method,
- fixed=as.numeric(fixed),
+ out<- optim(start, objectiveFun, method = method,
+ fixed=as.numeric(fixed),
upper=as.numeric(upper), env=env)
}
}
-
+
if(length(fixed)!=0 && missing(upper) && missing(lower)){
- out<- optim(start, objectiveFun, method = method,
+ out<- optim(start, objectiveFun, method = method,
fixed=as.numeric(fixed), env=env)
}
-
+
if(length(fixed)==0 && !missing(upper) && missing(lower)){
out<- optim(start, objectiveFun, method = method,
upper=as.numeric(upper), env=env)
}
-
+
if(length(fixed)==0 && missing(upper) && !missing(lower)){
- out<- optim(start, objectiveFun, method = method,
+ out<- optim(start, objectiveFun, method = method,
lower=as.numeric(lower), env=env)
}
-
+
}
# Alternative way for calculating Variance Covariance Matrix
# sig2eps<-out$value/d
-#
+#
# dumHess<- out$hessian[c(ar.name, ma.name),c(ar.name, ma.name)]/(2*sig2eps)
# vcovgmm<-solve(dumHess)
# sqrt(diag(vcovgmm))
if(!is.null(objFunDummy)){
assign("objFun",objFunDummy, envir=env)
-
+
bvect<-out$par[ar.name]
bq<-bvect[1]
avect<-out$par[ma.name]
a1<-avect[1]
-
+
out$par[loc.par]<-(bq-a1)*mu_G2/(bq*r)
-
+
# Determine the Variance-Covariance Matrix
if(length(meas.par)!=0){
idx.dumm<-match(meas.par,names(out$par))
out$par<-out$par[- idx.dumm]
}
dimOutp<-length(out$par)-(1+info@q)
- coef <- out$par[c(1:dimOutp)]
+ coef <- out$par[c(1:dimOutp)]
vcov<-matrix(NA, dimOutp, dimOutp)
names_coef<-names(coef)
colnames(vcov) <- names_coef
rownames(vcov) <- names_coef
mycoef <- start
- min <- out$value
+ min <- out$value
# # call
- gradVect0<-MM_grad_Cogarch(p=info@p, q=info@q,
- acoeff=avect,cost=out$par[loc.par], b=bvect,
- r=env$r, h=seq(1, env$d, by = 1)*env$r, type=typeacf,
+ gradVect0<-MM_grad_Cogarch(p=info@p, q=info@q,
+ acoeff=avect,cost=out$par[loc.par], b=bvect,
+ r=env$r, h=seq(1, env$d, by = 1)*env$r, type=typeacf,
m2=env$mu_G2, var=env$var_G2)
-
+
score0 <- MM_Cogarch(p=info@p, q=info@q,
- acoeff=avect,cost=out$par[loc.par], b=bvect,
- r=env$r, h=seq(1, env$d, by = 1)*env$r, type=typeacf,
+ acoeff=avect,cost=out$par[loc.par], b=bvect,
+ r=env$r, h=seq(1, env$d, by = 1)*env$r, type=typeacf,
m2=env$mu_G2, var=env$var_G2)
- idx.aaa<-match(loc.par,names_coef)
+ idx.aaa<-match(loc.par,names_coef)
# gradVect <- gradVect0[names_coef[-idx.aaa], ]
gradVect <- gradVect0[names_coef[-idx.aaa],CovQuad>0]
# score <- c(score0$acfG2)%*%matrix(1,1,example$leng)
score <- c(score0$acfG2[CovQuad>0])%*%matrix(1,1,example$leng)
-
+
#We need to write the matrix W for the sandwich matrix
#plot(as.numeric(example$dataused)[-1],type="h")
#S_matrix
-
+
# EmpirScore <-score-example$elem[-1,]
exampelem <-example$elem[-1,]
EmpirScore <-score-exampelem[CovQuad>0,]
-
- Omega_est <- tryCatch((1/example$leng*EmpirScore%*%t(EmpirScore)),
+
+ Omega_est <- tryCatch((1/example$leng*EmpirScore%*%t(EmpirScore)),
error=function(theta){NULL})
W_est <-tryCatch(chol2inv(Omega_est),error=function(theta){NULL})
if(!is.null(W_est)){
@@ -420,43 +420,43 @@
out<- optim(start, objectiveFun, method = method, env=env)
}else{
if(length(fixed)!=0 && !missing(upper) && !missing(lower)){
- out<- optim(start, objectiveFun, method = method,
- fixed=as.numeric(fixed),
- lower=as.numeric(lower),
+ out<- optim(start, objectiveFun, method = method,
+ fixed=as.numeric(fixed),
+ lower=as.numeric(lower),
upper=as.numeric(upper), env=env)
}else{
if(!missing(upper) && !missing(lower)){
- out<- optim(start, objectiveFun, method = method,
- lower=as.numeric(lower),
+ out<- optim(start, objectiveFun, method = method,
+ lower=as.numeric(lower),
upper=as.numeric(upper), env=env)
}
if(length(fixed)!=0 && !missing(lower)){
- out<- optim(start, objectiveFun, method = method,
- fixed=as.numeric(fixed),
+ out<- optim(start, objectiveFun, method = method,
+ fixed=as.numeric(fixed),
lower=as.numeric(lower), env=env)
}
if(!missing(upper) && length(fixed)!=0){
- out<- optim(start, objectiveFun, method = method,
- fixed=as.numeric(fixed),
+ out<- optim(start, objectiveFun, method = method,
+ fixed=as.numeric(fixed),
upper=as.numeric(upper), env=env)
}
}
-
+
if(length(fixed)!=0 && missing(upper) && missing(lower)){
- out<- optim(start, objectiveFun, method = method,
+ out<- optim(start, objectiveFun, method = method,
fixed=as.numeric(fixed), env=env)
}
-
+
if(length(fixed)==0 && !missing(upper) && missing(lower)){
out<- optim(start, objectiveFun, method = method,
upper=as.numeric(upper), env=env)
}
-
+
if(length(fixed)==0 && missing(upper) && !missing(lower)){
- out<- optim(start, objectiveFun, method = method,
+ out<- optim(start, objectiveFun, method = method,
lower=as.numeric(lower), env=env)
}
-
+
}
}else{
yuima.warn("Method TWOSTEPS or L2CUE Changed in L2 since First W failed to compute")
@@ -466,7 +466,7 @@
bq<-bvect[1]
avect<-out$par[ma.name]
a1<-avect[1]
-
+
out$par[loc.par]<-(bq-a1)*mu_G2/(bq*r)
# Determine the Variance-Covariance Matrix
@@ -475,36 +475,36 @@
out$par<-out$par[- idx.dumm]
}
dimOutp<-length(out$par)-(1+info@q)
- coef <- out$par[c(1:dimOutp)]
+ coef <- out$par[c(1:dimOutp)]
vcov<-matrix(NA, dimOutp, dimOutp)
names_coef<-names(coef)
colnames(vcov) <- names_coef
rownames(vcov) <- names_coef
# mycoef <- start
- min <- out$value
+ min <- out$value
# # call
if(objFun!="L1"){
- gradVect0<-MM_grad_Cogarch(p=info@p, q=info@q,
- acoeff=avect,cost=out$par[loc.par], b=bvect,
- r=env$r, h=seq(1, env$d, by = 1)*env$r, type=typeacf,
+ gradVect0<-MM_grad_Cogarch(p=info@p, q=info@q,
+ acoeff=avect,cost=out$par[loc.par], b=bvect,
+ r=env$r, h=seq(1, env$d, by = 1)*env$r, type=typeacf,
m2=env$mu_G2, var=env$var_G2)
-
+
score0 <- MM_Cogarch(p=info@p, q=info@q,
- acoeff=avect,cost=out$par[loc.par], b=bvect,
- r=env$r, h=seq(1, env$d, by = 1)*env$r, type=typeacf,
+ acoeff=avect,cost=out$par[loc.par], b=bvect,
+ r=env$r, h=seq(1, env$d, by = 1)*env$r, type=typeacf,
m2=env$mu_G2, var=env$var_G2)
if(objFun == "L2"){
# min <- log(sum((score0$acfG2[CovQuad>0]-CovQuad[CovQuad>0])^2))
-
+
min <- log(sum((score0$acfG2-CovQuad)^2))
#min <- log(sum((score0$acfG2[CovQuad>0]-CovQuad[CovQuad>0])^2))
- }
- idx.aaa<-match(loc.par,names_coef)
+ }
+ idx.aaa<-match(loc.par,names_coef)
# gradVect <- gradVect0[names_coef[-idx.aaa], ]
gradVect <- gradVect0[names_coef[-idx.aaa],CovQuad>0]
# score <- c(score0$acfG2)%*%matrix(1,1,example$leng)
score <- c(score0$acfG2[CovQuad>0])%*%matrix(1,1,example$leng)
-
+
#We need to write the matrix W for the sandwich matrix
#plot(as.numeric(example$dataused)[-1],type="h")
#S_matrix
@@ -512,8 +512,8 @@
# EmpirScore <-score-example$elem[-1,]
exampelem <-example$elem[-1,]
EmpirScore <-score-exampelem[CovQuad>0,]
-
- Omega_est<-tryCatch((1/example$leng*EmpirScore%*%t(EmpirScore)),
+
+ Omega_est<-tryCatch((1/example$leng*EmpirScore%*%t(EmpirScore)),
error=function(theta){NULL})
if(is.null(Omega_est)){
Omega_est<-matrix(NA,dim(EmpirScore)[1],dim(EmpirScore)[1])
@@ -531,10 +531,10 @@
#Var_Matr0 <- solve(Gmatr)/example$leng
}
-
+
if(!is.null(Var_Matr0)){
aaa<-dimOutp-1
- vcov[c(1:aaa),c(1:aaa)]<-Var_Matr0
+ vcov[c(1:aaa),c(1:aaa)]<-Var_Matr0
}
# Var_Matr <- solve(gradVect%*%t(gradVect))
@@ -544,18 +544,18 @@
#out$par[loc.par]<-(bq-a1)*mu_G2/(bq*r)
}
-
+
# Build an object of class mle
if(Est.Incr=="NoIncr"){
- res<-new("cogarch.gmm", call = call, coef = coef, fullcoef = unlist(coef),
- vcov = vcov, min = min, details = list(),
+ res<-new("cogarch.gmm", call = call, coef = coef, fullcoef = unlist(coef),
+ vcov = vcov, min = min, details = list(),
method = character(),
model = model,
- objFun = objFun
+ objFun = objFun
)
}
if(Est.Incr=="Incr"||Est.Incr=="IncrPar"){
- L.Incr<-cogarchNoise(yuima.cogarch=model, data=observ,
+ L.Incr<-cogarchNoise(yuima.cogarch=model, data=observ,
param=as.list(coef), mu=1)
ttt<-observ@zoo.data[[1]]
tt<-index(ttt)
@@ -563,8 +563,8 @@
}
if(Est.Incr=="Incr"){
# Build an object of class cogarch.gmm.incr
- res<-new("cogarch.gmm.incr", call = call, coef = coef, fullcoef = unlist(coef),
- vcov = vcov, min = min, details = list(),
+ res<-new("cogarch.gmm.incr", call = call, coef = coef, fullcoef = unlist(coef),
+ vcov = vcov, min = min, details = list(),
method = character(),
Incr.Lev = L.Incr_Fin,
model = model, nobs=as.integer(length(L.Incr)+1),
@@ -589,8 +589,8 @@
}else{
inc.levy1 <- L.Incr
}
-
- result.Lev <- gmm.Est.Lev(Increment.lev=c(0,inc.levy1),
+
+ result.Lev <- gmm.Est.Lev(Increment.lev=c(0,inc.levy1),
param0=start[meas.par],
fixed = fixedCon[meas.par],
lower=lowerCon[meas.par],
@@ -599,18 +599,18 @@
measure.type=model@measure.type,
aggregation=aggregation,
dt=1/env$deltaData
- )
-
+ )
+
if(is.null(result.Lev)){
- res<-new("cogarch.gmm.incr", call = call, coef = coef, fullcoef = unlist(coef),
- vcov = vcov, min = min, details = list(),
+ res<-new("cogarch.gmm.incr", call = call, coef = coef, fullcoef = unlist(coef),
+ vcov = vcov, min = min, details = list(),
method = character(),
Incr.Lev=L.Incr_Fin,
model = model, nobs=as.integer(length(L.Incr)+1),
logL.Incr = numeric(),
objFun= objFun
)
-
+
}
else{
Inc.Parm<-result.Lev$estLevpar
@@ -622,32 +622,32 @@
}
name.parm.cog<-names(coef)
coef<-c(coef,Inc.Parm)
-
+
names.par<-names(coef)
cov<-NULL
cov<-matrix(NA,length(names.par),length(names.par))
rownames(cov)<-names.par
colnames(cov)<-names.par
-
+
cov[unique(name.parm.cog),unique(name.parm.cog)]<-vcov
cov[names(Inc.Parm),names(Inc.Parm)]<-IncVCOV
cov<-cov
-
- res<-new("cogarch.gmm.incr", call = call, coef = coef, fullcoef = unlist(coef),
- vcov = cov, min = min, details = list(),
+
+ res<-new("cogarch.gmm.incr", call = call, coef = coef, fullcoef = unlist(coef),
+ vcov = cov, min = min, details = list(),
method = character(),
Incr.Lev=L.Incr_Fin,
model = model, nobs=as.integer(length(L.Incr)+1),
logL.Incr = tryCatch(-result.Lev$value,error=function(theta){NULL}),
objFun= objFun
)
-
+
}
}
return(res)
-
+
}
constdum<-function(fixed, meas.par){
@@ -661,7 +661,7 @@
}
-gmm.Est.Lev<-function(Increment.lev,
+gmm.Est.Lev<-function(Increment.lev,
param0,
fixed=list(),
lower=list(),
@@ -670,14 +670,14 @@
measure.type,
aggregation,
dt){
-
- fixed.carma <- unlist(fixed)
+
+ fixed.carma <- unlist(fixed)
lower.carma <- unlist(lower)
upper.carma <- unlist(upper)
-
- Dummy <- TRUE
-
-
+
+ Dummy <- TRUE
+
+
CPlist <- c("dnorm","dgamma", "dexp")
codelist <- c("rngamma","rNIG","rIG", "rgamma")
if(measure.type=="CP"){
@@ -685,21 +685,21 @@
measurefunc <- substring(measure$df$exp, 1, tmp-1)
if(is.na(match(measurefunc,CPlist))){
yuima.warn("COGARCH(p,q): Other compound poisson processes will be implemented as soon as")
- Dummy <- FALSE
+ Dummy <- FALSE
}
-
+
}
-
-
+
+
if(measure.type=="code"){
tmp <- regexpr("\\(", measure$df$exp)[1]
measurefunc <- substring(measure$df$exp, 1, tmp-1)
if(is.na(match(measurefunc,codelist))){
yuima.warn("COGARCH(p,q): Other Levy measure will be implemented as soon as possible")
Dummy <- FALSE
-
- }
- }
+
+ }
+ }
if(Dummy){
#env$h<-dt
result.Lev <- yuima.Estimation.Lev(Increment.lev=Increment.lev,
@@ -722,7 +722,7 @@
ErrTerm <- function(yuima, param, print, env){
typeacf <- env$typeacf
param <- as.numeric(param)
-
+
G_i <- env$G_i
r <- env$r
mu_G2 <- env$mu_G2
@@ -737,16 +737,16 @@
meanL1 <- param[env$EL1.idx]
# meanL1 <- 1
# if(env$q == 1){
-#
+#
# beta <- param[cost]*param[b]
# eta <- param[b]
# phi <- param[a]
-#
+#
# # phi1 <- param[env$phi1.idx]
# # phi2 <- param[env$phi2.idx]
# #theo_mu_G2 <- meanL1*r*beta/abs(phi1)
# phi1 <- meanL1*r*beta/mu_G2
-#
+#
# termA <- (6*mu_G2/r*beta/abs(phi1)*(2*eta/phi-meanL1)*(r-(1-exp(-r*abs(phi1)))/abs(phi1))+2*beta^2/phi^2*r)
# phi2 <-2*termA*abs(phi1)/((var_G2-2*mu_G2^2)*abs(phi1)+termA)
# if(typeacf == "covariance"){
@@ -758,18 +758,18 @@
# (2/abs(phi2)-1/abs(phi1))*(1-exp(-r*abs(phi1)))*(exp(r*abs(phi1))-1)*
# exp(-h*abs(phi1))/(var_G2)
# }
-#
-# }
+#
+# }
if(env$q >= 1){
TheoCovQuad <- numeric(length = length(h))
cost<-param[cost]
# for(i in c(1:length(h))){
MomentCog <- MM_Cogarch(p = env$p, q = env$q, acoeff=param[a],
cost=cost,
- b=param[b], r = r, h = h,
+ b=param[b], r = r, h = h,
type = typeacf, m2=mu_G2, var=var_G2)
-
-
+
+
TheoCovQuad <- MomentCog$acfG2
# }
theo_mu_G2 <- MomentCog$meanG2
@@ -782,15 +782,15 @@
# emp <- log(CovQuad[CovQuad>0])
# theo <- log(TheoCovQuad[CovQuad>0])
# res <- log(sum((abs(TheoCovQuad)-abs(CovQuad))^2))
-
-
+
+
res <- sum((log(TheoCovQuad[CovQuad>0])-log(CovQuad[CovQuad>0]))^2)
# res <- sum((TheoCovQuad[CovQuad>0]-CovQuad[CovQuad>0])^2)
# res <- sum((TheoCovQuad-CovQuad)^2)
-
+
# res <- sum((log(abs(TheoCovQuad))-log(abs(CovQuad)))^2)
-
+
# res <- sum((log(TheoCovQuad[CovQuad>0]))-log(CovQuad[CovQuad>0]))^2)
return(res)
}
@@ -807,18 +807,18 @@
#CompMoM0<-TheoMoM-CovQuad[CovQuad>0]
CompMoM<-matrix(CompMoM0,1,length(CompMoM0))
res <- as.numeric(CompMoM%*%W_est%*%t(CompMoM))
-
-
+
+
# TheoMoM<-TheoCovQuad[CovQuad>0]
# intrDum<-env$score[-1,]
-#
+#
# dumScore <- (TheoMoM%*%matrix(1,1,env$leng))-intrDum[CovQuad>0,]
# W_est<-chol2inv(dumScore%*%t(dumScore)/env$leng)
# CompMoM0<-TheoMoM-c(CovQuad[CovQuad>0])
# CompMoM<-matrix(CompMoM0,1,length(CompMoM0))
# res <- as.numeric(CompMoM%*%W_est%*%t(CompMoM))
-
-
+
+
return(res)
}
@@ -829,13 +829,13 @@
}
- if(env$objFun=="L2CUE"){
+ if(env$objFun=="L2CUE"){
#TheoMoM<-TheoCovQuad
#
#TheoMoM<-log(TheoCovQuad[CovQuad>0])
TheoMoM<-TheoCovQuad[CovQuad>0]
intrDum<-env$score[-1,]
-
+
dumScore <- (TheoMoM%*%matrix(1,1,env$leng))-intrDum[CovQuad>0,]
W_est<-chol2inv(dumScore%*%t(dumScore)/env$leng)
CompMoM0<-TheoMoM-c(CovQuad[CovQuad>0])
@@ -844,11 +844,11 @@
return(res)
}
-
+
}
MM_Cogarch <- function(p, q, acoeff,cost, b, r, h, type, m2, var){
- # The code developed here is based on the acf for squared returns derived in the
+ # The code developed here is based on the acf for squared returns derived in the
# Chadraa PhD thesis.
a <- e <- matrix(0,nrow=q,ncol=1)
e[q,1] <- 1
@@ -859,23 +859,23 @@
# # Matching only the autocorrelation we are not able to estimate the a_0 parameter
# nevertheless using the theoretical formula of the expectation of squared returns we
-# are able to fix this parameter so as to obtain a perfect match between the theoretical and
+# are able to fix this parameter so as to obtain a perfect match between the theoretical and
# empirical means.
-# We recall that, under the assumption that the Levy process is centered, the mean at time 1 of the
+# We recall that, under the assumption that the Levy process is centered, the mean at time 1 of the
# squared returns and the mean of the corresponding Levy measure are equal.
mu<-1 # we assume variance of the underlying L\'evy is one
meanL1<-mu
-
+
cost<-(bq-mu*a1)*m2/(bq*r)
B<- MatrixA(b[c(q:1)])
B_tilde <- B+mu*e%*%t(a)
meanG2 <- cost*bq*r/(bq-mu*a1)*meanL1
Inf_eps <- IdentM <- diag(q)
if(q==1){
- Inf_eps[q,q] <- -1/(2*B_tilde[1,1])
+ Inf_eps[q,q] <- -1/(2*B_tilde[1,1])
}
if(q==2){
Inf_eps[q,q] <- -B_tilde[2,1]
@@ -891,7 +891,7 @@
if(q>=4){
Inf_eps <- round(AsympVar(B_tilde,e,lower=0,upper=100,delta=0.01)*10^5)/10^5
}
-
+
#term <- expm(B_tilde*h)
term <- lapply(h,"yuimaExpm",B=B_tilde)
#invB <- solve(B_tilde) # In this case we can use the analytical form for Companion Matrix???
@@ -901,14 +901,14 @@
}else{invB<-1/B_tilde}
term1 <- invB%*%(IdentM-expm(-B_tilde*r))
term2 <- (expm(B_tilde*r)-IdentM)
-
+
P0_overRho <- 2*mu^2*(3*invB%*%(invB%*%term2-r*IdentM)-IdentM)%*%Inf_eps
Q0_overRho <- 6*mu*((r*IdentM-invB%*%term2)%*%Inf_eps
-invB%*%(invB%*%term2-r*IdentM)%*%Inf_eps%*%t(B_tilde))%*%e
# Q0_overRho <- 6*mu*((r*IdentM-invB%*%term2)%*%Inf_eps
# -invB%*%(invB%*%term2-r*IdentM)%*%Inf_eps%*%t(B))%*%e
m_overRho <- as.numeric(t(a)%*%Inf_eps%*%a)
- Den<- (m_overRho*meanL1^2/m2^2*var*r^2+t(a)%*%Q0_overRho+t(a)%*%P0_overRho%*%a+1)
+ Den<- (m_overRho*meanL1^2/m2^2*var*r^2+t(a)%*%Q0_overRho+t(a)%*%P0_overRho%*%a+1)
num <-(meanL1^2/m2^2*var-2*mu^2)*r^2
rh0 <- as.numeric(num/Den)
@@ -917,17 +917,17 @@
Ph_withouterm <- term1%*%invB%*%term2%*%Inf_eps1*mu^2
Ph<-lapply(term,"%*%",Ph_withouterm)
Qh_without <- term1%*%(-term2%*%Inf_eps1-invB%*%term2%*%Inf_eps1%*%t(B_tilde))%*%e*mu
- Qh<-lapply(term,"%*%",Qh_without)
+ Qh<-lapply(term,"%*%",Qh_without)
# Qh <- mu*term%*%term1%*%(-term2%*%Inf_eps1-invB%*%term2%*%Inf_eps1%*%t(B))%*%e
m <- m_overRho*rh0
atrasp<-t(a)
if(type=="correlation"){
VarTheo<-as.numeric(rh0*atrasp%*%P0_overRho%*%a+rh0*atrasp%*%Q0_overRho+2*r^2*mu^2+rh0)
- acfG2 <- (as.numeric(lapply(Ph,"yuimaQuadrForm",at=atrasp,a=a))
+ acfG2 <- (as.numeric(lapply(Ph,"yuimaQuadrForm",at=atrasp,a=a))
+ as.numeric(lapply(Qh,"yuimaInnerProd",at=atrasp)))/VarTheo
}else{
coeff <- cost^2*bq^2/((1-m)*(bq-mu*a1)^2)
- acfG2 <- coeff*( as.numeric(lapply(Ph,"yuimaQuadrForm",at=atrasp,a=a))
+ acfG2 <- coeff*( as.numeric(lapply(Ph,"yuimaQuadrForm",at=atrasp,a=a))
+ as.numeric(lapply(Qh,"yuimaInnerProd",at=atrasp)) )
}
res <- list(acfG2=acfG2, meanG2=meanG2)
@@ -999,7 +999,7 @@
cmat <- cbind(Estimate = object@coef, `Std. Error` = sqrt(diag(object@vcov)))
obj<- object@min
labFun <- object@objFun
-
+
tmp <- new("summary.cogarch.gmm", call = object at call, coef = cmat,
m2logL = 0,
#model = object@model,
@@ -1012,21 +1012,31 @@
# errorfun <- function(estimates, labelFun = "L2"){
# if(LabelFun == "L2"){
-#
+#
# }
-#
+#
# }
setMethod("show", "summary.cogarch.gmm",
function (object)
{
-
- cat("GMM estimation\n\nCall:\n")
+
+
+ if(object@objFun == "PseudoLogLik"){
+ cat("PML estimation\n\nCall:\n")
+ }else{
+ cat("GMM estimation\n\nCall:\n")
+ }
+
print(object@call)
cat("\nCoefficients:\n")
print(coef(object))
- cat("\n",paste0(paste("Log.objFun", object at objFun),":"), object at objFunVal, "\n")
- #cat("objFun", object at min, "\n")
+ if(object at objFun == "PseudoLogLik"){
+ cat("\n",paste0(paste("-2 ", object at objFun),":"), 2*object at objFunVal, "\n")
+ }else{
+ cat("\n",paste0(paste("Log.objFun", object at objFun),":"), object at objFunVal, "\n")
+ }
+ #cat("objFun", object at min, "\n")
}
)
@@ -1040,7 +1050,7 @@
data<- data[!is.na(data)]
obj<- object@min
labFun <- object@objFun
-
+
tmp <- new("summary.cogarch.gmm.incr", call = object at call, coef = cmat,
m2logL = m2logL,
objFun = labFun,
@@ -1055,23 +1065,23 @@
tmp
}
)
-#
+#
setMethod("show", "summary.cogarch.gmm.incr",
function (object)
{
-
+
cat("Two Stages GMM estimation \n\nCall:\n")
print(object@call)
cat("\nCoefficients:\n")
print(coef(object))
# cat("\n-2 log L:", object at m2logL, "\n")
cat("\n",paste0(paste("Log.objFun", object at objFun),":"), object at objFunVal, "\n")
-
+
cat(sprintf("\n\nNumber of increments: %d\n",object at NumbI))
cat(sprintf("\nAverage of increments: %f\n",object at MeanI))
cat(sprintf("\nStandard Dev. of increments: %f\n",object at SdI))
if(!is.null(object at logLI)){
-
+
cat(sprintf("\n\n-2 log L of increments: %f\n",-2*object at logLI))
}
cat("\nSummary statistics for increments:\n")
Modified: pkg/yuima/R/PseudoLogLikCOGARCH.R
===================================================================
--- pkg/yuima/R/PseudoLogLikCOGARCH.R 2016-02-25 03:30:09 UTC (rev 407)
+++ pkg/yuima/R/PseudoLogLikCOGARCH.R 2016-03-02 12:37:49 UTC (rev 408)
@@ -135,9 +135,9 @@
mycoef <- start
# min <- out$value
objFun <- "PseudoLogLik"
- min <- numeric()
+ # min <- numeric()
+ min <- out$value
-
res<-new("cogarch.gmm", call = call, coef = coef, fullcoef = unlist(coef),
vcov = vcov, min = min, details = list(),
method = character(),
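
The change in PseudoLogLikCOGARCH.R stores the minimised objective (min <- out$value) instead of an empty numeric, which is what lets the updated show() method in MM.COGARCH.R print a finite "-2 PseudoLogLik" value. Schematically, under the assumption that summary() fills the objFunVal slot read by show() from the fit's min slot; fit_pml is a hypothetical pseudo-likelihood fit returned by the routine in this file:

sum_pml <- summary(fit_pml)       # object of class "summary.cogarch.gmm"
sum_pml@objFunVal                 # minimised objective, previously numeric(0)
2 * sum_pml@objFunVal             # the "-2 PseudoLogLik" value printed by show()
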