################################################################################
################################ 3. Simulations ################################
################################################################################

# 1. Calculate MAD for different estimators
library(snowfall)                          # parallel computing
sfInit(parallel = TRUE, cpus = 4, type = "SOCK")
# sfInit(parallel = FALSE)                 # for trouble-shooting
sfLibrary(fGarch)
sfLibrary(actuar)
sfLibrary(evd)
# Note: the following functions are defined in the "Required Functions" section
# below and must be sourced before this block is run.
sfExport("ES.estimates", "ES.true", "ES.trimmed", "ES.hoga", "ES.POT", "ES.Sca04")

wrapper <- function(d, reps, alpha, n){
  ES.estimates(d, reps, alpha, n)
}

start <- Sys.time()
res <- sfLapply(1:12, wrapper, reps = 10000, alpha = (1:50) / 1000, n = 2000)
Burr1        <- res[[1]]
Burr2        <- res[[2]]
Pareto3      <- res[[3]]
Pareto1.5    <- res[[4]]
tInf         <- res[[5]]
t10          <- res[[6]]
GARCH.Inf    <- res[[7]]
GARCH.25     <- res[[8]]
AR.GARCH.Inf <- res[[9]]
AR.GARCH.25  <- res[[10]]
AR.Inf       <- res[[11]]
AR.10        <- res[[12]]

cat("Average of k*{ES} for Burr1 = ",     Burr1$k,     "\n")
cat("Average of k*{ES} for Burr2 = ",     Burr2$k,     "\n")
cat("Average of k*{ES} for Pareto3 = ",   Pareto3$k,   "\n")
cat("Average of k*{ES} for Pareto1.5 = ", Pareto1.5$k, "\n")
cat("Average of k*{ES} for tInf = ",      tInf$k,      "\n")
cat("Average of k*{ES} for t10 = ",       t10$k,       "\n")
# Values of k*{ES} for Table 2
cat("Average of k*{ES} for GARCH.Inf = ",    GARCH.Inf$k,    "\n")
cat("Average of k*{ES} for GARCH.25 = ",     GARCH.25$k,     "\n")
cat("Average of k*{ES} for AR.GARCH.Inf = ", AR.GARCH.Inf$k, "\n")
cat("Average of k*{ES} for AR.GARCH.25 = ",  AR.GARCH.25$k,  "\n")
cat("Average of k*{ES} for AR.Inf = ",       AR.Inf$k,       "\n")
cat("Average of k*{ES} for AR.10 = ",        AR.10$k,        "\n")
print(Sys.time() - start)
sfStop()

################################################################################
################################### FIGURE 1 ###################################
################################################################################
alpha <- (1:50) / 1000

# One panel of Figures 1 and 2: MAD of each competitor relative to the MAD of
# the Hoga estimator (row 1), on a log scale; uses the global alpha grid.
# The Scaillet ratio (row 4) is computed by ES.estimates() but not plotted.
MAD.panel <- function(res, label, ylim){
  par(mar = c(4.5, 2, 1.5, 3))
  MAD.ratio.nonpar   <- res$MAD[3, ] / res$MAD[1, ]
  MAD.ratio.Hill     <- res$MAD[2, ] / res$MAD[1, ]
  MAD.ratio.GPD      <- res$MAD[5, ] / res$MAD[1, ]
  MAD.ratio.Pickands <- res$MAD[6, ] / res$MAD[1, ]
  plot(alpha, MAD.ratio.nonpar, type = "l", xlab = "", ylab = "",
       xaxs = "i", yaxs = "i", log = "y", ylim = ylim)
  lines(alpha, MAD.ratio.Hill,     lty = "dotted")
  lines(alpha, MAD.ratio.GPD,      lty = "dotdash")
  lines(alpha, MAD.ratio.Pickands, lty = "longdash")
  abline(h = 1)
  legend('bottomright', legend = c("non-par.", "Hill", "POT", "Pickands"),
         lty = c("solid", "dotted", "dotdash", "longdash"), cex = 0.5)
  mtext(expression('p'[n]), side = 1, line = 2,   cex = 1,   outer = FALSE)
  mtext("MAD ratio",        side = 2, line = 2.5, cex = 1,   outer = FALSE)
  mtext(label,              side = 1, line = 3.5, cex = 1.1, outer = FALSE)
}

split.screen(figs = c(3, 2))
par(oma = c(1, 2, 0, 0))
screen(1); MAD.panel(Burr1,     "(a) Burr(1, 1, 1.5)",  ylim = c(0.5, 5))
screen(2); MAD.panel(Burr2,     "(b) Burr(1, 0.25, 6)", ylim = c(0.5, 5))
screen(3); MAD.panel(Pareto3,   "(c) Pa(3)",            ylim = c(0.5, 5))
screen(4); MAD.panel(Pareto1.5, "(d) Pa(1.5)",          ylim = c(0.5, 5))
screen(5); MAD.panel(tInf,      "(e) N(0,1)",           ylim = c(0.2, 20))
screen(6); MAD.panel(t10,       "(f) t(10)",            ylim = c(0.2, 20))
close.screen(all = TRUE)

################################################################################
################################### FIGURE 2 ###################################
################################################################################
split.screen(figs = c(3, 2))
par(oma = c(1, 2, 0, 0))
screen(1); MAD.panel(GARCH.Inf,    "(a) GARCH-N(0,1)",    ylim = c(0.5, 5))
screen(2); MAD.panel(GARCH.25,     "(b) GARCH-t(2.5)",    ylim = c(0.5, 5))
screen(3); MAD.panel(AR.GARCH.Inf, "(c) AR-GARCH-N(0,1)", ylim = c(0.5, 5))
screen(4); MAD.panel(AR.GARCH.25,  "(d) AR-GARCH-t(2.5)", ylim = c(0.5, 5))
screen(5); MAD.panel(AR.Inf,       "(e) AR-N(0,1)",       ylim = c(0.2, 20))
screen(6); MAD.panel(AR.10,        "(f) AR-t(10)",        ylim = c(0.2, 20))
close.screen(all = TRUE)

################################################################################
# True ES values for the GARCH-t(2.5) and GARCH-t(5) specifications
alpha <- (1:50) / 1000
spec.2 <- garchSpec(model = list(omega = 10e-06, alpha = 0.3, beta = 0.6, shape = 2.5),
                    cond.dist = "std")
ES.trueval.2 <- ES.true(spec.2, alpha, n = 10000, N = 10000)
spec.3 <- garchSpec(model = list(omega = 10e-06, alpha = 0.3, beta = 0.6, shape = 5),
                    cond.dist = "std")
ES.trueval.3 <- ES.true(spec.3, alpha, n = 10000, N = 10000)
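# A quick look at the output structure (illustrative, not part of the original
# analysis): every entry of `res` is a list with a 6 x length(alpha) MAD matrix
# (rows "Hoga", "Hill", "non-par.", "Scaillet", "GPD", "Pickands") and the
# average selected threshold k*.
str(Burr1)
Burr1$MAD["Hoga", 1:5]   # MAD of the Hoga estimator at the five smallest p_n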
################################################################################
# Required Functions

# Function to calculate different ES estimates
ES.estimates <- function(d, reps, alpha, n){
  ES.hil <- ES.H <- ES.P <- ES.lx <- ES.Sca <- ES.GPD <- ES.bf <-
    matrix(NA, nrow = reps, ncol = length(alpha))
  k.star <- numeric(reps)
  coverage.hil <- coverage.hog <- 0

  # 1. Specify models with dependence
  spec.1 <- garchSpec(model = list(omega = 10e-06, alpha = 0.3, beta = 0.6),
                      cond.dist = "norm")
  spec.2 <- garchSpec(model = list(omega = 10e-06, alpha = 0.3, beta = 0.6, shape = 2.5),
                      cond.dist = "std")
  spec.3 <- garchSpec(model = list(ar = 0.7, omega = 10e-06, alpha = 0.3, beta = 0.6),
                      cond.dist = "norm")
  spec.4 <- garchSpec(model = list(ar = 0.7, omega = 10e-06, alpha = 0.3, beta = 0.6, shape = 2.5),
                      cond.dist = "std")
  spec.5 <- garchSpec(model = list(ar = 0.7, omega = 1, alpha = 0, beta = 0),
                      cond.dist = "norm")
  spec.6 <- garchSpec(model = list(ar = 0.7, omega = 1, alpha = 0, beta = 0, shape = 10),
                      cond.dist = "std")

  # 2. Calculate true values of ES
  ES.trueval <- numeric(length(alpha))
  if(d == 1){
    for(i in 1:length(alpha)){
      ES.trueval[i] <- -1/alpha[i] * integrate(qburr, lower = (1-alpha[i]), upper = 1,
                                               shape1 = 1, shape2 = 1.5)$value
    }
  } else if(d == 2){
    for(i in 1:length(alpha)){
      ES.trueval[i] <- -1/alpha[i] * integrate(qburr, lower = (1-alpha[i]), upper = 1,
                                               shape1 = 0.25, shape2 = 6)$value
    }
  } else if(d == 3){
    for(i in 1:length(alpha)){
      ES.trueval[i] <- -1/alpha[i] * integrate(qpareto1, lower = (1-alpha[i]), upper = 1,
                                               shape = 3, min = 1)$value
    }
  } else if(d == 4){
    for(i in 1:length(alpha)){
      ES.trueval[i] <- -1/alpha[i] * integrate(qpareto1, lower = (1-alpha[i]), upper = 1,
                                               shape = 1.5, min = 1)$value
    }
  } else if(d == 5){
    for(i in 1:length(alpha)){
      ES.trueval[i] <- -1/alpha[i] * integrate(qt, lower = (1-alpha[i]), upper = 1, df = Inf)$value
    }
  } else if(d == 6){
    for(i in 1:length(alpha)){
      ES.trueval[i] <- -1/alpha[i] * integrate(qt, lower = (1-alpha[i]), upper = 1, df = 10)$value
    }
  } else if(d == 7){
    ES.trueval <- ES.true(spec.1, alpha, n = 100000, N = 10000)
  } else if(d == 8){
    ES.trueval <- ES.true(spec.2, alpha, n = 100000, N = 10000)
  } else if(d == 9){
    ES.trueval <- ES.true(spec.3, alpha, n = 100000, N = 10000)
  } else if(d == 10){
    ES.trueval <- ES.true(spec.4, alpha, n = 100000, N = 10000)
  } else if(d == 11){
    ES.trueval <- ES.true(spec.5, alpha, n = 100000, N = 10000)
  } else if(d == 12){
    ES.trueval <- ES.true(spec.6, alpha, n = 100000, N = 10000)
  }

  # 3. Calculate MAD for the different ES estimates
  for(i in 1:reps){
    if(d == 1){
      X <- -rburr(n, shape1 = 1, shape2 = 1.5)
      # Interpretation: the closer shape2 is to 1, i.e., the closer to an exact
      # Pareto tail, the better.
    } else if(d == 2){
      X <- -rburr(n, shape1 = 0.25, shape2 = 6)
    } else if(d == 3){
      X <- -rpareto1(n, shape = 3, min = 1)
    } else if(d == 4){
      X <- -rpareto1(n, shape = 1.5, min = 1)
    } else if(d == 5){
      X <- rt(n, df = Inf)
    } else if(d == 6){
      X <- rt(n, df = 10)
    } else if(d == 7){
      X <- -abs(as.numeric(garchSim(spec.1, n)))
    } else if(d == 8){
      X <- -abs(as.numeric(garchSim(spec.2, n)))
    } else if(d == 9){
      X <- -abs(as.numeric(garchSim(spec.3, n)))
    } else if(d == 10){
      X <- -abs(as.numeric(garchSim(spec.4, n)))
    } else if(d == 11){
      X <- -abs(as.numeric(garchSim(spec.5, n)))
    } else if(d == 12){
      X <- -abs(as.numeric(garchSim(spec.6, n)))
    }
    temp <- ES.trimmed(X, alpha)
    ES.lx[i, ]  <- abs(temp[1, ] - ES.trueval)   # ES.trueval is a vector
    ES.hil[i, ] <- abs(temp[2, ] - ES.trueval)
    ES.hog.help <- ES.hoga(X, alpha)
    ES.H[i, ]   <- abs(ES.hog.help$ES.H - ES.trueval)
    k.star[i]   <- ES.hog.help$k.H
    ES.P[i, ]   <- abs(ES.hog.help$ES.P - ES.trueval)
    ES.Sca[i, ] <- abs(ES.Sca04(X, alpha) - ES.trueval)
    ES.GPD[i, ] <- abs(ES.POT(X, alpha)$ES - ES.trueval)
  }
  return(list(
    MAD = matrix(c(colMeans(ES.H), colMeans(ES.hil), colMeans(ES.lx),
                   colMeans(ES.Sca), colMeans(ES.GPD), colMeans(ES.P)),
                 nrow = 6, ncol = length(alpha), byrow = TRUE,
                 dimnames = list(c("Hoga", "Hill", "non-par.", "Scaillet", "GPD", "Pickands"))),
    k = mean(k.star)))
}

################################################################################
# Calculate true values of ES by simulation [Hil15, p.17 (Footnote 8)]
ES.true <- function(spec, alpha, n, N){
  no.alpha <- length(alpha)
  ES.est <- matrix(NA, nrow = N, ncol = no.alpha)
  for(i in 1:N){
    X <- -abs(as.numeric(garchSim(spec, n)))
    X.partial <- sort.int(X, partial = floor(alpha[no.alpha] * n))
    X.partial <- sort.int(X.partial[1 : floor(alpha[no.alpha] * n)], method = "quick")
    for(a in 1:no.alpha){
      q.hat <- X.partial[floor(alpha[a] * n)]                 # [Hil15, p.6]
      ES.est[i, a] <- 1/alpha[a] * mean( X * (X <= q.hat) )
    }
  }
  return( colMeans(ES.est) )
}
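# Smoke test (not in the original script): a small serial run of ES.estimates()
# to check the plumbing before the full parallel run with reps = 10000. Requires
# ES.trimmed(), ES.hoga(), ES.POT() and ES.Sca04() below to be sourced first;
# with reps = 100 the MADs are only indicative.
test.t10 <- ES.estimates(d = 6, reps = 100, alpha = (1:50) / 1000, n = 2000)
round(test.t10$MAD[, c(1, 25, 50)], 4)   # MADs at p_n = 0.001, 0.025, 0.05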
################################################################################
# Hill (2015): trimmed and bias-corrected nonparametric ES
ES.trimmed <- function(X, alpha){
  n <- length(X)
  no.alpha <- length(alpha)
  core.ES <- nonpar.ES <- optbias.ES <- numeric(no.alpha)

  # 1. Calculate nonparametric and core estimator [Hil15, p.2 and p.6]
  iota <- 10^(-10)
  k.n <- max( 1, floor(0.25 * n^(2/3) / (log(n))^(2*iota)) )   # [Hil15, p.18]
  q.hat <- sort.int(X, partial = floor(alpha[no.alpha] * n))   # [Hil15, p.6]
  q.hat <- sort.int(q.hat[1 : floor(alpha[no.alpha] * n)])     # [Hil15, p.6]
  X.neg <- pmin.int(X, 0)
  X.sort <- sort.int(X.neg)
  X.k.neg <- X.sort[k.n]
  for(i in 1:no.alpha){
    core.ES[i] <- -1/(alpha[i] * n) *
      sum(X * (X <= q.hat[floor(alpha[i] * n)]) * (X >= X.k.neg))   # [Hil15, p.6]
    nonpar.ES[i] <- -1/(alpha[i] * n) * sum(X * (X <= q.hat[floor(alpha[i] * n)]))
  }

  # 2. Calculate the bias-corrected estimator [Hil15, p.14]
  m.n.min <- max( 1, floor(0.05 * k.n * log(n)^(iota)) )
  m.n.max <- max( 1, floor(  10 * k.n * log(n)^(iota)) )
  Hill <- cumsum( log( -X.sort[1 : m.n.max] ) ) / (1 : m.n.max) -
    log( -X.sort[1 : m.n.max] )    # -X.sort because the data are negative
  Hill <- 1 / Hill
  for(i in 1:no.alpha){
    R.n.2 <- -1 / alpha[i] * ( Hill / (Hill - 1) * k.n / n * X.k.neg )
    m.n.lambda.hat <- which.min( abs(core.ES[i] + R.n.2[m.n.min : m.n.max] - nonpar.ES[i]) )
    m.n.lambda.hat <- m.n.lambda.hat + m.n.min - 1
    optbias.ES[i] <- core.ES[i] + R.n.2[m.n.lambda.hat]
  }
  return( matrix(c(-nonpar.ES, -optbias.ES), nrow = 2, ncol = no.alpha, byrow = TRUE) )
}

################################################################################
# Hoga (2017): extreme ES estimation with data-driven threshold k*
ES.hoga <- function(X, alpha){
  n <- length(X)
  no.alpha <- length(alpha)

  # 1. Choose k* by minimising the sup-distance between the semi-parametric and
  #    the non-parametric ES estimates
  Y <- sort.int(X, method = "quick")            # focus on the left tail
  Y <- Y[Y < 0]
  k.min <- floor(n * 0.05)                      # minimal value of k
  k.max <- min(floor(n^0.9), length(Y) - 1)     # maximal value of k
  sup.H <- H <- rep.int(NA, k.max)              # for the Hill estimator
  sup.P <- P <- rep.int(NA, k.max)              # for the Pickands estimator
  for(k in k.min : k.max){
    # Hill estimate based on the (k+1) largest order statistics
    H[k] <- 1/k * sum(log( Y[1 : (k+1)] / Y[(k+1)] ))
    ES.sp <- ( k / (1 : k.max) )^(H[k]) * Y[k+1] / (1 - H[k])   # semi-parametric estimate
    ES.np <- 1 / (1 : k.max) * cumsum(Y[1 : k.max])             # non-parametric estimate
    sup.H[k] <- max( abs(ES.np - ES.sp) )
    # Pickands estimate based on the (k+1) largest order statistics
    P[k] <- 1 / log(2) * log( (Y[floor(k/4)] - Y[floor(k/2)]) / (Y[floor(k/2)] - Y[floor(k)]) )
    ES.sp <- ( k / (1 : k.max) )^(P[k]) * Y[k+1] / (1 - P[k])   # semi-parametric estimate
    sup.P[k] <- max( abs(ES.np - ES.sp) )
  }
  k.star.H <- which.min(sup.H)
  k.star.P <- which.min(sup.P)

  # 2. Calculate ES ...
  # ... for the Hill estimator
  H.EQ <- Y[floor(k.star.H)+1] * ( k.star.H / (n*alpha) )^(H[k.star.H])
  ES.semipar.H <- H.EQ / (1 - H[k.star.H])
  # ... for the Pickands estimator
  P.EQ <- Y[floor(k.star.P)+1] * ( k.star.P / (n*alpha) )^(P[k.star.P])
  ES.semipar.P <- P.EQ / (1 - P[k.star.P])
  return( list(ES.H = ES.semipar.H, k.H = k.star.H, ES.P = ES.semipar.P, k.P = k.star.P) )
}

################################################################################
# GPD / peaks-over-threshold
ES.POT <- function(X, alpha){
  n <- length(X)
  no.alpha <- length(alpha)
  X <- -X                                    # negate data
  # 1. Choice of threshold
  min.X <- min(X)
  X <- X - min.X + 0.0001                    # shift data to the positive half-line
  u.WP <- as.numeric(quantile(X, probs = 0.9, type = 1))  # recommended by [CES14] and [QFP01]
  k.star <- floor(0.1 * n)
  # 2. Calculate ES (see, e.g., McNeil and Frey (2000, Eqns. (10) & (16)))
  POT <- fpot(X, threshold = u.WP, model = "gpd", std.err = FALSE)$estimate
  sigma <- as.numeric( POT[1] )
  xi <- as.numeric( POT[2] )
  VaR.GPD <- u.WP + sigma/xi * ( (n*alpha / k.star)^(-xi) - 1 )
  ES.GPD <- VaR.GPD / (1-xi) + (sigma - xi * u.WP) / (1-xi)
  ES.GPD <- ES.GPD + min.X - 0.0001          # undo the shift to the positive half-line
  return( list(ES = -ES.GPD, k = k.star) )
}
# Note: the order statistics Y[k] serve as potential upper thresholds for the
# POT approach. For POT to make sense, k should be chosen such that
# (1 - k/n) < (1 - max(alpha)) or, equivalently, k/n > max(alpha).
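# Sanity check on the fixed POT threshold (illustrative, not from the original
# analysis): with k* = floor(0.1 * n) exceedances, the extrapolation condition
# k/n > max(alpha) holds for the grids used here, since 0.1 > 0.05.
n.check <- 2000
alpha.check <- (1:50) / 1000
stopifnot(floor(0.1 * n.check) / n.check > max(alpha.check))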
################################################################################
# Scaillet (2004): kernel estimator
ES.Sca04 <- function(X, alpha){
  n <- length(X)
  no.alpha <- length(alpha)
  h <- sd(X) * n^(-1/5)   # bandwidth used by Scaillet (2004, Sec. 4.2)

  # 1. Kernel estimate of VaR
  int.kernel <- function(q, p){
    int <- 1/n * sum(pnorm( (q-X)/h ))
    return(int - p)
  }
  q <- numeric(no.alpha)
  for(i in 1:no.alpha){
    e <- try( uniroot(int.kernel, interval = c(-100, 100), p = alpha[i],
                      extendInt = "yes")$root, silent = TRUE )
    if(inherits(e, "try-error")){
      # if the kernel-smoothed quantile cannot be calculated, fall back on the
      # simple empirical quantile
      q[i] <- as.numeric(quantile(X, probs = alpha[i], type = 1))
    } else{
      q[i] <- e
    }
  }

  # 2. (Gaussian) kernel estimate of ES
  ES.Sca04 <- numeric(no.alpha)
  for(i in 1:no.alpha){
    ES.Sca04[i] <- 1 / (n * alpha[i]) * sum(X * pnorm((q[i] - X) / h))
  }
  return( ES.Sca04 )
}
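# Illustration (not from the original analysis): on iid N(0,1) data the kernel
# estimate should track the closed-form left-tail ES of the standard normal,
# ES_p = -dnorm(qnorm(p)) / p (a negative number).
set.seed(1)
Z <- rnorm(2000)
p <- c(0.01, 0.025, 0.05)
cbind(kernel = ES.Sca04(Z, p), true = -dnorm(qnorm(p)) / p)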
################################################################################
############### Brute-force estimation for GARCH-N(0,1) ########################
################################################################################
n <- 2000
reps <- 1000
alpha <- (1:50) / 1000
ES.BF.1 <- ES.hog.1 <- matrix(NA, nrow = reps, ncol = length(alpha))

# Brute force for GARCH(1,1) models
spec <- garchSpec(model = list(omega = 10e-06, alpha = 0.3, beta = 0.6), cond.dist = "norm")
start <- Sys.time()
ES.trueval.1 <- ES.true(spec, alpha, n = 100000, N = 10000)   # takes about 1 hour
print(Sys.time() - start)
for(i in 1:reps){
  ES.BF.1[i, ] <- abs(ES.BF.GARCH(spec, alpha, reps, n) - ES.trueval.1)
  X <- as.numeric(garchSim(spec, n))
  ES.hog.1[i, ] <- abs(ES.hog(-abs(X), alpha) - ES.trueval.1)
}
print(Sys.time() - start)
MAD <- matrix( c(colMeans(ES.BF.1), colMeans(ES.hog.1)), nrow = 2, ncol = length(alpha),
               byrow = TRUE, dimnames = list(c("BF.1", "Hog.1")))
plot(alpha, colMeans(ES.BF.1) / colMeans(ES.hog.1))

####### Required functions for the brute-force approach #######

# Parametric estimate
ES.BF.GARCH <- function(spec, alpha, reps, n){
  # 0. Preliminaries
  no.alpha <- length(alpha)
  ES.est <- matrix(NA, nrow = reps, ncol = no.alpha)
  # 1. Estimate the model; re-draw until an admissible, stationary fit is obtained
  cond <- TRUE
  while(cond){
    X <- as.numeric(garchSim(spec, n))
    outp <- try(garchFit(formula = ~ garch(1,1), data = X, init.rec = "mci",
                         cond.dist = "QMLE", include.mean = FALSE,
                         algorithm = "lbfgsb+nm", trace = FALSE), silent = TRUE)
    garch.coef <- try(outp@fit$matcoef, silent = TRUE)
    omega1 <- try(garch.coef[1], silent = TRUE)
    alpha1 <- try(garch.coef[2], silent = TRUE)
    beta1  <- try(garch.coef[3], silent = TRUE)
    if(is.na(omega1)){omega1 <- 1}
    if(is.na(alpha1)){alpha1 <- 1}
    if(is.na(beta1)){beta1 <- 1}
    if(inherits(outp, "try-error")){
      cond <- TRUE
    } else{
      cond <- ((beta1 >= 1) | (omega1 < 0) | (alpha1 < 0) | (beta1 < 0) | ((alpha1 + beta1) >= 1))
    }
    if(!cond){   # only adopt admissible parameter estimates
      garch.coef <- c(omega1, alpha1, beta1)
      spec <- garchSpec(model = list(omega = garch.coef[1], alpha = garch.coef[2],
                                     beta = garch.coef[3]), cond.dist = "norm")
    }
  }
  # 2. Simulate from the estimated model and estimate ES
  for(i in 1:reps){
    X.boot <- -abs(as.numeric(garchSim(spec, n)))
    X.partial <- sort.int(X.boot, partial = floor(alpha[no.alpha] * n))
    X.partial <- sort.int(X.partial[1 : floor(alpha[no.alpha] * n)], method = "quick")
    for(a in 1:no.alpha){
      q.hat <- X.partial[floor(alpha[a] * n)]                  # [Hil15, p.6]
      ES.est[i, a] <- 1/alpha[a] * mean( X.boot * (X.boot <= q.hat) )
    }
  }
  # 3. Calculate ES
  ES <- colMeans(ES.est)
  return(ES)
}

# Nonparametric estimate (Hill-based variant of ES.hoga)
ES.hog <- function(X, alpha){
  n <- length(X)
  no.alpha <- length(alpha)
  # 1. Choose k* as in ES.hoga, using the Hill estimator only
  Y <- sort.int(X, method = "quick")            # focus on the left tail
  Y <- Y[Y < 0]
  k.min <- floor(n * 0.05)                      # minimal value of k
  k.max <- min(floor(n^0.9), length(Y) - 1)     # maximal value of k
  sup.H <- H <- rep.int(NA, k.max)              # for the Hill estimator
  for(k in k.min : k.max){
    # Hill estimate based on the (k+1) largest order statistics
    H[k] <- 1/k * sum(log( Y[1 : (k+1)] / Y[(k+1)] ))
    ES.sp <- ( k / (1 : k.max) )^(H[k]) * Y[k+1] / (1 - H[k])   # semi-parametric estimate
    ES.np <- 1 / (1 : k.max) * cumsum(Y[1 : k.max])             # non-parametric estimate
    sup.H[k] <- max( abs(ES.np - ES.sp) )
  }
  k.star.H <- which.min(sup.H)
  # 2. Calculate ES for the Hill estimator
  H.EQ <- Y[floor(k.star.H)+1] * ( k.star.H / (n*alpha) )^(H[k.star.H])
  ES.semipar.H <- H.EQ / (1 - H[k.star.H])
  return( ES.semipar.H )
}
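# Illustration (not from the original analysis): for iid t(10) data, ES.hog()
# should roughly reproduce the left-tail ES of the t distribution at moderate
# levels; a single sample, so agreement is only approximate.
set.seed(2)
X.demo <- rt(2000, df = 10)
ES.hog(X.demo, alpha = c(0.01, 0.025))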
################################################################################
################### 3.2 Results for uniform confidence bands ###################
################################################################################
library(actuar)
library(fGarch)
alpha <- (1:100) / 10000

# 1. ARMA-GARCH processes of interest
# Dependent data
spec.1 <- garchSpec(model = list(omega = 10e-06, alpha = 0.3, beta = 0.6), cond.dist = "norm")
spec.2 <- garchSpec(model = list(omega = 10e-06, alpha = 0.3, beta = 0.6, shape = 2.5), cond.dist = "std")
spec.3 <- garchSpec(model = list(ar = 0.7, omega = 10e-06, alpha = 0.3, beta = 0.6), cond.dist = "norm")
spec.4 <- garchSpec(model = list(ar = 0.7, omega = 10e-06, alpha = 0.3, beta = 0.6, shape = 2.5), cond.dist = "std")
spec.5 <- garchSpec(model = list(ar = 0.7, omega = 1, alpha = 0, beta = 0), cond.dist = "norm")
spec.6 <- garchSpec(model = list(ar = 0.7, omega = 1, alpha = 0, beta = 0, shape = 10), cond.dist = "std")
# I.i.d. data
spec.Burr1.5 <- "Burr1.5"
spec.Burr6   <- "Burr6"
spec.Pa3     <- "Pareto3"
spec.Pa1.5   <- "Pareto1.5"
spec.tInf    <- "tInf"
spec.t10     <- "t10"

# Load the functions "Quantile.true", "Quantile.estimates" and "Quantile.hoga" below first.
start <- Sys.time()   # takes about 42 hours
Quantile.spec.1 <- Quantile.true(spec.1, alpha, n = 100000, N = 10000)
Quantile.spec.2 <- Quantile.true(spec.2, alpha, n = 100000, N = 10000)
Quantile.spec.3 <- Quantile.true(spec.3, alpha, n = 100000, N = 10000)
Quantile.spec.4 <- Quantile.true(spec.4, alpha, n = 100000, N = 10000)
Quantile.spec.5 <- Quantile.true(spec.5, alpha, n = 100000, N = 10000)
Quantile.spec.6 <- Quantile.true(spec.6, alpha, n = 100000, N = 10000)
Quantile.Burr1.5 <- -qburr(p = 1-alpha, shape1 = 1, shape2 = 1.5)
Quantile.Burr6   <- -qburr(p = 1-alpha, shape1 = 0.25, shape2 = 6)
Quantile.Pa3     <- -qpareto1(p = 1-alpha, shape = 3, min = 1)
Quantile.Pa1.5   <- -qpareto1(p = 1-alpha, shape = 1.5, min = 1)
Quantile.tInf    <- qt(alpha, df = Inf)
Quantile.t10     <- qt(alpha, df = 10)
print(Sys.time() - start)

start <- Sys.time()   # takes about 3.5 hours
# coverage.spec.1 <- Quantile.estimates(reps=10000, spec.1, Quantile.spec.1, alpha, n=2000)
# coverage.spec.2 <- Quantile.estimates(reps=10000, spec.2, Quantile.spec.2, alpha, n=2000)
# coverage.spec.3 <- Quantile.estimates(reps=10000, spec.3, Quantile.spec.3, alpha, n=2000)
# coverage.spec.4 <- Quantile.estimates(reps=10000, spec.4, Quantile.spec.4, alpha, n=2000)
# coverage.spec.5 <- Quantile.estimates(reps=10000, spec.5, Quantile.spec.5, alpha, n=2000)
# coverage.spec.6 <- Quantile.estimates(reps=10000, spec.6, Quantile.spec.6, alpha, n=2000)
coverage.Burr1.5 <- Quantile.estimates(reps = 10000, spec.Burr1.5, Quantile.Burr1.5, alpha, n = 2000)
coverage.Burr6   <- Quantile.estimates(reps = 10000, spec.Burr6,   Quantile.Burr6,   alpha, n = 2000)
coverage.Pa3     <- Quantile.estimates(reps = 10000, spec.Pa3,     Quantile.Pa3,     alpha, n = 2000)
coverage.Pa1.5   <- Quantile.estimates(reps = 10000, spec.Pa1.5,   Quantile.Pa1.5,   alpha, n = 2000)
coverage.tInf    <- Quantile.estimates(reps = 10000, spec.tInf,    Quantile.tInf,    alpha, n = 2000)
coverage.t10     <- Quantile.estimates(reps = 10000, spec.t10,     Quantile.t10,     alpha, n = 2000)
print(Sys.time() - start)

# For the particular choice alpha = 0.01
no.alpha <- length(alpha)
start <- Sys.time()   # takes about 3.5 hours
# coverage.spec.1a <- Quantile.estimates(reps=10000, spec.1, Quantile.spec.1[no.alpha], alpha=0.01, n=2000)
# coverage.spec.2a <- Quantile.estimates(reps=10000, spec.2, Quantile.spec.2[no.alpha], alpha=0.01, n=2000)
# coverage.spec.3a <- Quantile.estimates(reps=10000, spec.3, Quantile.spec.3[no.alpha], alpha=0.01, n=2000)
# coverage.spec.4a <- Quantile.estimates(reps=10000, spec.4, Quantile.spec.4[no.alpha], alpha=0.01, n=2000)
# coverage.spec.5a <- Quantile.estimates(reps=10000, spec.5, Quantile.spec.5[no.alpha], alpha=0.01, n=2000)
# coverage.spec.6a <- Quantile.estimates(reps=10000, spec.6, Quantile.spec.6[no.alpha], alpha=0.01, n=2000)
coverage.Burr1.5a <- Quantile.estimates(reps = 10000, spec.Burr1.5, Quantile.Burr1.5[no.alpha], alpha = 0.01, n = 2000)
coverage.Burr6a   <- Quantile.estimates(reps = 10000, spec.Burr6,   Quantile.Burr6[no.alpha],   alpha = 0.01, n = 2000)
coverage.Pa3a     <- Quantile.estimates(reps = 10000, spec.Pa3,     Quantile.Pa3[no.alpha],     alpha = 0.01, n = 2000)
coverage.Pa1.5a   <- Quantile.estimates(reps = 10000, spec.Pa1.5,   Quantile.Pa1.5[no.alpha],   alpha = 0.01, n = 2000)
coverage.tInfa    <- Quantile.estimates(reps = 10000, spec.tInf,    Quantile.tInf[no.alpha],    alpha = 0.01, n = 2000)
coverage.t10a     <- Quantile.estimates(reps = 10000, spec.t10,     Quantile.t10[no.alpha],     alpha = 0.01, n = 2000)
print(Sys.time() - start)
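# Interpretation aid (illustrative, not from the original script): the bands are
# built at the 90% level, so the empirical coverages `cov` should be close to
# 0.90, and `k` is the average selected threshold k*.
rbind(Burr1.5 = unlist(coverage.Burr1.5), t10 = unlist(coverage.t10))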
################################################################################
# Uniform confidence bands
Quantile.hoga <- function(X, alpha){
  n <- length(X)
  no.alpha <- length(alpha)

  # 1. Compute the optimal k* following [Dea16+, p.16]
  #    ("Tail Index Estimation: Quantile Driven Threshold Selection")
  Y <- sort.int(X, method = "quick")            # focus on the left tail
  Y <- Y[Y < 0]
  k.min <- floor(n * 0.05)                      # minimal value of k
  k.max <- min(floor(n^0.9), length(Y) - 1)     # maximal value of k
  sup.H <- H <- rep.int(NA, k.max)
  for(k in k.min : k.max){
    H[k] <- 1/k * sum(log( Y[1 : (k+1)] / Y[(k+1)] ))
    sup.H[k] <- max( abs(Y[2 : (k.max+1)] - (k/(1 : k.max))^( H[k] ) * Y[(k+1)]) )
  }
  k.star <- which.min(sup.H)
  # Leave the following line commented out unless you want to reproduce Figure 5:
  # k.star <- 1060

  # 2. Calculate quantile estimates
  d.n.t <- k.star / (n*alpha)
  H.EQ <- Y[floor(k.star)+1] * d.n.t^(H[k.star])

  # 3. Calculate the asymptotic variance [Hil10, p.1412]
  # 3.1 Preliminaries
  gamma.n <- max( 1, floor( k.star^0.25 ) )  # see http://www.unc.edu/~jbhill/tail_estim_GAUSS_v1.txt
  W.i <- log( pmax.int(X / Y[floor(k.star)+1], 1) ) - (k.star / n) * H[k.star]
  # 3.2 Actual calculation
  sigma.n.hat.sq <- sum(W.i^2)
  for(h in 1:gamma.n){   # use the Bartlett kernel max(0, 1-abs(x))
    Lag.h <- (1 - h / gamma.n) * sum( W.i[(h+1):n] * W.i[1:(n-h)] )
    sigma.n.hat.sq <- sigma.n.hat.sq + 2 * Lag.h
  }
  sigma.n.hat.sq <- (1 / k.star) * sigma.n.hat.sq

  # 4. Calculate the 90%-confidence bands
  CB.90 <- matrix(NA, nrow = 3, ncol = no.alpha)
  CB.90[1, ] <- H.EQ * exp( 1.64 * log(d.n.t) * sqrt(sigma.n.hat.sq) / sqrt(k.star))
  CB.90[2, ] <- H.EQ
  CB.90[3, ] <- H.EQ * exp(-1.64 * log(d.n.t) * sqrt(sigma.n.hat.sq) / sqrt(k.star))
  return( list(VaR = CB.90, k = k.star) )
}

# Calculate true values of the quantiles by simulation [Hil15, p.17 (Footnote 8)]
Quantile.true <- function(spec, alpha, n, N){
  no.alpha <- length(alpha)
  q.hat <- matrix(NA, nrow = N, ncol = no.alpha)
  for(i in 1:N){
    X <- -abs(as.numeric(garchSim(spec, n)))
    X.partial <- sort.int(X, partial = floor(alpha[no.alpha] * n))
    X.partial <- sort.int(X.partial[1 : floor(alpha[no.alpha] * n)], method = "quick")
    for(a in 1:no.alpha){
      q.hat[i, a] <- X.partial[floor(alpha[a] * n)]            # [Hil15, p.6]
    }
  }
  return( colMeans(q.hat) )
}

# Wrapper function
Quantile.estimates <- function(reps, spec, Quantile.true, alpha, n){
  no.alpha <- length(alpha)
  coverage <- 0
  k.star <- 0
  for(i in 1:reps){
    if(identical(spec, 'Burr1.5')){
      X <- -rburr(n, shape1 = 1, shape2 = 1.5)
      # Interpretation: the closer shape2 is to 1, i.e., the closer to an exact
      # Pareto tail, the better.
    } else if(identical(spec, 'Burr6')){
      X <- -rburr(n, shape1 = 0.25, shape2 = 6)
    } else if(identical(spec, 'Pareto3')){
      X <- -rpareto1(n, shape = 3, min = 1)
    } else if(identical(spec, 'Pareto1.5')){
      X <- -rpareto1(n, shape = 1.5, min = 1)
    } else if(identical(spec, 'tInf')){
      X <- rt(n, df = Inf)
    } else if(identical(spec, 't10')){
      X <- rt(n, df = 10)
    } else{
      X <- -abs(as.numeric(garchSim(spec, n)))
    }
    Q.hog <- Quantile.hoga(X, alpha)
    k.star <- k.star + Q.hog$k               # to calculate the average k*
    Q.hog <- Q.hog$VaR
    Null.no.alpha <- rep.int(0, no.alpha)
    coverage <- coverage +
      1*( identical(pmax.int(Q.hog[1, ] - Quantile.true, 0), Null.no.alpha) ) *
      1*( identical(pmin.int(Q.hog[3, ] - Quantile.true, 0), Null.no.alpha) )
  }
  return( list(cov = coverage / reps, k = k.star / reps) )
}
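# Usage sketch (illustrative data, not from the paper): a 90% uniform VaR band
# for iid t(10) losses; row 2 of $VaR is the point estimate, rows 1 and 3
# bracket it.
set.seed(3)
band <- Quantile.hoga(rt(2000, df = 10), alpha = c(0.001, 0.005, 0.01))
band$VaR
band$k   # selected threshold k*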
################################################################################
################################ 4. Application ################################
################################################################################

################################################################################
########################## 4.1 Unconditional Analysis ##########################
################################################################################
# Data source: Yahoo Finance
library(fGarch)

# Time period before the Porsche takeover
VW.data <- read.csv("VOW.csv")
VW.data <- VW.data[!VW.data[, 2] %in% "null", ]   # delete rows with nulls
VW <- VW.data$Adj.Close              # VW share prices from 1995-03-24 to 2008-10-24
VW <- as.numeric(as.character(VW))   # convert "factor" to "numeric"
Datum <- VW.data$Date[-1]            # read in dates and ...
Datum <- as.Date(Datum)              # ... convert to date format
X <- diff(log(VW))                   # investigate log-returns

################################################################################
################################### FIGURE 3 ###################################
################################################################################
plot(Datum, X, type = "l", xlab = "Date", ylab = "VW log-returns", xaxs = "i", yaxs = "i")
# Stationarity? Yes:
GARCH.fit <- garchFit(formula = ~ arma(1,0) + garch(1,1), data = X, cond.dist = "sstd")
# fits an AR(1)-GARCH(1,1) with skewed-t innovations
garch.coef <- coef(GARCH.fit)
summary(GARCH.fit)   # LB tests: no correlation left in (raw and squared) standardized residuals
plot(residuals(GARCH.fit) / volatility(GARCH.fit), type = "p")
# standardized residuals look reasonably good!

################################################################################
################################### FIGURE 4 ###################################
################################################################################
# Test whether gamma > 0 using a Pareto quantile plot and a Hill plot
H <- function(X, k.min, k.max, AsVar){
  n <- length(X)
  Y <- sort.int(X, decreasing = TRUE, method = "quick")
  Y <- Y[Y > 0]
  log_Y <- log(Y)
  Hill <- cumsum(log_Y[1 : (k.max - 1)]) / (1 : (k.max - 1)) - log_Y[2 : k.max]
  # Asymptotic variance
  sigma.n.hat.sq <- rep.int(NA, (k.max - 1))
  if(AsVar == 'Yes'){
    # Calculate the asymptotic variance [Hil10, p.1412]
    for(k in 1 : (k.max - 1)){   # Hill[k] is defined for k = 1, ..., k.max - 1
      # Preliminaries
      gamma.n <- max( 1, floor( k^0.25 ) )  # see http://www.unc.edu/~jbhill/tail_estim_GAUSS_v1.txt
      W.i <- log( pmax.int(X / Y[floor(k)+1], 1) ) - (k / n) * Hill[k]
      # Actual calculation
      sigma.n.hat.sq[k] <- sum(W.i^2)
      for(h in 1:gamma.n){   # use the Bartlett kernel max(0, 1-abs(x))
        Lag.h <- (1 - h / gamma.n) * sum( W.i[(h+1):n] * W.i[1:(n-h)] )
        sigma.n.hat.sq[k] <- sigma.n.hat.sq[k] + 2 * Lag.h
      }
      sigma.n.hat.sq[k] <- (1 / k) * sigma.n.hat.sq[k]
    }
  }
  return(list(H = Hill[(k.min-1) : (k.max-1)], AsVar = sigma.n.hat.sq[(k.min-1) : (k.max-1)]))
}
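# Quick check of H() (illustrative, not from the original analysis): for iid
# Pareto(3) data the Hill estimates should fluctuate around the true extreme
# value index 1/3.
set.seed(4)
H(rpareto1(5000, shape = 3, min = 1), k.min = 50, k.max = 500, AsVar = "No")$H[1:5]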
# Pareto quantile plots and Hill plots for the non-shifted and shifted losses;
# also used for the standardized residuals in Section 4.2.
pareto.hill.plots <- function(X.ns, X.s, k.min = 10, k.max = 2000){
  par(mar = c(6.1, 5.1, 1.1, 2.1))
  split.screen(figs = c(2, 2))
  screen(1)   # Pareto quantile plot (non-shifted)
  X.ns <- X.ns[X.ns > 0]
  n <- length(X.ns)
  x <- -log((1:n) / (n+1))
  y <- log(sort.int(X.ns, decreasing = TRUE, method = "quick"))
  plot(x, y, xlab = "-log(j/(n+1))", ylab = expression('log X'[(j)]))
  mtext("(a)", side = 1, line = 5)
  screen(2)   # Hill plot (non-shifted)
  Hill <- H(X.ns, k.min, k.max, AsVar = "No")$H
  plot(k.min : k.max, Hill, type = "l", xaxs = "i", yaxs = "i",
       xlab = expression('k'[n]), ylab = "Hill estimate",
       ylim = c(0, 0.6), xlim = c(0, k.max))
  mtext("(b)", side = 1, line = 5)
  screen(3)   # Pareto quantile plot (shifted)
  X.s <- X.s[X.s > 0]
  n <- length(X.s)
  x <- -log((1:n) / (n+1))
  y <- log(sort.int(X.s, decreasing = TRUE, method = "quick"))
  plot(x, y, xlab = "-log(j/(n+1))", ylab = expression('log X'[(j)]))
  mtext("(c)", side = 1, line = 5)
  screen(4)   # Hill plot (shifted) with pointwise 95%-confidence band
  k.range <- k.min : k.max
  Hill <- H(X.s, k.min, k.max, AsVar = "Yes")
  CI.lo <- Hill$H - 1.96 * sqrt( Hill$AsVar / k.range )
  CI.up <- Hill$H + 1.96 * sqrt( Hill$AsVar / k.range )
  plot(k.range, Hill$H, type = "l", xaxs = "i", yaxs = "i",
       xlab = expression('k'[n]), ylab = "Hill estimate",
       ylim = c(0, 0.6), xlim = c(0, k.max))
  polygon(c(k.range, rev(k.range)), c(CI.up, rev(CI.lo)),
          density = 20, angle = 45, lty = "dotted")
  mtext("(d)", side = 1, line = 5)
  close.screen(all = TRUE)
}

X.s  <- -X + 0.05   # X.s denotes the shifted log-losses
X.ns <- -X          # X.ns denotes the non-shifted log-losses
pareto.hill.plots(X.ns, X.s)

################################################################################
################################### FIGURE 5 ###################################
################################################################################
alpha <- (1:500) / 10000
ES <- ES.hoga(X - 0.05, alpha)   # ES estimates for the shifted data
ES.H <- ES$ES.H + 0.05           # ES estimates using the Hill estimator
ES.P <- ES$ES.P + 0.05           # ES estimates using the Pickands estimator
k.ES <- ES$k.H                   # k.ES = 902 chosen for the Hill estimator
ES.GP <- ES.POT(X, alpha)$ES     # ES estimates using the POT method
Q.hog <- Quantile.hoga(X - 0.05, alpha)
Q.cor <- Q.hog$VaR + 0.05        # VaR corridor
k.VaR <- Q.hog$k                 # k.VaR = 365

# Return level plot [Col01, pp. 88-90]
m <- 1 / (alpha * 250)           # m-year return level
Q.cor[1, ] - Q.cor[3, ]          # width of the VaR corridor (printed for inspection)
par(mar = c(4.1, 4.1, 1.1, 2.1))
plot(rev(log(m)), rev(-Q.cor[2, ]), type = "l", xlab = "Return period in years",
     ylab = "Return level", xaxs = "i", yaxs = "i", xaxt = "n", ylim = c(0.0, 0.35))
lab <- c(0.2, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40)
axis(1, at = log(lab), labels = lab)
polygon(c(rev(log(m)), log(m)), c(rev(-Q.cor[1, ]), -Q.cor[3, ]),
        density = -1, col = "gray76", border = NA)
lines(rev(log(m)), rev(-ES.H),       type = "l", lty = "dotted",   lwd = 2)  # ES, Hill estimator
lines(rev(log(m)), rev(-Q.cor[2, ]), type = "l", lty = "solid",    lwd = 1)  # VaR estimates
lines(rev(log(m)), rev(-ES.P),       type = "l", lty = "longdash", lwd = 2)  # ES, Pickands estimator
lines(rev(log(m)), rev(-ES.GP),      type = "l", lty = "dotdash",  lwd = 2)  # ES, POT approach

# Add empirical estimates
n <- length(X)
X.sort <- sort.int(X, decreasing = FALSE, method = "quick")
rp <- c(10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0.5, 1/3, 1/4, 1/5, 1/6, 1/7, 1/8, 1/9, 1/10, 1/15)
emp.est <- X.sort[floor(rp / 250 * n + 1)]
lines(log(1 / rp), -emp.est, type = "p")
legend('topleft', legend = c("CTM Hill", "CTM Pickands", "POT"),
       lty = c("dotted", "longdash", "dotdash"), lwd = c(2, 2, 2), cex = 1)

# Add parametric estimates using the brute-force approach
spec <- garchSpec(model = list(mu = garch.coef[1], ar = garch.coef[2],
                               omega = garch.coef[3], alpha = garch.coef[4],
                               beta = garch.coef[5], skew = garch.coef[6],
                               shape = garch.coef[7]),
                  cond.dist = "sstd")
no.alpha <- length(alpha)
N <- 100000
B <- 10000
ES.par <- VaR.par <- matrix(NA, nrow = B, ncol = no.alpha)
for(i in 1:B){
  Y <- as.numeric( garchSim(spec, N) )
  Y.partial <- sort.int(Y, partial = floor(alpha[no.alpha] * N))
  Y.partial <- sort.int(Y.partial[1 : floor(alpha[no.alpha] * N)], method = "quick")
  for(a in 1:no.alpha){
    VaR.par[i, a] <- Y.partial[floor(alpha[a] * N)]            # [Hil15, p.6]
    ES.par[i, a] <- 1/alpha[a] * mean( Y * (Y <= VaR.par[i, a]) )
  }
}
ES.par <- colMeans(ES.par)
VaR.par <- colMeans(VaR.par)
lines(rev(log(m)), rev(-ES.par),  type = "l", lty = "twodash", lwd = 2, col = "red")  # parametric ES
lines(rev(log(m)), rev(-VaR.par), type = "l", lty = "twodash", lwd = 2, col = "red")  # parametric VaR
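# Reading the return-level axis (illustrative): with 250 trading days per year,
# a tail probability p_n corresponds to a return period of 1 / (250 * p_n)
# years, e.g. p_n = 0.004 is a one-year event and p_n = 0.0004 a ten-year event.
1 / (250 * c(0.004, 0.0004))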
################################################################################
########################### 4.2 Conditional Analysis ###########################
################################################################################
X <- c(X, 0.904, 0.597)   # also consider the huge log-returns prior to the crash (see paper)
GARCH.fit <- garchFit(formula = ~ arma(1,0) + garch(1,1), data = X,
                      cond.dist = "QMLE", include.mean = FALSE)
# fits an AR(1)-GARCH(1,1) by Gaussian QMLE
summary(GARCH.fit)
plot(GARCH.fit, which = c(8, 9, 13))
std.resid <- GARCH.fit@residuals / GARCH.fit@sigma.t

################ BEGINNING: FOLLOWING FIGURE NOT SHOWN IN PAPER ################
X.s  <- -std.resid + 1.5   # X.s denotes the shifted standardized residuals
X.ns <- -std.resid         # X.ns denotes the non-shifted standardized residuals
pareto.hill.plots(X.ns, X.s)
################### END: FOLLOWING FIGURE NOT SHOWN IN PAPER ###################

################################################################################
################################### FIGURE 6 ###################################
################################################################################
alpha <- (1:500) / 10000
n <- length(X)

# Unconditional VaR & ES estimates of the standardized residuals
Q.hog <- Quantile.hoga(std.resid - 1.5, alpha)
Q.cor <- Q.hog$VaR + 1.5               # VaR corridor
k.VaR <- Q.hog$k                       # k.VaR = 200
ES <- ES.hoga(std.resid - 1.5, alpha)  # ES estimates
ES.H <- ES$ES.H + 1.5                  # ES estimates using the Hill estimator
ES.P <- ES$ES.P + 1.5                  # ES estimates using the Pickands estimator
ES.GP <- ES.POT(std.resid, alpha)$ES   # ES estimates using the POT method
k.ES <- ES$k.H                         # k.ES = 204

# Conditional VaR & ES estimates given the history of past returns
phi    <- GARCH.fit@fit$matcoef[1, 1]
omega  <- GARCH.fit@fit$matcoef[2, 1]
alpha1 <- GARCH.fit@fit$matcoef[3, 1]
beta1  <- GARCH.fit@fit$matcoef[4, 1]
mu.np1 <- phi * X[n]
# one-step-ahead GARCH(1,1) volatility: sigma_{n+1}^2 = omega + alpha1 * eps_n^2 + beta1 * sigma_n^2
sigma.np1 <- sqrt( omega + alpha1 * GARCH.fit@residuals[n]^2 + beta1 * GARCH.fit@sigma.t[n]^2 )
CVaR    <- mu.np1 + sigma.np1 * Q.cor
CES.H   <- mu.np1 + sigma.np1 * ES.H
CES.P   <- mu.np1 + sigma.np1 * ES.P
CES.POT <- mu.np1 + sigma.np1 * ES.GP

# Return level plot [Col01, pp. 88-90]
m <- 1 / (alpha * 250)   # m-year return level
par(mar = c(4.1, 4.1, 1.1, 2.1))
plot(rev(log(m)), rev(-CVaR[2, ]), type = "l", xlab = "Return period in years",
     ylab = "Conditional return level", xaxs = "i", yaxs = "i", xaxt = "n", ylim = c(0.0, 5))
lab <- c(0.2, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40)
axis(1, at = log(lab), labels = lab)
polygon(c(rev(log(m)), log(m)), c(rev(-CVaR[1, ]), -CVaR[3, ]),
        density = -1, col = "gray76", border = NA)
lines(rev(log(m)), rev(-CES.H),     type = "l", lty = "dotted",   lwd = 2)  # CES, Hill estimator
lines(rev(log(m)), rev(-CVaR[2, ]), type = "l", lty = "solid",    lwd = 1)  # CVaR estimates
lines(rev(log(m)), rev(-CES.P),     type = "l", lty = "longdash", lwd = 2)  # CES, Pickands estimator
lines(rev(log(m)), rev(-CES.POT),   type = "l", lty = "dotdash",  lwd = 2)  # CES, POT approach

# Add empirical estimates
n <- length(std.resid)
X.sort <- sort.int(std.resid, decreasing = FALSE, method = "quick")  # sorted standardized residuals
rp <- c(10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0.5, 1/3, 1/4, 1/5, 1/6, 1/7, 1/8, 1/9, 1/10, 1/15)
emp.est <- X.sort[floor(rp / 250 * n + 1)]
emp.est <- mu.np1 + sigma.np1 * emp.est
lines(log(1 / rp), -emp.est, type = "p")
legend('topleft', legend = c("CTM Hill", "CTM Pickands", "POT"),
       lty = c("dotted", "longdash", "dotdash"), lwd = c(2, 2, 2), cex = 1)

# Q: To which level of CVaR does a log-loss of 0.603 correspond?
rp <- 23
emp.est <- X.sort[floor(rp / 250 * n + 1)]
emp.est <- mu.np1 + sigma.np1 * emp.est
print(-emp.est)
# A: Roughly to a level of 23 / 250 = 9%. This is certainly not an extreme level!
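# Snapshot of the one-step-ahead forecasts (illustrative, not in the original
# script): the fitted AR(1) and GARCH(1,1) parameters yield the conditional mean
# mu.np1 and volatility sigma.np1 that turn the standardized-residual estimates
# into conditional risk measures.
print(c(mu.np1 = mu.np1, sigma.np1 = sigma.np1))
cbind(p_n = alpha[c(1, 250, 500)],
      CVaR = -CVaR[2, c(1, 250, 500)],
      CES.Hill = -CES.H[c(1, 250, 500)])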
################################################################################
# Section 3: Computing tail indices of GARCH models
f.GARCH.t <- function(alpha, alpha_1, beta_1, nu){
  # Moment condition for the tail index of a squared GARCH(1,1) with t(nu)-innovations
  dens <- function(x) (alpha_1 * (nu - 2) / nu * x + beta_1)^(alpha/2) * df(x, df1 = 1, df2 = nu)
  EX <- integrate(dens, 0, Inf)$value
  return(EX - 1)
}
f.GARCH.N01 <- function(alpha, alpha_1, beta_1, nu){
  # Moment condition for the tail index of a squared GARCH(1,1) with N(0,1)-innovations;
  # dchisq(..) / alpha_1 is the density of (alpha_1 * N(0,1)^2 + beta_1)
  dens <- function(x) x^(alpha/2) * dchisq( (x - beta_1) / alpha_1, df = 1) / alpha_1
  EX <- integrate(dens, beta_1, Inf)$value
  return(EX - 1)
}
# Tail index of the GARCH(1,1) process with t(2.5) innovations (under H_1)
ti <- uniroot(f.GARCH.t, interval = c(0.1, 2.3), tol = 1e-10,
              alpha_1 = 0.3, beta_1 = 0.6, nu = 2.5)
ti$root   # = 2.667815
# Tail index of the GARCH(1,1) process with N(0,1) innovations
ti <- uniroot(f.GARCH.N01, interval = c(0.1, 10), tol = 1e-10,
              alpha_1 = 0.3, beta_1 = 0.6, nu = Inf)
ti$root   # = 1.072107
################################################################################
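# Hedged numerical cross-check (not in the original script): a Hill estimate of
# the tail index of |X| from a long simulated GARCH(1,1)-t(2.5) path, for rough
# comparison with the moment-condition root above. Hill estimates are sensitive
# to the choice of k, so only order-of-magnitude agreement should be expected.
set.seed(5)
Z <- abs(as.numeric(garchSim(garchSpec(model = list(omega = 10e-06, alpha = 0.3,
         beta = 0.6, shape = 2.5), cond.dist = "std"), 100000)))
Z <- sort(Z, decreasing = TRUE)
k <- 1000
1 / mean(log(Z[1:k] / Z[k + 1]))   # Hill estimate of the tail index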