Interactive exploration of Filtered Historical Simulation (FHS), combining GARCH volatility dynamics with the empirical distribution of standardized shocks
Filtered Historical Simulation (FHS) is a semi-parametric method for computing Value-at-Risk and Expected Shortfall. It combines the best of two worlds: GARCH-type models for capturing time-varying volatility, and the empirical distribution of standardized shocks for capturing fat tails and skewness without distributional assumptions (Christoffersen 2012, chap. 6; Barone-Adesi et al. 1999).
Recall that GARCH models decompose daily returns as
\[
R_{t+1} = \sigma_{PF,t+1}\, z_{t+1}, \qquad z_{t+1} \sim \text{i.i.d. } D(0,1),
\]
where \(\sigma_{PF,t+1}\) is the conditional standard deviation and \(z_{t+1}\) is the standardized innovation. Parametric approaches assume a specific distribution \(D\) (e.g., normal or Student-t), while standard Historical Simulation ignores the time-varying volatility altogether. FHS takes a different path:
Estimate a GARCH model to obtain the conditional volatility series \(\{\hat{\sigma}_t\}\)
Filter the returns to extract standardized residuals \(\hat{z}_t = R_t / \hat{\sigma}_t\), which should be approximately i.i.d.
Compute risk measures using the empirical distribution of the \(\hat{z}_t\), scaled by the current volatility forecast \(\hat{\sigma}_{t+1}\)
A semi-parametric approach. FHS is parametric for the volatility dynamics (GARCH) but nonparametric for the shock distribution (empirical). This means it inherits the volatility responsiveness of GARCH and the distributional flexibility of Historical Simulation.
// ============================================================
// MATH UTILITIES: Normal distribution
// ============================================================
// Standard normal CDF, via the Abramowitz & Stegun 7.1.26 rational
// approximation of erf (absolute error below ~7.5e-8). Adequate for
// plotting and VaR illustration; not for extreme-tail precision.
function normalCDF(x) {
  // Coefficients of the degree-5 polynomial in t = 1 / (1 + p·z).
  const a1 = 0.254829592;
  const a2 = -0.284496736;
  const a3 = 1.421413741;
  const a4 = -1.453152027;
  const a5 = 1.061405429;
  const p = 0.3275911;

  const sign = x < 0 ? -1 : 1;
  const z = Math.abs(x) / Math.sqrt(2);
  const t = 1.0 / (1.0 + p * z);
  const poly = (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t;
  const y = 1 - poly * Math.exp(-z * z);
  // Exploit symmetry Φ(-x) = 1 - Φ(x) via the sign factor.
  return 0.5 * (1 + sign * y);
}
// Standard normal density φ(x) = exp(−x²/2) / √(2π).
function normalPDF(x) {
  const kernel = Math.exp(-(x * x) / 2);
  return kernel / Math.sqrt(2 * Math.PI);
}
// Expected Shortfall under a normal return distribution, reported as a
// positive number: ES = σ · φ(Φ⁻¹(p)) / p, with p the tail probability.
// Depends on the notebook-level qnorm (inverse normal CDF) cell.
function normalES(p, sigma) {
  const tailQuantile = qnorm(p);
  return (sigma * normalPDF(tailQuantile)) / p;
}
// ============================================================
// MATH UTILITIES: Log-gamma and Student-t distribution
// ============================================================
// Log-gamma function via the Lanczos approximation (g = 7, 9 coefficients),
// accurate to ~15 significant digits for positive arguments.
function lgamma(x) {
  const g = 7;
  const coef = [
    0.99999999999980993,
    676.5203681218851,
    -1259.1392167224028,
    771.32342877765313,
    -176.61502916214059,
    12.507343278686905,
    -0.13857109526572012,
    9.9843695780195716e-6,
    1.5056327351493116e-7
  ];
  // Non-positive arguments are outside this notebook's use cases
  // (degrees of freedom, beta parameters are all positive).
  if (x <= 0) return Infinity;
  // Reflection formula for 0 < x < 0.5.
  if (x < 0.5) {
    return Math.log(Math.PI / Math.sin(Math.PI * x)) - lgamma(1 - x);
  }
  x -= 1;
  let series = coef[0];
  const t = x + g + 0.5;
  for (let i = 1; i < g + 2; i++) series += coef[i] / (x + i);
  return 0.5 * Math.log(2 * Math.PI) + (x + 0.5) * Math.log(t) - t + Math.log(series);
}
// Density of the classic (non-standardized) Student-t with d degrees of
// freedom. Evaluated on the log scale through lgamma for numerical
// stability, then exponentiated once.
function tPDF(x, d) {
  const logDensity =
    lgamma((d + 1) / 2) -
    lgamma(d / 2) -
    0.5 * Math.log(d * Math.PI) -
    ((d + 1) / 2) * Math.log(1 + (x * x) / d);
  return Math.exp(logDensity);
}
// Regularized incomplete beta function I_x(a, b), following the
// Numerical Recipes algorithm: a continued fraction evaluated with the
// modified Lentz method, prefixed by the x^a (1-x)^b / (a·B(a,b)) factor.
// Used here to obtain Student-t tail probabilities. Depends on the
// notebook-level lgamma cell.
function regIncBeta(a, b, x) {
  // Modified Lentz evaluation of the continued fraction for I_x(a, b).
  function betacf(aa_, bb_, xx) {
    const qab = aa_ + bb_;
    const qap = aa_ + 1;
    const qam = aa_ - 1;
    const TINY = 1e-30;
    const EPS = 1e-14;
    const MAXITER = 200;
    let c = 1;
    let d = 1 - (qab * xx) / qap;
    if (Math.abs(d) < TINY) d = TINY;
    d = 1 / d;
    let h = d;
    for (let m = 1; m <= MAXITER; m++) {
      const m2 = 2 * m;
      // Even-step coefficient.
      let coeff = (m * (bb_ - m) * xx) / ((qam + m2) * (aa_ + m2));
      d = 1 + coeff * d;
      if (Math.abs(d) < TINY) d = TINY;
      c = 1 + coeff / c;
      if (Math.abs(c) < TINY) c = TINY;
      d = 1 / d;
      h *= d * c;
      // Odd-step coefficient.
      coeff = (-(aa_ + m) * (qab + m) * xx) / ((aa_ + m2) * (qap + m2));
      d = 1 + coeff * d;
      if (Math.abs(d) < TINY) d = TINY;
      c = 1 + coeff / c;
      if (Math.abs(c) < TINY) c = TINY;
      d = 1 / d;
      const del = d * c;
      h *= del;
      if (Math.abs(del - 1) < EPS) break;
    }
    return h;
  }

  if (x < 0 || x > 1) return NaN;
  if (x === 0) return 0;
  if (x === 1) return 1;
  // Symmetry I_x(a,b) = 1 − I_{1−x}(b,a): evaluate the continued
  // fraction on the side where it converges fastest.
  if (x > (a + 1) / (a + b + 2)) {
    return 1 - regIncBeta(b, a, 1 - x);
  }
  const lbeta = lgamma(a) + lgamma(b) - lgamma(a + b);
  const front = Math.exp(Math.log(x) * a + Math.log(1 - x) * b - lbeta) / a;
  return front * betacf(a, b, x);
}
// ============================================================
// PRNG UTILITIES
// ============================================================
// Deterministic, seedable random-number helpers. The cell value exposes
// { mulberry32, boxMuller, stdTSample }; gammaSample and chiSquaredSample
// remain private to the closure, exactly as before.
rng_utils = (() => {
  // Mulberry32: fast 32-bit seeded PRNG returning uniforms on [0, 1).
  function mulberry32(seed) {
    return function () {
      seed |= 0;
      seed = (seed + 0x6D2B79F5) | 0;
      let t = Math.imul(seed ^ (seed >>> 15), 1 | seed);
      t = (t + Math.imul(t ^ (t >>> 7), 61 | t)) ^ t;
      return ((t ^ (t >>> 14)) >>> 0) / 4294967296;
    };
  }

  // Box-Muller transform: one standard normal draw per pair of uniforms.
  // NOTE(review): rng() may return exactly 0 (probability 2^-32), in which
  // case Math.log(0) = -Infinity propagates — kept as-is to preserve the
  // draw stream for a given seed.
  function boxMuller(rng) {
    const u1 = rng();
    const u2 = rng();
    return Math.sqrt(-2 * Math.log(u1)) * Math.cos(2 * Math.PI * u2);
  }

  // Marsaglia-Tsang rejection sampler for Gamma(shape, 1).
  function gammaSample(rng, shape) {
    if (shape < 1) {
      // Boost trick: draw Gamma(shape + 1) and rescale by U^(1/shape).
      return gammaSample(rng, shape + 1) * Math.pow(rng(), 1 / shape);
    }
    const d = shape - 1 / 3;
    const c = 1 / Math.sqrt(9 * d);
    while (true) {
      let x;
      let v;
      do {
        x = boxMuller(rng);
        v = 1 + c * x;
      } while (v <= 0);
      v = v * v * v;
      const u = rng();
      // Cheap squeeze test first, then the exact log acceptance test.
      if (u < 1 - 0.0331 * (x * x) * (x * x)) return d * v;
      if (Math.log(u) < 0.5 * x * x + d * (1 - v + Math.log(v))) return d * v;
    }
  }

  // χ²(d) draw as 2 · Gamma(d/2, 1).
  function chiSquaredSample(rng, d) {
    return 2 * gammaSample(rng, d / 2);
  }

  // Standardized Student-t draw: classic t scaled by √((d−2)/d) so the
  // variance is 1 for d > 2.
  function stdTSample(rng, d) {
    const normal = boxMuller(rng);
    const chi2 = chiSquaredSample(rng, d);
    return (normal / Math.sqrt(chi2 / d)) * Math.sqrt((d - 2) / d);
  }

  return { mulberry32, boxMuller, stdTSample };
})();
// ============================================================
// TIME SERIES AND FORMATTING UTILITIES
// ============================================================
// Sample autocorrelation function for lags 1..maxLag.
// Returns an array of { lag, acf }, using the biased (divide-by-n)
// covariance normalization conventional for correlograms.
function computeACF(series, maxLag) {
  const n = series.length;
  const mean = series.reduce((acc, v) => acc + v, 0) / n;
  const centered = series.map((v) => v - mean);
  // Lag-0 variance: the denominator of every autocorrelation.
  const variance0 = centered.reduce((acc, v) => acc + v * v, 0) / n;
  const result = [];
  for (let lag = 1; lag <= maxLag; lag++) {
    let crossSum = 0;
    for (let t = lag; t < n; t++) {
      crossSum += centered[t] * centered[t - lag];
    }
    result.push({ lag, acf: crossSum / (n * variance0) });
  }
  return result;
}
// Ljung-Box Q statistic over the first K autocorrelations:
//   Q = n (n + 2) Σ_{k=1..K} ρ_k² / (n − k).
// Compared against a χ²(K) critical value to test for serial correlation.
// Depends on the computeACF cell.
function ljungBox(series, K) {
  const n = series.length;
  let weightedSum = 0;
  for (const { lag, acf } of computeACF(series, K)) {
    weightedSum += ((n + 2) / (n - lag)) * acf * acf;
  }
  return n * weightedSum;
}
// Descriptive statistics helper: mean, population variance/sd, and
// moment-based skewness and kurtosis of the standardized values.
// Kurtosis is raw (normal distribution = 3), not excess.
// NOTE(review): a constant input gives sd = 0 and NaN skew/kurtosis.
function descStats(arr) {
  const n = arr.length;
  const mean = arr.reduce((acc, v) => acc + v, 0) / n;
  const m2 = arr.reduce((acc, v) => acc + (v - mean) ** 2, 0) / n;
  const sd = Math.sqrt(m2);
  const standardized = arr.map((v) => (v - mean) / sd);
  const skew = standardized.reduce((acc, v) => acc + v ** 3, 0) / n;
  const kurt = standardized.reduce((acc, v) => acc + v ** 4, 0) / n;
  return { mean, variance: m2, sd, skew, kurt };
}
// Format a number to d decimal places, returning "N/A" for missing or
// non-numeric values. Fix: the original guard (x === undefined || isNaN(x))
// let null slip through to null.toFixed(d), which throws a TypeError;
// `x == null` now covers both null and undefined before formatting.
function fmt(x, d) {
  return x == null || isNaN(x) ? "N/A" : x.toFixed(d);
}
// Percentage formatter for plot axes: fraction → "xx.x%" string.
function pctFmt(x) {
  const percent = (x * 100).toFixed(1);
  return `${percent}%`;
}
// Snap a value to 10 decimal places to hide binary floating-point noise
// (e.g. 0.1 + 0.2 → 0.3) in slider values and labels.
function fpRound(x) {
  const SCALE = 1e10;
  return Math.round(x * SCALE) / SCALE;
}
// ============================================================
// LEGEND HELPER (interactive: click items to toggle series)
// ============================================================
// Returns a viewof-compatible DOM element. Its .value is a Set of hidden
// keys. Clicking an item toggles it and dispatches an "input" event so
// dependent OJS cells (plots) re-render.
// Each item: { key, label, color, type }. key defaults to label.
function legend(items) {
  const container = document.createElement("div");
  container.style.cssText =
    "display:flex; flex-wrap:wrap; margin-top:-4px; margin-bottom:6px;";
  const hidden = new Set();

  // Build the swatch SVG matching the item's mark type.
  const swatchFor = (item) => {
    if (item.type === "dot") {
      return `<svg width="12" height="12"><circle cx="6" cy="6" r="5" fill="${item.color}" opacity="0.8"/></svg>`;
    }
    if (item.type === "dashed") {
      return `<svg width="22" height="12"><line x1="0" y1="6" x2="22" y2="6" stroke="${item.color}" stroke-width="2" stroke-dasharray="4 2"/></svg>`;
    }
    if (item.type === "rect") {
      return `<svg width="14" height="14"><rect width="14" height="14" fill="${item.color}"/></svg>`;
    }
    // Default: solid line swatch.
    return `<svg width="22" height="12"><line x1="0" y1="6" x2="22" y2="6" stroke="${item.color}" stroke-width="2"/></svg>`;
  };

  for (const item of items) {
    const key = item.key || item.label;
    const entry = document.createElement("span");
    entry.style.cssText =
      "display:inline-flex; align-items:center; gap:4px; margin-right:14px; cursor:pointer; user-select:none; transition:opacity 0.15s;";
    entry.innerHTML = `${swatchFor(item)}<span style="font-size:0.82rem;">${item.label}</span>`;
    entry.addEventListener("click", () => {
      // Toggle this key, restyle the entry, and notify downstream cells.
      const nowHidden = !hidden.has(key);
      if (nowHidden) hidden.add(key);
      else hidden.delete(key);
      entry.style.opacity = nowHidden ? "0.35" : "1";
      entry.querySelector("span").style.textDecoration = nowHidden
        ? "line-through"
        : "none";
      container.value = new Set(hidden);
      container.dispatchEvent(new Event("input", { bubbles: true }));
    });
    container.appendChild(entry);
  }
  container.value = new Set(hidden);
  return container;
}
1. The filtering step
The key idea behind FHS is that while raw returns \(R_t\) exhibit volatility clustering (they are not i.i.d.), the standardized residuals
\[
\hat{z}_t = \frac{R_t}{\hat{\sigma}_t}
\]
should be approximately i.i.d. if the GARCH model is correctly specified. This “filtering” step removes the time-varying volatility from the returns, leaving behind the pure shock component whose empirical distribution can be used for simulation.
Note
Using true parameters. In this simulation we use the true GARCH parameters (since we generated the data). In practice, the parameters would be estimated via maximum likelihood (see the MLE page and the GARCH models page).
Tip
How to experiment
Start with Normal shocks and observe that both raw returns and standardized residuals look similar in the QQ plot. Then switch to “Standardized t” and lower the degrees of freedom: the raw returns will show exaggerated fat tails (S-shaped QQ plot) due to the combination of fat-tailed shocks and volatility clustering, while the standardized residuals reveal only the true shock distribution. Check the ACF tab to confirm that filtering removes the autocorrelation from squared returns.
// Caption for the time-series panel: raw returns vs. standardized shocks.
html`<p style="color:#666;font-size:0.85rem;"><strong>Top:</strong> Raw returns R<sub>t</sub> with ±2σ<sub>t</sub> bands (gray shading). Volatility clustering is visible. <strong>Bottom:</strong> Standardized shocks ẑ<sub>t</sub> = R<sub>t</sub>/σ<sub>t</sub> with ±2 reference lines. The shocks look much more homogeneous.</p>`
// Caption for the side-by-side QQ plots (raw vs. filtered).
html`<p style="color:#666;font-size:0.85rem;"><strong>Left:</strong> QQ plot of raw returns (standardized to zero mean, unit variance) vs. normal. The S-shape results from mixing different volatility regimes. <strong>Right:</strong> QQ plot of standardized shocks vs. normal. Departures from the 45-degree line now reflect the true shock distribution only.</p>`
// Caption for the ACF tab: Ljung-Box statistics of squared series.
html`<p style="color:#666;font-size:0.85rem;">Ljung-Box(15) for R²: <strong>${fmt(fsLBRetSq,1)}</strong> | Ljung-Box(15) for ẑ²: <strong>${fmt(fsLBStdSq,1)}</strong> | χ²(15) critical value at 5%: 24.996. Strong autocorrelation in squared returns (volatility clustering) is removed after GARCH filtering.</p>`
// Summary table: descriptive statistics of raw returns vs. standardized shocks.
html`<table class="table" style="width:100%;"><thead><tr> <th>Statistic</th> <th>Raw returns R<sub>t</sub></th> <th>Standardized shocks ẑ<sub>t</sub></th></tr></thead><tbody><tr><td style="font-weight:500;">Mean</td> <td>${fmt(fsStatsRet.mean*100,4)}%</td> <td>${fmt(fsStatsStd.mean,4)}</td></tr><tr><td style="font-weight:500;">Std. deviation</td> <td>${fmt(fsStatsRet.sd*100,4)}%</td> <td>${fmt(fsStatsStd.sd,4)}</td></tr><tr><td style="font-weight:500;">Skewness</td> <td>${fmt(fsStatsRet.skew,4)}</td> <td>${fmt(fsStatsStd.skew,4)}</td></tr><tr><td style="font-weight:500;">Kurtosis</td> <td>${fmt(fsStatsRet.kurt,4)}</td> <td>${fmt(fsStatsStd.kurt,4)}</td></tr><tr><td style="font-weight:500;">Excess kurtosis</td> <td style="font-weight:700; color:#d62728;">${fmt(fsStatsRet.kurt-3,4)}</td> <td style="font-weight:700; color:#d62728;">${fmt(fsStatsStd.kurt-3,4)}</td></tr><tr><td style="font-weight:500;">Ljung-Box(15) of squared series</td> <td style="font-weight:700;">${fmt(fsLBRetSq,1)}</td> <td>${fmt(fsLBStdSq,1)}</td></tr></tbody></table><p style="color:#666;font-size:0.85rem;">Raw returns have higher excess kurtosis than the standardized shocks because volatility clustering adds additional leptokurtosis beyond what the shock distribution contributes. The Ljung-Box statistic confirms that GARCH filtering removes the autocorrelation from squared returns.</p>`
2. FHS VaR and ES
Once we have extracted the standardized shocks \(\hat{z}_t\), FHS computes risk measures by scaling the empirical quantile of those shocks by the current volatility forecast:
\[
\mathrm{VaR}^{p}_{t+1} = -\hat{\sigma}_{t+1}\,\mathrm{Quantile}_{p}\{\hat{z}_\tau\}_{\tau=1}^{T},
\qquad
\mathrm{ES}^{p}_{t+1} = -\hat{\sigma}_{t+1}\,\frac{1}{pT}\sum_{\tau=1}^{T} \hat{z}_\tau\, \mathbf{1}\!\left\{\hat{z}_\tau \le \mathrm{Quantile}_{p}\{\hat{z}_\tau\}\right\}
\]
How the return series is simulated. A GARCH(1,1) process is simulated with a fixed long-run daily volatility of 1.5%, using the \(\alpha\) and \(\beta\) sliders below and the chosen shock distribution. The standardized residuals are then extracted using the true GARCH parameters. The volatility forecast slider \(\hat{\sigma}_{t+1}\) is independent of the simulation: it controls only the scaling applied to the empirical shock quantile when computing VaR and ES, letting you explore how the same set of shocks translates into different risk measures at different volatility levels.
Tip
How to experiment
Switch to “Standardized t” with low degrees of freedom to see FHS capture fat tails. Then vary the volatility forecast \(\hat{\sigma}_{t+1}\): watch FHS VaR and Normal VaR scale linearly while Standard HS stays flat. In the “Exceeding history” tab, increase \(\hat{\sigma}_{t+1}\) to see FHS generate losses larger than any observed in the historical sample.
{const shocks = fvResults.shocksconst sq = fvResults.shockQuantileconst nq =qnorm(fvResults.p)return Plot.plot({height:380,marginLeft:60,x: { label:"Standardized shock ẑ",grid:false },y: { label:"Count",grid:true },marks: [ Plot.rectY(shocks, Plot.binX({ y:"count" }, {x: d => d,fill: d => d < sq ?"#d62728":"#ccc",fillOpacity:0.6,thresholds:80 })), Plot.ruleX([sq], { stroke:"#d62728",strokeWidth:2 }), Plot.ruleX([nq], { stroke:"#2f71d5",strokeWidth:2,strokeDasharray:"6 3" }), Plot.text([[sq,0]], { x: d => d[0],y: d => d[1],text: d =>`FHS: ${fmt(d[0],3)}`,dy:-10,dx:-5,textAnchor:"end",fill:"#d62728",fontWeight:700,fontSize:12 }), Plot.text([[nq,0]], { x: d => d[0],y: d => d[1],text: d =>`Normal: ${fmt(d[0],3)}`,dy:-10,dx:5,textAnchor:"start",fill:"#2f71d5",fontWeight:700,fontSize:12 }) ] })}
// Caption for the shock histogram: empirical vs. normal quantile lines.
html`<p style="color:#666;font-size:0.85rem;">Histogram of standardized shocks. The <span style="color:#d62728;font-weight:700;">red line</span> is the ${fvConf}% empirical percentile used by FHS. The <span style="color:#2f71d5;font-weight:700;">blue dashed line</span> is Φ<sup>−1</sup>(${fvConf}%) = ${fmt(qnorm(fvResults.p),3)} used by the Normal approach. The red shaded area contains the ${fvResults.tailCount} observations used to compute ES.</p>`
// Caption for the VaR-vs-volatility panel: linear scaling of FHS/Normal, flat HS.
html`<p style="color:#666;font-size:0.85rem;">Both FHS and Normal VaR scale linearly with σ̂<sub>t+1</sub>, but FHS has a steeper slope when shocks are fat-tailed (|ẑ<sub>(p)</sub>| > |Φ<sup>−1</sup>(p)|). Standard HS VaR is constant because it ignores the current volatility.</p>`
// "Exceeding history" panel: decomposes the worst in-sample loss into
// volatility × shock and shows the loss the same shock implies at the
// current volatility forecast; the banner flags when FHS exceeds history.
{const r = fvResultsconst worstLoss =Math.abs(r.worstRet) *100const worstSig = r.worstSigma*100const worstZ =Math.abs(r.worstShock)const fhsLoss = r.fhsImpliedLoss*100const exceeds = fhsLoss > worstLossconst curSig = fvSigmareturnhtml`<div style="max-width:600px;margin:0 auto;"> <table class="table" style="width:100%;"> <thead><tr><th colspan="2" style="text-align:center;">Worst historical loss analysis</th></tr></thead> <tbody> <tr><td style="font-weight:500;">Worst raw loss</td> <td style="text-align:right;font-weight:700;">${fmt(worstLoss,3)}%</td></tr> <tr><td style="font-weight:500;">Day's volatility σ<sub>${r.worstIdx+1}</sub></td> <td style="text-align:right;">${fmt(worstSig,3)}%</td></tr> <tr><td style="font-weight:500;">Implied shock |ẑ| = loss / σ</td> <td style="text-align:right;">${fmt(worstZ,3)}</td></tr> <tr style="border-top:2px solid #333;"><td style="font-weight:500;">Current σ̂<sub>t+1</sub></td> <td style="text-align:right;">${fmt(curSig,1)}%</td></tr> <tr><td style="font-weight:500;">FHS-implied loss = |ẑ| × σ̂<sub>t+1</sub></td> <td style="text-align:right;font-weight:700;font-size:1.1em;color:${exceeds ?'#d62728':'#2e7d32'};">${fmt(fhsLoss,3)}%</td></tr> </tbody></table> <div style="padding:12px;border-radius:8px;background:${exceeds ?'#fff3e0':'#e8f5e9'};text-align:center;margin-top:8px;">${exceeds?html`<strong style="color:#d62728;">FHS generates a ${fmt(fhsLoss,3)}% loss, exceeding the worst historical loss of ${fmt(worstLoss,3)}%.</strong><br><span style="font-size:0.9rem;">A moderate past shock (${fmt(worstZ,2)}σ) combined with high current volatility (${fmt(curSig,1)}%) produces a loss never observed in the data.</span>`:html`<strong style="color:#2e7d32;">FHS-implied loss (${fmt(fhsLoss,3)}%) is within the historical range.</strong><br><span style="font-size:0.9rem;">Current volatility is not high enough to push the worst shock beyond the observed maximum.</span>`} </div> </div>`}
// Comparison table of VaR/ES under FHS, Normal, and Standard HS, plus the
// ES/VaR ratios and the shock quantiles each method uses.
{const r = fvResultsconst fhsRatio = r.fhsES/ r.fhsVaRconst normRatio = r.normES/ r.normVaRconst hsRatio = r.hsES/ r.hsVaRreturnhtml`<table class="table" style="width:100%;"><thead><tr> <th>Measure</th> <th style="color:#d62728;">FHS</th> <th style="color:#2f71d5;">Normal</th> <th style="color:#e67e22;">Standard HS</th></tr></thead><tbody><tr><td style="font-weight:500;">VaR (% of portfolio)</td> <td style="font-weight:700;">${fmt(r.fhsVaR*100,4)}%</td> <td>${fmt(r.normVaR*100,4)}%</td> <td>${fmt(r.hsVaR*100,4)}%</td></tr><tr><td style="font-weight:500;">ES (% of portfolio)</td> <td style="font-weight:700;">${fmt(r.fhsES*100,4)}%</td> <td>${fmt(r.normES*100,4)}%</td> <td>${fmt(r.hsES*100,4)}%</td></tr><tr><td style="font-weight:500;">ES / VaR ratio</td> <td>${fmt(fhsRatio,4)}</td> <td>${fmt(normRatio,4)}</td> <td>${fmt(hsRatio,4)}</td></tr><tr><td style="font-weight:500;">Shock quantile</td> <td>ẑ<sub>(p)</sub> = ${fmt(r.shockQuantile,4)}</td> <td>Φ<sup>−1</sup>(p) = ${fmt(qnorm(r.p),4)}</td> <td>n/a</td></tr></tbody></table><p style="color:#666;font-size:0.85rem;">FHS uses the empirical shock quantile ẑ<sub>(p)</sub> scaled by the volatility forecast σ̂<sub>t+1</sub> = ${fvSigma}%. The Normal approach uses Φ<sup>−1</sup>(p) instead. Standard HS uses the raw return percentile directly, ignoring current volatility. When shocks are fat-tailed, the FHS shock quantile is more negative than the normal one, producing higher VaR and ES.</p>`}
3. Method comparison: HS vs. GARCH-Normal vs. FHS
To see why FHS matters in practice, consider a volatility regime change: a calm period followed by a stress period. How quickly does each risk measure adapt?
Standard HS uses a rolling window of raw returns. After the regime shift, it takes many days for the high-volatility observations to dominate the window, so VaR adapts slowly.
GARCH-Normal captures volatility dynamics immediately through \(\hat{\sigma}_{t+1}\), but it uses the normal quantile, which underestimates tail risk if shocks are fat-tailed.
FHS adapts immediately (through \(\hat{\sigma}_{t+1}\)) and captures fat tails (through the empirical shock quantile).
Note
How the return series is simulated. A two-regime GARCH(1,1) process is simulated with fixed \(\alpha = 0.10\) and \(\beta = 0.85\) over \(N = 2000\) days. At the regime switch point, the GARCH intercept \(\omega\) changes from \(\omega_1 = V_{L,1}(1-\alpha-\beta)\) to \(\omega_2 = V_{L,2}(1-\alpha-\beta)\), shifting the long-run variance target. The conditional variance itself transitions gradually through the GARCH recursion \(\sigma^2_{t+1} = \omega + \alpha R^2_t + \beta \sigma^2_t\) (no discontinuous jump). Shocks are always drawn from a standardized Student-t distribution with degrees of freedom \(d\). All three methods use the true GARCH conditional volatility \(\sigma_t\) (known from the simulation), so the comparison isolates the effect of the shock distribution assumption (normal quantile vs. empirical quantile) and the volatility responsiveness (rolling window vs. GARCH forecast).
Note
In-sample illustration. This comparison is performed in-sample: the same data used to build the shock database is also used to evaluate exceedances. In practice, model performance should be assessed out-of-sample, applying VaR forecasts to future returns not used in estimation. In-sample results tend to be optimistic, particularly for FHS, which benefits from seeing the shocks it is evaluated against. The purpose here is to illustrate the qualitative differences between methods, not to provide a realistic backtesting exercise.
Tip
How to experiment
Set a large contrast between calm and stress volatilities (e.g., 1% vs. 4%). After the regime switch, watch how HS VaR lags behind while FHS and GARCH-Normal adapt quickly. Then check the “Exceedance rates” tab: HS will be too conservative before the switch and too lenient after, while FHS should maintain rates closer to the target \(p\). Lower the degrees of freedom to increase the gap between GARCH-Normal and FHS.
// Caption for the method-comparison VaR time-series plot.
html`<p style="color:#666;font-size:0.85rem;">VaR lines are plotted as negative values (loss thresholds). After the regime switch, Standard HS (orange) adjusts slowly because it relies on a rolling window. GARCH-Normal (blue) and FHS (red) adapt immediately. FHS exceeds GARCH-Normal when shocks are fat-tailed.</p>`
// Caption for the exceedance-dots plot.
html`<p style="color:#666;font-size:0.85rem;">Each dot marks a day where the actual loss exceeded the method's VaR. A cluster of exceedances after the regime switch indicates the method was too slow to react. Fewer exceedances overall (closer to p × N) indicates better calibration.</p>`
// Exceedance-rate table for the three methods, split into pre-switch,
// post-switch, and overall periods; cells are color-coded when the rate
// is more than double (red) or less than half (green) the target p.
{const s = mcStatsconst fmtPct = x => (x *100).toFixed(2) +"%"const highlight = (rate, target) => {const ratio = rate / targetif (ratio >2) return'color:#d62728;font-weight:700;'if (ratio <0.5) return'color:#2e7d32;font-weight:700;'return'' }returnhtml`<table class="table" style="width:100%;"><thead><tr> <th>Method</th> <th>Pre-switch rate</th> <th>Post-switch rate</th> <th>Overall rate</th> <th>Target p</th></tr></thead><tbody><tr><td style="font-weight:500;color:#e67e22;">Standard HS</td> <td style="${highlight(s.hs.pre, s.p)}">${fmtPct(s.hs.pre)} (${s.hs.preCount}/${s.preN})</td> <td style="${highlight(s.hs.post, s.p)}">${fmtPct(s.hs.post)} (${s.hs.postCount}/${s.postN})</td> <td style="${highlight(s.hs.all, s.p)}">${fmtPct(s.hs.all)} (${s.hs.allCount}/${s.total})</td> <td>${fmtPct(s.p)}</td></tr><tr><td style="font-weight:500;color:#2f71d5;">GARCH-Normal</td> <td style="${highlight(s.gn.pre, s.p)}">${fmtPct(s.gn.pre)} (${s.gn.preCount}/${s.preN})</td> <td style="${highlight(s.gn.post, s.p)}">${fmtPct(s.gn.post)} (${s.gn.postCount}/${s.postN})</td> <td style="${highlight(s.gn.all, s.p)}">${fmtPct(s.gn.all)} (${s.gn.allCount}/${s.total})</td> <td>${fmtPct(s.p)}</td></tr><tr><td style="font-weight:500;color:#d62728;">FHS</td> <td style="${highlight(s.fhs.pre, s.p)}">${fmtPct(s.fhs.pre)} (${s.fhs.preCount}/${s.preN})</td> <td style="${highlight(s.fhs.post, s.p)}">${fmtPct(s.fhs.post)} (${s.fhs.postCount}/${s.postN})</td> <td style="${highlight(s.fhs.all, s.p)}">${fmtPct(s.fhs.all)} (${s.fhs.allCount}/${s.total})</td> <td>${fmtPct(s.p)}</td></tr></tbody></table><p style="color:#666;font-size:0.85rem;">Rates highlighted in <span style="color:#d62728;font-weight:700;">red</span> exceed twice the target (too many exceedances, VaR too low). Rates in <span style="color:#2e7d32;font-weight:700;">green</span> are below half the target (too few exceedances, VaR too conservative). A well-calibrated model should have rates close to p = ${fmtPct(s.p)} in both periods.</p>`}
// Caption for the total-exceedance-count bar chart vs. the p × N target line.
html`<p style="color:#666;font-size:0.85rem;">Total exceedance counts for each method over ${mcStats.total} evaluation days. The dashed line marks the expected count under perfect calibration (p × N = ${fmt(mcStats.total* mcStats.p,1)}). A well-calibrated method should have a count close to this line.</p>`
References
Barone-Adesi, Giovanni, Kostas Giannopoulos, and Les Vosper. 1999. “VaR Without Correlations for Portfolios of Derivative Securities.” Journal of Futures Markets 19 (5): 583–602.
Christoffersen, Peter F. 2012. Elements of Financial Risk Management. 2nd ed. Academic Press.