// Snap x to 10 decimal places to absorb binary floating-point noise,
// e.g. 250 * (1 - 0.99) evaluates to 2.5000000000000004 without this.
fpRound = x => {
  const scale = 1e10
  return Math.round(x * scale) / scale
}
// Standard normal CDF (Abramowitz & Stegun approximation)
normalCDF = x => {
const a1 = 0.254829592, a2 = -0.284496736, a3 = 1.421413741
const a4 = -1.453152027, a5 = 1.061405429, p = 0.3275911
const sign = x < 0 ? -1 : 1
const z = Math.abs(x) / Math.sqrt(2)
const t = 1.0 / (1.0 + p * z)
const y = 1 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * Math.exp(-z * z)
return 0.5 * (1 + sign * y)
}Historical and Weighted Historical Simulation
Interactive exploration of Historical Simulation, Weighted Historical Simulation, and volatility-scaled approaches for computing VaR and ES using Tesla returns
Historical simulation (HS) is the most widely used method for computing VaR and ES in practice. Rather than assuming a specific probability distribution, it uses actual historical changes as scenarios for what could happen tomorrow (see Hull 2023, chap. 12; Christoffersen 2012, chap. 2).
This page explores HS and its extensions interactively using daily Tesla stock returns (see Stylized facts). We examine the method’s strengths and, more importantly, its serious weaknesses, which motivate the dynamic variance models covered later in the course.
// Inverse normal CDF (Acklam's algorithm, max |error| ~ 1.15e-9)
// The cell's value is the quantile function p → Φ⁻¹(p). Three rational
// approximations are stitched together: one for each tail and one for the
// central region; the breakpoints are at pLow / pHigh.
qnorm = {
// Numerator (a*) and denominator (b*) coefficients for the central region.
const a1 = -3.969683028665376e+01, a2 = 2.209460984245205e+02
const a3 = -2.759285104469687e+02, a4 = 1.383577518672690e+02
const a5 = -3.066479806614716e+01, a6 = 2.506628277459239e+00
const b1 = -5.447609879822406e+01, b2 = 1.615858368580409e+02
const b3 = -1.556989798598866e+02, b4 = 6.680131188771972e+01
const b5 = -1.328068155288572e+01
// Numerator (c*) and denominator (d*) coefficients for both tail regions.
const c1 = -7.784894002430293e-03, c2 = -3.223964580411365e-01
const c3 = -2.400758277161838e+00, c4 = -2.549732539343734e+00
const c5 = 4.374664141464968e+00, c6 = 2.938163982698783e+00
const d1 = 7.784695709041462e-03, d2 = 3.224671290700398e-01
const d3 = 2.445134137142996e+00, d4 = 3.754408661907416e+00
const pLow = 0.02425, pHigh = 1 - pLow
return p => {
// Degenerate probabilities map to the distribution's infinite support.
if (p <= 0) return -Infinity
if (p >= 1) return Infinity
// Lower tail: rational approximation in q = sqrt(-2 ln p).
if (p < pLow) {
const q = Math.sqrt(-2 * Math.log(p))
return (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q+c6) / ((((d1*q+d2)*q+d3)*q+d4)*q+1)
}
// Central region: rational approximation in r = (p - 0.5)², scaled by q = p - 0.5.
if (p <= pHigh) {
const q = p - 0.5, r = q * q
return (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r+a6)*q / (((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r+1)
}
// Upper tail: mirror image of the lower-tail formula with q = sqrt(-2 ln(1 - p)).
const q = Math.sqrt(-2 * Math.log(1 - p))
return -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q+c6) / ((((d1*q+d2)*q+d3)*q+d4)*q+1)
}
}// ============================================================
// DATA LOADING
// ============================================================
returnsRaw = {
const raw = await FileAttachment("../../data/tesla_returns.csv").csv({ typed: true })
return raw.map(d => ({ date: new Date(d.Index), ret: d.return }))
}// ============================================================
// LEGEND HELPER (interactive: click items to toggle series)
// ============================================================
// Returns a viewof-compatible DOM element. Its .value is a Set
// of hidden keys. Clicking an item toggles it and dispatches an
// "input" event so dependent OJS cells (plots) re-render.
// Each item: { key, label, color, type }. key defaults to label.
legend = (items) => {
const el = document.createElement("div")
el.style.cssText = "display:flex; flex-wrap:wrap; margin-top:-4px; margin-bottom:6px;"
const hidden = new Set()
for (const d of items) {
const key = d.key || d.label
const span = document.createElement("span")
span.style.cssText = "display:inline-flex; align-items:center; gap:4px; margin-right:14px; cursor:pointer; user-select:none; transition:opacity 0.15s;"
let swatchHTML
if (d.type === "dot") {
swatchHTML = `<svg width="12" height="12"><circle cx="6" cy="6" r="5" fill="${d.color}" opacity="0.8"/></svg>`
} else if (d.type === "dashed") {
swatchHTML = `<svg width="22" height="12"><line x1="0" y1="6" x2="22" y2="6" stroke="${d.color}" stroke-width="2" stroke-dasharray="4 2"/></svg>`
} else {
swatchHTML = `<svg width="22" height="12"><line x1="0" y1="6" x2="22" y2="6" stroke="${d.color}" stroke-width="2"/></svg>`
}
span.innerHTML = `${swatchHTML}<span style="font-size:0.82rem;">${d.label}</span>`
span.addEventListener("click", () => {
const nowHidden = !hidden.has(key)
if (nowHidden) hidden.add(key); else hidden.delete(key)
span.style.opacity = nowHidden ? "0.35" : "1"
span.querySelector("span").style.textDecoration = nowHidden ? "line-through" : "none"
el.value = new Set(hidden)
el.dispatchEvent(new Event("input", {bubbles: true}))
})
el.appendChild(span)
}
el.value = new Set(hidden)
return el
}// ============================================================
// SEEDED PRNG (Mulberry32)
// ============================================================
rng_utils = {
function mulberry32(seed) {
let s = seed | 0
return function() {
s = (s + 0x6D2B79F5) | 0
let t = Math.imul(s ^ (s >>> 15), 1 | s)
t = (t + Math.imul(t ^ (t >>> 7), 61 | t)) ^ t
return ((t ^ (t >>> 14)) >>> 0) / 4294967296
}
}
return { mulberry32 }
}1. Historical Simulation VaR/ES calculator
The HS approach treats each of the past \(m\) daily returns as an equally likely scenario for tomorrow. The VaR at confidence level \(q\) is simply the \((1-q)\)-th percentile of these past returns, and ES is the average of losses exceeding VaR:
\[ \text{VaR}_q = \text{Loss ranked at position } \lceil m(1-q) \rceil, \qquad \text{ES}_q = \frac{1}{\lceil m(1-q) \rceil - 1} \sum_{\text{losses} > \text{VaR}} \text{Loss}_i \]
Note
Linear interpolation
When \(m(1-q)\) is an integer (e.g., \(500 \times 0.01 = 5\)), the VaR is simply the observation at that rank, i.e., the 5th worst loss. When \(m(1-q)\) is not an integer (e.g., \(250 \times 0.01 = 2.5\)), the VaR falls between two observations and is computed by linear interpolation between the adjacent ranked losses (see Christoffersen 2012, sec. 2.1).
Tip
How to experiment
Adjust the window size \(m\) to see how VaR and ES change with different amounts of historical data. Try \(m = 250\) at \(q = 0.99\) to see interpolation in action (position 2.5 falls between the 2nd and 3rd worst losses). Increase the confidence level to see how few observations determine the VaR estimate.
// Compute HS VaR and ES from the last m returns
// Cell value: { varHS, esHS, tailCount, m, sorted, worstLosses }; VaR/ES are
// positive loss fractions. hsWindow/hsConf are UI inputs defined elsewhere;
// nObs/retValues come from the data cells.
hsResult = {
const m = Math.min(hsWindow, nObs)
const window = retValues.slice(nObs - m)
const windowDates = returnsRaw.slice(nObs - m)
// Create array of {ret, date, index} and sort by return (ascending = worst losses first)
const sorted = window.map((r, i) => ({ ret: r, date: windowDates[i].date, idx: i }))
.sort((a, b) => a.ret - b.ret)
// fpRound kills FP noise so e.g. 250 * (1 - 0.99) compares equal to 2.5.
const tailPos = fpRound(m * (1 - hsConf)) // may be fractional
const tailCount = Math.ceil(tailPos)
// VaR: the tailPos-th smallest return (1-indexed), with linear interpolation
// when tailPos is non-integer (e.g., 1% of 250 = 2.5 → interpolate between 2nd and 3rd)
let varHS
if (tailPos === Math.floor(tailPos)) {
// Exact integer position: VaR is this observation (e.g., 5th worst loss for 500 at 99%)
varHS = -sorted[Math.max(0, tailPos - 1)].ret
} else {
// Non-integer: linear interpolation between adjacent observations
const lo = Math.floor(tailPos) - 1 // 0-indexed
const hi = Math.ceil(tailPos) - 1
const frac = tailPos - Math.floor(tailPos)
if (lo < 0) {
// Less than one full observation in the tail: fall back to the single worst loss.
varHS = -sorted[0].ret
} else {
varHS = -(sorted[lo].ret * (1 - frac) + sorted[hi].ret * frac)
}
}
// ES = average of losses strictly worse than VaR
const tailLosses = sorted.slice(0, tailCount - 1).map(d => -d.ret)
const esHS = tailLosses.length > 0
? tailLosses.reduce((a, b) => a + b, 0) / tailLosses.length
: varHS // empty tail (tailCount <= 1): report ES equal to VaR
// Top N worst for display
const worstN = Math.min(20, m)
const worstLosses = sorted.slice(0, worstN).map((d, i) => ({
rank: i + 1,
ret: d.ret,
loss: -d.ret,
date: d.date,
inTail: i < tailCount - 1, // losses averaged into ES
isVaR: i === tailCount - 1 // the VaR observation itself
}))
return { varHS, esHS, tailCount, m, sorted, worstLosses }
}Plot.plot({
height: 380,
marginLeft: 60,
marginRight: 20,
x: { label: "Rank (worst to best)", grid: false },
y: { label: "Loss (% of portfolio)", grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
marks: [
// One bar per ranked loss; fill encodes its role (ES tail / VaR obs / other).
Plot.barY(hsResult.worstLosses, {
x: "rank", y: "loss",
fill: d => d.inTail ? "#d62728" : (d.isVaR ? "#ff7f0e" : "#4682b4"),
fillOpacity: 0.8,
tip: true,
title: d => `Rank ${d.rank}\nDate: ${d.date.toISOString().slice(0, 10)}\nLoss: ${(d.loss * 100).toFixed(2)}%${d.inTail ? " (in ES tail)" : d.isVaR ? " (VaR)" : ""}`
}),
// Dashed horizontal reference lines at the VaR and ES levels.
Plot.ruleY([hsResult.varHS], {
stroke: "#ff7f0e", strokeWidth: 2, strokeDasharray: "6 3"
}),
Plot.ruleY([hsResult.esHS], {
stroke: "#d62728", strokeWidth: 2, strokeDasharray: "6 3"
}),
Plot.text([{ y: hsResult.varHS, label: `VaR = ${(hsResult.varHS * 100).toFixed(2)}%` }], {
x: hsResult.worstLosses.length, y: "y", text: "label",
fill: "#ff7f0e", fontWeight: "bold", fontSize: 10, textAnchor: "end", dx: -5, dy: -8
}),
Plot.text([{ y: hsResult.esHS, label: `ES = ${(hsResult.esHS * 100).toFixed(2)}%` }], {
x: hsResult.worstLosses.length, y: "y", text: "label",
fill: "#d62728", fontWeight: "bold", fontSize: 10, textAnchor: "end", dx: -5, dy: -8
})
]
}){
// Static legend plus an explanatory caption for the bar chart above.
const l = legend([
{ label: "Losses exceeding VaR (ES tail)", color: "#d62728", type: "dot" },
{ label: "VaR observation", color: "#ff7f0e", type: "dot" },
{ label: "Other tail losses", color: "#4682b4", type: "dot" },
{ label: "VaR level", color: "#ff7f0e", type: "dashed" },
{ label: "ES level", color: "#d62728", type: "dashed" }
])
const p = html`<p style="color:#666; font-size:0.85rem;">The ${hsResult.tailCount - 1} losses exceeding VaR (red bars) are averaged to compute ES. With ${hsResult.m} observations at ${(hsConf * 100).toFixed(1)}% confidence, VaR is determined by just ${hsResult.tailCount} observations.</p>`
return html`${l}${p}`
}Plot.plot({
height: 360,
marginLeft: 60,
marginRight: 20,
x: { label: "Return", grid: false, tickFormat: d => (d * 100).toFixed(0) + "%" },
// Zoom the y-axis into the lower tail of the empirical CDF.
y: { label: "Cumulative probability", grid: true, domain: [0, Math.min(0.15, hsCdfData[hsCdfData.length - 1].cdf * 1.5)] },
marks: [
Plot.dot(hsCdfData, {
x: "ret", y: "cdf", fill: "#4682b4", r: 3, fillOpacity: 0.7,
tip: true,
title: d => `Return: ${(d.ret * 100).toFixed(2)}%\nCDF: ${(d.cdf * 100).toFixed(2)}%\nDate: ${d.date.toISOString().slice(0, 10)}`
}),
// Horizontal segments between consecutive points draw the empirical step CDF.
Plot.link(hsCdfData.slice(0, -1).map((d, i) => ({
x1: d.ret, y1: d.cdf, x2: hsCdfData[i + 1].ret, y2: d.cdf
})), {
x1: "x1", y1: "y1", x2: "x2", y2: "y2", stroke: "#4682b4", strokeWidth: 1.5
}),
// Vertical markers at -VaR and -ES (losses are negative returns here).
Plot.ruleX([-hsResult.varHS], {
stroke: "#ff7f0e", strokeWidth: 2, strokeDasharray: "6 3"
}),
Plot.ruleX([-hsResult.esHS], {
stroke: "#d62728", strokeWidth: 2, strokeDasharray: "6 3"
}),
// Horizontal marker at the tail probability 1 - q.
Plot.ruleY([1 - hsConf], {
stroke: "#888", strokeWidth: 1, strokeDasharray: "4 4"
}),
Plot.text([{ x: -hsResult.varHS, label: "VaR" }], {
x: "x", y: Math.min(0.14, hsCdfData[hsCdfData.length - 1].cdf * 1.4),
text: "label", fill: "#ff7f0e", fontWeight: "bold", fontSize: 10, dy: -5
}),
Plot.text([{ x: -hsResult.esHS, label: "ES" }], {
x: "x", y: Math.min(0.14, hsCdfData[hsCdfData.length - 1].cdf * 1.4),
text: "label", fill: "#d62728", fontWeight: "bold", fontSize: 10, dy: -5
})
]
}){
// Results table: summary rows (VaR, ES, tail size) followed by the worst
// losses, color-coded by role (ES tail = red, VaR observation = orange).
const worst5 = hsResult.worstLosses.slice(0, Math.min(10, hsResult.tailCount + 2))
const rows = worst5.map(d =>
`<tr${d.inTail ? ' style="background:#fce4ec;"' : d.isVaR ? ' style="background:#fff3e0;"' : ''}>
<td>${d.rank}</td>
<td>${d.date.toISOString().slice(0, 10)}</td>
<td style="font-weight:${d.inTail || d.isVaR ? '700' : '400'};">${(d.loss * 100).toFixed(2)}%</td>
<td>${d.inTail ? "ES tail" : d.isVaR ? "VaR" : ""}</td>
</tr>`
).join("")
return html`<table class="table" style="width:100%;">
<thead><tr><th colspan="4">Historical Simulation results (m = ${hsResult.m}, q = ${(hsConf * 100).toFixed(1)}%)</th></tr>
<tr><th>Rank</th><th>Date</th><th>Loss</th><th>Role</th></tr></thead>
<tbody>
<tr style="background:#fff3e0;"><td colspan="2" style="font-weight:700;">${(hsConf * 100).toFixed(1)}% VaR</td><td style="font-weight:700;">${(hsResult.varHS * 100).toFixed(2)}%</td><td></td></tr>
<tr style="background:#fce4ec;"><td colspan="2" style="font-weight:700;">${(hsConf * 100).toFixed(1)}% ES</td><td style="font-weight:700;">${(hsResult.esHS * 100).toFixed(2)}%</td><td></td></tr>
<tr><td colspan="2">Tail observations</td><td>${hsResult.tailCount}</td><td>of ${hsResult.m}</td></tr>
<tr><td colspan="4" style="font-weight:500; padding-top:12px;">Worst losses:</td></tr>
${rows}
</tbody>
</table>
<p style="color:#666; font-size:0.85rem;">Red rows: losses used to compute ES (strictly exceeding VaR). Orange row: the VaR observation itself. With only ${hsResult.tailCount} observations in the tail, the VaR estimate is inherently imprecise.</p>`
}2. Sample size trade-off explorer
The choice of window size \(m\) creates an inherent tension. Small \(m\) makes VaR erratic and sensitive to individual observations entering or leaving the window. Large \(m\) makes VaR sluggish and unresponsive to current market conditions. The result is the characteristic “box-shaped” VaR patterns documented by Christoffersen (2012).
Tip
How to experiment
Compare two different window sizes. Watch for the box-shaped jumps in VaR when a large loss suddenly enters or exits the rolling window. Notice how the 250-day VaR can be nearly twice the 1000-day VaR during volatile periods.
// Compute rolling HS VaR for a given window size
// Returns array starting at index m (first day with enough data)
rollingHSVaR = (m, conf) => {
  const total = retValues.length
  const tailProb = 1 - conf
  // Fractional 1-indexed rank of the VaR observation in each sorted window;
  // fpRound removes FP noise so integer ranks compare exactly.
  const rank = fpRound(m * tailProb)
  const out = []
  for (let t = m; t < total; t++) {
    // Sort a copy of the trailing m-day window, worst return first.
    const sortedWin = retValues.slice(t - m, t).sort((x, y) => x - y)
    let varEstimate
    if (rank === Math.floor(rank) && rank >= 1) {
      // Integer rank: VaR is exactly that order statistic.
      varEstimate = -sortedWin[rank - 1]
    } else {
      // Fractional rank: interpolate linearly between adjacent order statistics.
      const lower = Math.max(0, Math.floor(rank) - 1)
      const upper = Math.ceil(rank) - 1
      const weight = rank - Math.floor(rank)
      varEstimate = lower === upper
        ? -sortedWin[lower]
        : -(sortedWin[lower] * (1 - weight) + sortedWin[upper] * weight)
    }
    out.push({
      date: returnsRaw[t].date,
      ret: retValues[t],
      var_: Math.max(0, varEstimate) // report VaR as a non-negative loss
    })
  }
  return out
}// Merge the two series for comparison (align by date)
ssMerged = {
const map2 = new Map(ssData2.map(d => [d.date.getTime(), d]))
return ssData1.filter(d => map2.has(d.date.getTime())).map(d => {
const d2 = map2.get(d.date.getTime())
return {
date: d.date,
ret: d.ret,
var1: d.var_,
var2: d2.var_,
diff: d.var_ - d2.var_
}
})
}viewof ssLegend = legend([
// Clickable legend for the window-size comparison; its .value is a Set of hidden keys.
{ key: "returns", label: "Daily return", color: "#4682b4", type: "dot" },
{ key: "breach1", label: `m=${ssM1} breach only`, color: "#ff7f0e", type: "dot" },
{ key: "breach2", label: `m=${ssM2} breach only`, color: "#d62728", type: "dot" },
{ key: "breachBoth", label: "Both breach", color: "#9467bd", type: "dot" },
{ key: "var1", label: `−VaR (m=${ssM1})`, color: "#ff7f0e", type: "line" },
{ key: "var2", label: `−VaR (m=${ssM2})`, color: "#d62728", type: "line" }
]){
// Merge VaR from both windows by date for breach detection
const map1 = new Map(ssData1.map(d => [d.date.getTime(), d.var_]))
const map2 = new Map(ssData2.map(d => [d.date.getTime(), d.var_]))
// Build breach data from dates covered by at least one window
const coveredDates = new Set([...ssData1.map(d => d.date.getTime()), ...ssData2.map(d => d.date.getTime())])
const allReturns = returnsRaw.filter(d => coveredDates.has(d.date.getTime()))
// A breach = return falling below minus-VaR for whichever window(s) cover that date.
const allBreaches = allReturns.filter(d => {
const v1 = map1.get(d.date.getTime()), v2 = map2.get(d.date.getTime())
return (v1 !== undefined && d.ret < -v1) || (v2 !== undefined && d.ret < -v2)
}).map(d => {
const v1 = map1.get(d.date.getTime()), v2 = map2.get(d.date.getTime())
const b1 = v1 !== undefined && d.ret < -v1, b2 = v2 !== undefined && d.ret < -v2
// Annotate each breach with color / size / legend key / tooltip text.
return { ...d,
var_: v1, var2: v2,
color: b1 && b2 ? "#9467bd" : b1 ? "#ff7f0e" : "#d62728",
r: b1 && b2 ? 3 : 2.5,
bkey: b1 && b2 ? "breachBoth" : b1 ? "breach1" : "breach2",
tipText: `${b1 && b2 ? "Both breach" : b1 ? `m=${ssM1} breach only` : `m=${ssM2} breach only`}\nDate: ${d.date.toISOString().slice(0, 10)}\nReturn: ${(d.ret * 100).toFixed(2)}%${b1 ? `\nVaR (m=${ssM1}): ${(v1 * 100).toFixed(2)}%` : ""}${b2 ? `\nVaR (m=${ssM2}): ${(v2 * 100).toFixed(2)}%` : ""}`
}
})
const breachDates = new Set(allBreaches.map(d => d.date.getTime()))
const noBreach = returnsRaw.filter(d => !breachDates.has(d.date.getTime()))
// h = Set of legend keys the user has toggled off.
const h = ssLegend
const visibleBreaches = allBreaches.filter(d => !h.has(d.bkey))
return Plot.plot({
height: 400,
marginLeft: 60,
marginRight: 20,
x: { label: "Date", type: "time", grid: false },
y: { label: "Daily return / VaR", grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
marks: [
// Each mark group is conditionally included based on legend toggles.
...(!h.has("returns") ? [Plot.dot(noBreach, {
x: "date", y: "ret", fill: "#4682b4", fillOpacity: 0.2, r: 1
})] : []),
...(visibleBreaches.length > 0 ? [
Plot.dot(visibleBreaches, {
x: "date", y: "ret", fill: "color", r: "r", fillOpacity: 0.8
}),
Plot.tip(visibleBreaches, Plot.pointer({
x: "date", y: "ret",
title: "tipText"
}))
] : []),
...(!h.has("var1") ? [Plot.line(ssData1, {
x: "date", y: d => -d.var_, stroke: "#ff7f0e", strokeWidth: 1.8
})] : []),
...(!h.has("var2") ? [Plot.line(ssData2, {
x: "date", y: d => -d.var_, stroke: "#d62728", strokeWidth: 1.8
})] : []),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})
}{
const map2 = new Map(ssData2.map(d => [d.date.getTime(), d.var_]))
const merged = ssData1.filter(d => map2.has(d.date.getTime())).map(d => ({
...d, var2: map2.get(d.date.getTime())
}))
const b1 = merged.filter(d => d.ret < -d.var_ && d.ret >= -d.var2).length
const b2 = merged.filter(d => d.ret >= -d.var_ && d.ret < -d.var2).length
const bBoth = merged.filter(d => d.ret < -d.var_ && d.ret < -d.var2).length
return html`<p style="color:#666; font-size:0.85rem;">Rolling ${(ssConf * 100).toFixed(1)}% HS VaR for two window sizes. Breach counts: m=${ssM1} only ${b1}, m=${ssM2} only ${b2}, both ${bBoth}. Notice the box-shaped jumps: these occur when a large loss abruptly enters or exits the rolling window.</p>`
}Plot.plot({
height: 340,
marginLeft: 60,
marginRight: 20,
x: { label: "Date", type: "time", grid: false },
y: { label: "VaR difference (m₁ − m₂)", grid: true, tickFormat: d => (d * 100).toFixed(1) + "%" },
marks: [
// Shaded area: orange where the small window's VaR exceeds the large one, green otherwise.
Plot.areaY(ssMerged, {
x: "date", y: "diff",
fill: d => d.diff > 0 ? "#ff7f0e" : "#2ca02c", fillOpacity: 0.3
}),
Plot.line(ssMerged, {
x: "date", y: "diff", stroke: "#333", strokeWidth: 1.2
}),
Plot.ruleY([0], { stroke: "#888", strokeWidth: 1, strokeDasharray: "4 4" })
]
}){
// Summary statistics per window size: breach counts and average/max VaR.
const breach1 = ssData1.filter(d => d.ret < -d.var_).length
const breach2 = ssData2.filter(d => d.ret < -d.var_).length
const avgVar1 = ssData1.reduce((a, d) => a + d.var_, 0) / ssData1.length
const avgVar2 = ssData2.reduce((a, d) => a + d.var_, 0) / ssData2.length
const maxVar1 = Math.max(...ssData1.map(d => d.var_))
const maxVar2 = Math.max(...ssData2.map(d => d.var_))
const expected = ((1 - ssConf) * 100).toFixed(1)
return html`<table class="table" style="width:100%;">
<thead><tr><th>Metric</th><th>m₁ = ${ssM1}</th><th>m₂ = ${ssM2}</th></tr></thead>
<tbody>
<tr><td style="font-weight:500;">Average VaR</td><td>${(avgVar1 * 100).toFixed(2)}%</td><td>${(avgVar2 * 100).toFixed(2)}%</td></tr>
<tr><td style="font-weight:500;">Maximum VaR</td><td>${(maxVar1 * 100).toFixed(2)}%</td><td>${(maxVar2 * 100).toFixed(2)}%</td></tr>
<tr><td style="font-weight:500;">VaR breaches</td><td>${breach1} (${(breach1 / ssData1.length * 100).toFixed(1)}%)</td><td>${breach2} (${(breach2 / ssData2.length * 100).toFixed(1)}%)</td></tr>
<tr><td style="font-weight:500;">Expected breach rate</td><td colspan="2">${expected}%</td></tr>
<tr><td style="font-weight:500;">Observations used</td><td>${ssData1.length}</td><td>${ssData2.length}</td></tr>
</tbody>
</table>
<p style="color:#666; font-size:0.85rem;">A smaller window reacts faster to volatility changes but produces more erratic VaR estimates. A larger window is smoother but can be dangerously unresponsive during regime changes.</p>`
}3. HS vs. RiskMetrics VaR response
The most important critique of HS is its slow response to changing volatility. RiskMetrics, despite being a much simpler model, tracks actual risk conditions far more accurately because it uses exponential smoothing on squared returns: \(\sigma_{t+1}^2 = \lambda \sigma_t^2 + (1-\lambda) R_t^2\) (see Christoffersen 2012, sec. 4).
Tip
How to experiment
Compare how quickly each method responds to Tesla’s volatile periods. Notice how HS VaR barely moves after a single large loss, while RiskMetrics VaR jumps immediately. After volatility subsides, HS VaR stays elevated for months while RiskMetrics declines.
// RiskMetrics VaR
cmpRM = {
const n = retValues.length
const sig2 = new Array(n)
sig2[0] = retValues.reduce((a, b) => a + b ** 2, 0) / n
for (let i = 1; i < n; i++) {
sig2[i] = cmpLambda * sig2[i - 1] + (1 - cmpLambda) * retValues[i - 1] ** 2
}
const zAlpha = qnorm(cmpConf)
const pdfZ = normalPDF(zAlpha)
const tailProb = 1 - cmpConf
return returnsRaw.map((d, i) => {
const sigma = Math.sqrt(sig2[i])
return {
date: d.date,
ret: d.ret,
sigma,
var_: sigma * zAlpha,
es: sigma * pdfZ / tailProb
}
})
}// Align HS and RM data by date
cmpMerged = {
const hsMap = new Map(cmpHS.map(d => [d.date.getTime(), d]))
return cmpRM.filter(d => hsMap.has(d.date.getTime())).map(d => {
const hs = hsMap.get(d.date.getTime())
return {
date: d.date,
ret: d.ret,
varHS: hs.var_,
varRM: d.var_,
sigmaRM: d.sigma
}
})
}viewof cmpVarLegend = legend([
// Clickable legend for the HS-vs-RiskMetrics VaR comparison plot.
{ key: "returns", label: "Daily return", color: "#4682b4", type: "dot" },
{ key: "breachHS", label: "HS breach only", color: "#ff7f0e", type: "dot" },
{ key: "breachRM", label: "RM breach only", color: "#2ca02c", type: "dot" },
{ key: "breachBoth", label: "Both breach", color: "#d62728", type: "dot" },
{ key: "varHS", label: `−VaR HS (m=${cmpM})`, color: "#ff7f0e", type: "line" },
{ key: "varRM", label: `−VaR RiskMetrics (λ=${cmpLambda.toFixed(2)})`, color: "#2ca02c", type: "line" }
]){
// Split dates into no-breach vs breach, annotating breaches for color/tooltip.
const noBreach = cmpMerged.filter(d => d.ret >= -d.varHS && d.ret >= -d.varRM)
const allBreaches = cmpMerged.filter(d => d.ret < -d.varHS || d.ret < -d.varRM).map(d => {
const bH = d.ret < -d.varHS, bR = d.ret < -d.varRM
return { ...d,
color: bH && bR ? "#d62728" : bH ? "#ff7f0e" : "#2ca02c",
r: bH && bR ? 3 : 2.5,
bkey: bH && bR ? "breachBoth" : bH ? "breachHS" : "breachRM",
tipText: `${bH && bR ? "Both breach" : bH ? "HS breach only" : "RM breach only"}\nDate: ${d.date.toISOString().slice(0, 10)}\nReturn: ${(d.ret * 100).toFixed(2)}%${bH ? `\nVaR HS: ${(d.varHS * 100).toFixed(2)}%` : ""}${bR ? `\nVaR RM: ${(d.varRM * 100).toFixed(2)}%` : ""}`
}
})
// h = Set of legend keys the user has toggled off.
const h = cmpVarLegend
const visibleBreaches = allBreaches.filter(d => !h.has(d.bkey))
return Plot.plot({
height: 400,
marginLeft: 60,
marginRight: 20,
x: { label: "Date", type: "time", grid: false },
y: { label: "Daily return / VaR", grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
marks: [
...(!h.has("returns") ? [Plot.dot(noBreach, {
x: "date", y: "ret", fill: "#4682b4", fillOpacity: 0.2, r: 1
})] : []),
...(visibleBreaches.length > 0 ? [
Plot.dot(visibleBreaches, {
x: "date", y: "ret", fill: "color", r: "r", fillOpacity: 0.8
}),
Plot.tip(visibleBreaches, Plot.pointer({
x: "date", y: "ret",
title: "tipText"
}))
] : []),
...(!h.has("varHS") ? [Plot.line(cmpMerged, {
x: "date", y: d => -d.varHS, stroke: "#ff7f0e", strokeWidth: 1.8
})] : []),
...(!h.has("varRM") ? [Plot.line(cmpMerged, {
x: "date", y: d => -d.varRM, stroke: "#2ca02c", strokeWidth: 1.8
})] : []),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})
}{
// Caption: mutually exclusive breach counts (HS only / RM only / both).
const bHS = cmpMerged.filter(d => d.ret < -d.varHS && d.ret >= -d.varRM).length
const bRM = cmpMerged.filter(d => d.ret >= -d.varHS && d.ret < -d.varRM).length
const bBoth = cmpMerged.filter(d => d.ret < -d.varHS && d.ret < -d.varRM).length
return html`<p style="color:#666; font-size:0.85rem;">RiskMetrics VaR (green) responds immediately to volatility changes. HS VaR (orange) lags behind. Breach counts: HS only ${bHS}, RM only ${bRM}, both ${bBoth}. See the Breaches tab for more detail.</p>`
}viewof cmpBreachLegend = legend([
// Legend for the breach-only scatter below.
{ key: "noBreach", label: "No breach", color: "#4682b4", type: "dot" },
{ key: "breachHS", label: "HS breach only", color: "#ff7f0e", type: "dot" },
{ key: "breachRM", label: "RM breach only", color: "#2ca02c", type: "dot" },
{ key: "breachBoth", label: "Both breach", color: "#d62728", type: "dot" }
]){
// Scatter of daily returns highlighting breach days only (no VaR lines).
const noBreach = cmpMerged.filter(d => d.ret >= -d.varHS && d.ret >= -d.varRM)
const allBreaches = cmpMerged.filter(d => d.ret < -d.varHS || d.ret < -d.varRM).map(d => {
const bH = d.ret < -d.varHS, bR = d.ret < -d.varRM
return { ...d,
color: bH && bR ? "#d62728" : bH ? "#ff7f0e" : "#2ca02c",
bkey: bH && bR ? "breachBoth" : bH ? "breachHS" : "breachRM",
tipText: `${bH && bR ? "Both breach" : bH ? "HS breach only" : "RM breach only"}\nDate: ${d.date.toISOString().slice(0, 10)}\nReturn: ${(d.ret * 100).toFixed(2)}%${bH ? `\nVaR HS: ${(d.varHS * 100).toFixed(2)}%` : ""}${bR ? `\nVaR RM: ${(d.varRM * 100).toFixed(2)}%` : ""}`
}
})
const h = cmpBreachLegend
const visibleBreaches = allBreaches.filter(d => !h.has(d.bkey))
return Plot.plot({
height: 400,
marginLeft: 60,
marginRight: 20,
x: { label: "Date", type: "time", grid: false },
y: { label: "Daily return", grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
marks: [
...(!h.has("noBreach") ? [Plot.dot(noBreach, {
x: "date", y: "ret", fill: "#4682b4", fillOpacity: 0.15, r: 1
})] : []),
...(visibleBreaches.length > 0 ? [
Plot.dot(visibleBreaches, {
x: "date", y: "ret", fill: "color", r: 3, fillOpacity: 0.8
}),
Plot.tip(visibleBreaches, Plot.pointer({
x: "date", y: "ret",
title: "tipText"
}))
] : []),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})
}{
// Caption: total (non-exclusive) breach rates vs the expected 1 - q rate.
const hsBreaches = cmpMerged.filter(d => d.ret < -d.varHS).length
const rmBreaches = cmpMerged.filter(d => d.ret < -d.varRM).length
const bothBreaches = cmpMerged.filter(d => d.ret < -d.varHS && d.ret < -d.varRM).length
return html`<p style="color:#666; font-size:0.85rem;">HS breaches: ${hsBreaches} (${(hsBreaches / cmpMerged.length * 100).toFixed(1)}%). RM breaches: ${rmBreaches} (${(rmBreaches / cmpMerged.length * 100).toFixed(1)}%). Expected: ${((1 - cmpConf) * 100).toFixed(1)}%. HS tends to breach more during volatile periods because its VaR is too low.</p>`
}{
// Scatter of HS VaR vs RM VaR with a 45° reference line: points below the
// line are days where RM's estimate is lower than HS's, and vice versa.
const maxV = Math.max(...cmpMerged.map(d => Math.max(d.varHS, d.varRM)))
return Plot.plot({
height: 360,
marginLeft: 60,
marginRight: 20,
x: { label: "HS VaR", grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
y: { label: "RiskMetrics VaR", grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
marks: [
Plot.line([{ x: 0, y: 0 }, { x: maxV, y: maxV }], {
x: "x", y: "y", stroke: "#888", strokeDasharray: "6 3", strokeWidth: 1
}),
Plot.dot(cmpMerged, {
x: "varHS", y: "varRM", fill: "#4682b4", fillOpacity: 0.3, r: 2,
tip: true,
title: d => `Date: ${d.date.toISOString().slice(0, 10)}\nHS VaR: ${(d.varHS * 100).toFixed(2)}%\nRM VaR: ${(d.varRM * 100).toFixed(2)}%`
})
]
})
}4. Weighted Historical Simulation explorer
WHS assigns exponentially declining weights to past observations, giving more influence to recent returns. The weight for observation \(\tau\) days ago is:
\[ w_\tau = \frac{\eta^{\tau-1}(1-\eta)}{1-\eta^m}, \quad \tau = 1, 2, \ldots, m \]
VaR is found by sorting returns with their paired weights and accumulating weights from the worst loss until the cumulative weight exceeds \(1-q\) (see Hull 2023, sec. 12.3.1).
Tip
How to experiment
Set \(\eta = 1.00\) to recover basic HS (equal weights). Lower \(\eta\) to 0.95 to see dramatic concentration of weight on recent observations. Watch how the VaR time series responds faster to volatility changes as \(\eta\) decreases.
// Compute WHS VaR for a given window, eta, and confidence
// rets: returns ordered oldest-to-newest (length >= m); eta: decay factor in
// (0, 1], where eta = 1 recovers equal-weighted HS; m: window size; conf:
// confidence level q. Returns { var_, paired } where paired is the sorted
// (return, weight) list, worst losses first.
// Fix vs original: removed the unused local `const n = rets.length`.
whsVaR = (rets, eta, m, conf) => {
  // Tail probability to accumulate toward.
  const tailProb = 1 - conf
  // Normalizing constant Σ_{τ=1..m} η^{τ-1}; equals m when η = 1 so that
  // w_τ = η^{τ-1}(1−η)/(1−η^m) degrades gracefully to equal weights 1/m.
  const denom = eta >= 1.0 ? m : (1 - Math.pow(eta, m)) / (1 - eta)
  // Pair each return with its weight; τ = 1 is the most recent observation,
  // i.e. rets[m - 1], and weights decay geometrically with age.
  const paired = []
  for (let tau = 1; tau <= m; tau++) {
    paired.push({ ret: rets[m - tau], weight: Math.pow(eta, tau - 1) / denom })
  }
  // Sort by return ascending (worst losses first).
  paired.sort((a, b) => a.ret - b.ret)
  // Walk down the sorted losses accumulating weight; VaR is the first loss at
  // which the cumulative weight reaches the tail probability. No interpolation:
  // the weights are discrete probabilities, not equal fractions.
  let cumW = 0
  for (const obs of paired) {
    cumW += obs.weight
    if (cumW >= tailProb) return { var_: -obs.ret, paired }
  }
  // Safety net if rounding kept cumW below tailProb for every observation.
  return { var_: -paired[paired.length - 1].ret, paired }
}// Weight profile for display
whsWeightProfile = {
const m = Math.min(whsM, nObs)
const eta = whsEta
const denom = eta >= 1.0 ? m : (1 - Math.pow(eta, m)) / (1 - eta)
const pts = []
for (let tau = 1; tau <= Math.min(m, 200); tau++) {
pts.push({ tau, weight: Math.pow(eta, tau - 1) / denom, equalWeight: 1 / m })
}
return pts
}// Rolling WHS VaR time series
whsRolling = {
const m = Math.min(whsM, nObs)
const n = retValues.length
const result = []
for (let i = m; i < n; i++) {
const window = retValues.slice(i - m, i)
const { var_ } = whsVaR(window, whsEta, m, whsConf)
result.push({
date: returnsRaw[i].date,
ret: retValues[i],
varWHS: var_
})
}
return result
}// Weighted tail table for the most recent window
whsTailTable = {
const m = Math.min(whsM, nObs)
const window = retValues.slice(nObs - m)
const { paired } = whsVaR(window, whsEta, m, whsConf)
// Show worst 15 losses with cumulative weights
const p = 1 - whsConf
let cumW = 0
const rows = []
for (let i = 0; i < Math.min(15, paired.length); i++) {
cumW += paired[i].weight
rows.push({
rank: i + 1,
ret: paired[i].ret,
loss: -paired[i].ret,
weight: paired[i].weight,
cumWeight: cumW,
exceeds: cumW >= p,
firstExceed: cumW >= p && (cumW - paired[i].weight) < p
})
}
return { rows, threshold: p }
}Plot.plot({
height: 300,
marginLeft: 60,
marginRight: 20,
x: { label: "Observation age τ (1 = most recent)", grid: false },
y: { label: "Weight", grid: true },
marks: [
// Exponentially decaying WHS weight curve.
Plot.line(whsWeightProfile, {
x: "tau", y: "weight", stroke: "#d62728", strokeWidth: 2
}),
// Flat equal-weight reference line (basic HS).
Plot.ruleY([whsWeightProfile[0].equalWeight], {
stroke: "#4682b4", strokeWidth: 1.5, strokeDasharray: "6 3"
}),
Plot.text([{ x: Math.min(whsM, 200) * 0.7, y: whsWeightProfile[0].equalWeight, label: `Equal weight = 1/${Math.min(whsM, nObs)}` }], {
x: "x", y: "y", text: "label", fill: "#4682b4", fontSize: 10, dy: -10
})
]
}){
const m = Math.min(whsM, nObs)
const effN = whsEta >= 1.0 ? m : (1 - Math.pow(whsEta, m)) / (1 - whsEta) * whsWeightProfile[0].weight * m
const halfLife = whsEta >= 1.0 ? Infinity : Math.log(0.5) / Math.log(whsEta)
return html`<p style="color:#666; font-size:0.85rem;">Exponential weights with η = ${whsEta.toFixed(3)}. The most recent observation has weight ${(whsWeightProfile[0].weight * 100).toFixed(3)}% vs equal weight ${(1/m * 100).toFixed(3)}%. Half-life: ${halfLife === Infinity ? "∞ (equal weighting)" : halfLife.toFixed(1) + " days"}. After this many days, an observation's weight has halved.</p>`
}viewof whsCmpLegend = legend([
// Clickable legend for the WHS-vs-basic-HS comparison plot.
{ key: "returns", label: "Daily return", color: "#4682b4", type: "dot" },
{ key: "breachHS", label: "HS breach only", color: "#ff7f0e", type: "dot" },
{ key: "breachWHS", label: "WHS breach only", color: "#d62728", type: "dot" },
{ key: "breachBoth", label: "Both breach", color: "#9467bd", type: "dot" },
{ key: "varHS", label: `−VaR HS (equal weights)`, color: "#ff7f0e", type: "line" },
{ key: "varWHS", label: `−VaR WHS (η=${whsEta.toFixed(3)})`, color: "#d62728", type: "line" }
]){
// Align by date
const hsMap = new Map(whsBasicHS.map(d => [d.date.getTime(), d]))
const aligned = whsRolling.filter(d => hsMap.has(d.date.getTime())).map(d => ({
date: d.date,
ret: d.ret,
varWHS: d.varWHS,
varHS: hsMap.get(d.date.getTime()).var_
}))
// Split days into no-breach vs breach, annotating breaches for color/tooltip.
const noBreach = aligned.filter(d => d.ret >= -d.varWHS && d.ret >= -d.varHS)
const allBreaches = aligned.filter(d => d.ret < -d.varHS || d.ret < -d.varWHS).map(d => {
const bH = d.ret < -d.varHS, bW = d.ret < -d.varWHS
return { ...d,
color: bH && bW ? "#9467bd" : bH ? "#ff7f0e" : "#d62728",
r: bH && bW ? 3 : 2.5,
bkey: bH && bW ? "breachBoth" : bH ? "breachHS" : "breachWHS",
tipText: `${bH && bW ? "Both breach" : bH ? "HS breach only" : "WHS breach only"}\nDate: ${d.date.toISOString().slice(0, 10)}\nReturn: ${(d.ret * 100).toFixed(2)}%${bH ? `\nVaR HS: ${(d.varHS * 100).toFixed(2)}%` : ""}${bW ? `\nVaR WHS: ${(d.varWHS * 100).toFixed(2)}%` : ""}`
}
})
// h = Set of legend keys the user has toggled off.
const h = whsCmpLegend
const visibleBreaches = allBreaches.filter(d => !h.has(d.bkey))
return Plot.plot({
height: 400,
marginLeft: 60,
marginRight: 20,
x: { label: "Date", type: "time", grid: false },
y: { label: "Daily return / VaR", grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
marks: [
...(!h.has("returns") ? [Plot.dot(noBreach, {
x: "date", y: "ret", fill: "#4682b4", fillOpacity: 0.2, r: 1
})] : []),
...(visibleBreaches.length > 0 ? [
Plot.dot(visibleBreaches, {
x: "date", y: "ret", fill: "color", r: "r", fillOpacity: 0.8
}),
Plot.tip(visibleBreaches, Plot.pointer({
x: "date", y: "ret",
title: "tipText"
}))
] : []),
...(!h.has("varHS") ? [Plot.line(aligned, {
x: "date", y: d => -d.varHS, stroke: "#ff7f0e", strokeWidth: 1.5
})] : []),
...(!h.has("varWHS") ? [Plot.line(aligned, {
x: "date", y: d => -d.varWHS, stroke: "#d62728", strokeWidth: 1.8
})] : []),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})
}{
// Caption: breach tallies for the HS-vs-WHS comparison above.
const hsMap = new Map(whsBasicHS.map(d => [d.date.getTime(), d]))
const aligned = whsRolling.filter(d => hsMap.has(d.date.getTime())).map(d => ({
...d, varHS: hsMap.get(d.date.getTime()).var_
}))
// Mutually exclusive breach categories: HS-only, WHS-only, both.
const bHS = aligned.filter(d => d.ret < -d.varHS && d.ret >= -d.varWHS).length
const bWHS = aligned.filter(d => d.ret >= -d.varHS && d.ret < -d.varWHS).length
const bBoth = aligned.filter(d => d.ret < -d.varHS && d.ret < -d.varWHS).length
return html`<p style="color:#666; font-size:0.85rem;">WHS VaR (red) responds faster to recent large losses than basic HS VaR (orange). Breach counts: HS only ${bHS}, WHS only ${bWHS}, both ${bBoth}. The difference is most visible during and after volatile periods. Set η = 1.000 to see them converge.</p>`
}{
// Table of the sorted loss tail with exponential weights; rows are
// highlighted to show which observation sets VaR and which feed ES.
const rows = whsTailTable.rows.map(d =>
`<tr${d.firstExceed ? ' style="background:#fff3e0; font-weight:700;"' : d.exceeds ? '' : ' style="background:#fce4ec;"'}>
<td>${d.rank}</td>
<td>${(d.loss * 100).toFixed(2)}%</td>
<td>${(d.weight * 100).toFixed(4)}%</td>
<td>${(d.cumWeight * 100).toFixed(4)}%</td>
<td>${d.firstExceed ? "← VaR" : d.exceeds ? "" : "ES tail"}</td>
</tr>`
).join("")
return html`<table class="table" style="width:100%;">
<thead>
<tr><th colspan="5">Weighted tail (most recent window, η = ${whsEta.toFixed(3)}, m = ${Math.min(whsM, nObs)})</th></tr>
<tr><th>Rank</th><th>Loss</th><th>Weight</th><th>Cumul. weight</th><th></th></tr>
</thead>
<tbody>${rows}</tbody>
</table>
<p style="color:#666; font-size:0.85rem;">Returns sorted by loss (worst first) with their exponential weights. VaR is where cumulative weight first exceeds ${((1 - whsConf) * 100).toFixed(1)}%. Red rows: losses used to compute ES. Orange row: VaR observation.</p>`
}5. WHS asymmetry: long vs. short position
A critical limitation of WHS: it treats large losses as signals of increased risk but ignores large gains. If you hold a short position, a market crash is a gain, and WHS fails to detect the volatility increase. In contrast, RiskMetrics squares returns, treating both gains and losses symmetrically (see Christoffersen 2012, sec. 3).
Tip
How to experiment
Toggle between Long and Short positions. With a long position, WHS responds quickly to market crashes (large losses). Switch to Short: the same crash is now a large gain, and WHS barely reacts. This asymmetry is a fundamental flaw of the WHS approach.
// Rolling WHS and HS VaR for the chosen position
asymRolling = {
const m = Math.min(asymM, nObs)
const n = asymReturns.length
const resultWHS = []
const resultHS = []
const p = 1 - asymConf
for (let i = m; i < n; i++) {
const window = asymReturns.slice(i - m, i)
// WHS
const { var_: varWHS } = whsVaR(window, asymEta, m, asymConf)
// Basic HS
const sorted = window.slice().sort((a, b) => a - b)
const pos = fpRound(m * p)
let varHS
if (pos === Math.floor(pos) && pos >= 1) {
varHS = -sorted[pos - 1]
} else {
const lo = Math.max(0, Math.floor(pos) - 1)
const hi = Math.ceil(pos) - 1
const frac = pos - Math.floor(pos)
varHS = lo === hi ? -sorted[lo] : -(sorted[lo] * (1 - frac) + sorted[hi] * frac)
}
resultWHS.push({
date: returnsRaw[i].date,
ret: asymReturns[i],
loss: -asymReturns[i],
varWHS: Math.max(0, varWHS),
varHS: Math.max(0, varHS)
})
}
return resultWHS
}// Compute both long and short WHS VaR for comparison table
asymBothStats = {
const m = Math.min(asymM, nObs)
const n = retValues.length
const compute = (rets) => {
let breachesWHS = 0, breachesHS = 0
let sumVarWHS = 0, sumVarHS = 0
let count = 0
for (let i = m; i < n; i++) {
const window = rets.slice(i - m, i)
const { var_: varWHS } = whsVaR(window, asymEta, m, asymConf)
const sorted = window.slice().sort((a, b) => a - b)
const p = 1 - asymConf
const pos = fpRound(m * p)
const lo = Math.max(0, Math.floor(pos) - 1)
const hi = Math.ceil(pos) - 1
const frac = pos - Math.floor(pos)
const varHS = lo === hi ? -sorted[lo] : -(sorted[lo] * (1 - frac) + sorted[hi] * frac)
const v1 = Math.max(0, varWHS)
const v2 = Math.max(0, varHS)
sumVarWHS += v1
sumVarHS += v2
if (rets[i] < -v1) breachesWHS++
if (rets[i] < -v2) breachesHS++
count++
}
return {
avgVarWHS: sumVarWHS / count,
avgVarHS: sumVarHS / count,
breachesWHS, breachesHS, count
}
}
const longRets = retValues
const shortRets = retValues.map(r => -r)
return { long: compute(longRets), short: compute(shortRets) }
}viewof asymLegend = legend([
// Toggleable legend for the long/short asymmetry chart.
{ key: "returns", label: `${asymPos} position return`, color: "#4682b4", type: "dot" },
{ key: "breachHS", label: "HS breach only", color: "#ff7f0e", type: "dot" },
{ key: "breachWHS", label: "WHS breach only", color: "#d62728", type: "dot" },
{ key: "breachBoth", label: "Both breach", color: "#9467bd", type: "dot" },
{ key: "varHS", label: `−VaR HS`, color: "#ff7f0e", type: "line" },
{ key: "varWHS", label: `−VaR WHS (η=${asymEta.toFixed(3)})`, color: "#d62728", type: "line" }
]){
// Chart of position returns against rolling HS and WHS VaR lines,
// with breach days color-coded by which VaR was violated.
const noBreach = asymRolling.filter(d => d.ret >= -d.varWHS && d.ret >= -d.varHS)
const allBreaches = asymRolling.filter(d => d.ret < -d.varHS || d.ret < -d.varWHS).map(d => {
const bH = d.ret < -d.varHS, bW = d.ret < -d.varWHS
return { ...d,
color: bH && bW ? "#9467bd" : bH ? "#ff7f0e" : "#d62728",
r: bH && bW ? 3 : 2.5,
bkey: bH && bW ? "breachBoth" : bH ? "breachHS" : "breachWHS",
tipText: `${bH && bW ? "Both breach" : bH ? "HS breach only" : "WHS breach only"}\nDate: ${d.date.toISOString().slice(0, 10)}\nReturn: ${(d.ret * 100).toFixed(2)}%${bH ? `\nVaR HS: ${(d.varHS * 100).toFixed(2)}%` : ""}${bW ? `\nVaR WHS: ${(d.varWHS * 100).toFixed(2)}%` : ""}`
}
})
// Legend keys the user has hidden.
const h = asymLegend
const visibleBreaches = allBreaches.filter(d => !h.has(d.bkey))
return Plot.plot({
height: 400,
marginLeft: 60,
marginRight: 20,
x: { label: "Date", type: "time", grid: false },
y: { label: `${asymPos} position return / VaR`, grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
marks: [
...(!h.has("returns") ? [Plot.dot(noBreach, {
x: "date", y: "ret", fill: "#4682b4", fillOpacity: 0.2, r: 1
})] : []),
...(visibleBreaches.length > 0 ? [
Plot.dot(visibleBreaches, {
x: "date", y: "ret", fill: "color", r: "r", fillOpacity: 0.8
}),
Plot.tip(visibleBreaches, Plot.pointer({
x: "date", y: "ret",
title: "tipText"
}))
] : []),
// VaR plotted as −VaR so breaches appear below the line.
...(!h.has("varHS") ? [Plot.line(asymRolling, {
x: "date", y: d => -d.varHS, stroke: "#ff7f0e", strokeWidth: 1.5
})] : []),
...(!h.has("varWHS") ? [Plot.line(asymRolling, {
x: "date", y: d => -d.varWHS, stroke: "#d62728", strokeWidth: 1.8
})] : []),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})
}{
// Caption: breach tallies plus a position-specific interpretation.
const bHS = asymRolling.filter(d => d.ret < -d.varHS && d.ret >= -d.varWHS).length
const bWHS = asymRolling.filter(d => d.ret >= -d.varHS && d.ret < -d.varWHS).length
const bBoth = asymRolling.filter(d => d.ret < -d.varHS && d.ret < -d.varWHS).length
const note = asymPos === "Short"
? `With a short position, market crashes become large gains. WHS fails to increase VaR because it only reacts to losses, not gains. Breach counts: HS only ${bHS}, WHS only ${bWHS}, both ${bBoth}.`
: `With a long position, market crashes are large losses. WHS correctly responds by increasing VaR. Breach counts: HS only ${bHS}, WHS only ${bWHS}, both ${bBoth}. Switch to Short to see the critical failure.`
return html`<p style="color:#666; font-size:0.85rem;">${note}</p>`
}// Show 60-day rolling VaR change after big moves
asymResponse = {
const data = asymRolling
const pts = []
for (let i = 1; i < data.length; i++) {
const varChange = (data[i].varWHS - data[i - 1].varWHS) / data[i - 1].varWHS
pts.push({
ret: data[i - 1].ret * 100,
varChange: varChange * 100,
date: data[i].date
})
}
return pts.filter(d => Math.abs(d.ret) <= 25)
}Plot.plot({
// Scatter: previous-day return (x) vs. next-day WHS VaR change (y).
height: 360,
marginLeft: 60,
marginRight: 20,
x: { label: `Previous day ${asymPos.toLowerCase()} return (%)`, grid: false },
y: { label: "WHS VaR change (%)", grid: true },
marks: [
Plot.dot(asymResponse, {
x: "ret", y: "varChange",
// Red dots for losses, green for gains.
fill: d => d.ret < 0 ? "#d62728" : "#2ca02c",
fillOpacity: 0.3, r: 2,
tip: true,
title: d => `Date: ${d.date.toISOString().slice(0, 10)}\nReturn: ${d.ret.toFixed(2)}%\nVaR change: ${d.varChange.toFixed(2)}%`
}),
Plot.ruleX([0], { stroke: "#888", strokeDasharray: "4 4", strokeOpacity: 0.4 }),
Plot.ruleY([0], { stroke: "#888", strokeDasharray: "4 4", strokeOpacity: 0.4 })
]
})html`<p style="color:#666; font-size:0.85rem;">Each dot shows yesterday's ${asymPos.toLowerCase()} return vs. the percentage change in today's WHS VaR. For a long position, large negative returns (left) tend to increase VaR. For a short position, large negative returns are gains and WHS VaR barely responds, and the asymmetry is clearly visible.</p>`{
// Long-vs-short summary table: average VaR and breach rates for WHS and HS.
const s = asymBothStats
const expected = ((1 - asymConf) * 100).toFixed(1)
return html`<table class="table" style="width:100%;">
<thead><tr><th>Metric</th><th>Long position</th><th>Short position</th></tr></thead>
<tbody>
<tr><td colspan="3" style="font-weight:700; background:#f5f5f5;">Weighted HS (η = ${asymEta.toFixed(3)})</td></tr>
<tr><td style="font-weight:500;">Average VaR</td><td>${(s.long.avgVarWHS * 100).toFixed(2)}%</td><td>${(s.short.avgVarWHS * 100).toFixed(2)}%</td></tr>
<tr><td style="font-weight:500;">VaR breaches</td><td>${s.long.breachesWHS} (${(s.long.breachesWHS / s.long.count * 100).toFixed(1)}%)</td><td>${s.short.breachesWHS} (${(s.short.breachesWHS / s.short.count * 100).toFixed(1)}%)</td></tr>
<tr><td colspan="3" style="font-weight:700; background:#f5f5f5;">Basic HS (equal weights)</td></tr>
<tr><td style="font-weight:500;">Average VaR</td><td>${(s.long.avgVarHS * 100).toFixed(2)}%</td><td>${(s.short.avgVarHS * 100).toFixed(2)}%</td></tr>
<tr><td style="font-weight:500;">VaR breaches</td><td>${s.long.breachesHS} (${(s.long.breachesHS / s.long.count * 100).toFixed(1)}%)</td><td>${s.short.breachesHS} (${(s.short.breachesHS / s.short.count * 100).toFixed(1)}%)</td></tr>
<tr><td style="font-weight:500;">Expected breach rate</td><td colspan="2">${expected}%</td></tr>
</tbody>
</table>
<p style="color:#666; font-size:0.85rem;">Compare breach rates for long and short positions. WHS should produce similar breach rates for both directions if it correctly tracks volatility. In practice, the asymmetric treatment of gains and losses means WHS may fail for one direction. RiskMetrics treats gains and losses symmetrically (R² is the same regardless of sign).</p>`
}6. VaR standard error and precision
HS estimates percentiles from finite data, introducing substantial sampling error. The standard error of the VaR estimate is:
\[ \text{SE}(\hat{x}_q) = \frac{1}{f(x_q)} \sqrt{\frac{q(1-q)}{n}} \]
where \(f(x_q)\) is the probability density evaluated at the quantile \(x_q\) (the VaR level), estimated here using a Gaussian kernel density estimator. With 500 observations at 99% confidence, VaR is determined by just 5 observations (see Hull 2023, sec. 12.2).
Tip
How to experiment
Increase the sample size to see the confidence interval narrow, but notice it shrinks only as \(\sqrt{n}\). Increase the confidence level to see precision deteriorate dramatically. Click “New bootstrap sample” to see how much VaR varies across different random resamples.
// Compute VaR and SE from a subsample
seResult = {
const n = Math.min(seN, nObs)
const sample = retValues.slice(nObs - n)
const sorted = sample.slice().sort((a, b) => a - b)
const p = 1 - seConf
const pos = fpRound(n * p)
const lo = Math.max(0, Math.floor(pos) - 1)
const hi = Math.ceil(pos) - 1
const frac = pos - Math.floor(pos)
const varHS = lo === hi ? -sorted[lo] : -(sorted[lo] * (1 - frac) + sorted[hi] * frac)
// Estimate f(x) using Gaussian kernel density estimation
const mu = sample.reduce((a, b) => a + b, 0) / n
const sd = Math.sqrt(sample.reduce((a, b) => a + (b - mu) ** 2, 0) / (n - 1))
// Silverman's rule of thumb bandwidth
const h = 1.06 * sd * Math.pow(n, -0.2)
const xq = -varHS // VaR quantile in return space
let fx = 0
for (let i = 0; i < n; i++) {
fx += normalPDF((xq - sample[i]) / h)
}
fx /= (n * h)
const se = (1 / fx) * Math.sqrt(seConf * (1 - seConf) / n)
const ci95lo = varHS - 1.96 * se
const ci95hi = varHS + 1.96 * se
const tailObs = Math.ceil(fpRound(n * (1 - seConf)))
return { varHS, se, ci95lo, ci95hi, n, tailObs, mu, sd }
}// Bootstrap VaR distribution
bsDistribution = {
// Bootstrap distribution of the HS VaR estimate: resample the window
// with replacement nBoot times and recompute VaR for each resample.
bsReseedCount // reactive dependency for reseed
const n = Math.min(seN, nObs)
const sample = retValues.slice(nObs - n)
const nBoot = 500
// Deterministic seeded PRNG so the same reseed count reproduces results.
const rng = rng_utils.mulberry32(42 + bsReseedCount)
const p = 1 - seConf
const vars = []
for (let b = 0; b < nBoot; b++) {
// Resample with replacement
const resample = new Array(n)
for (let i = 0; i < n; i++) {
resample[i] = sample[Math.floor(rng() * n)]
}
resample.sort((a, b) => a - b)
// Interpolated empirical quantile, same scheme as seResult.
const pos = fpRound(n * p)
const lo = Math.max(0, Math.floor(pos) - 1)
const hi = Math.ceil(pos) - 1
const frac = pos - Math.floor(pos)
const v = lo === hi ? -resample[lo] : -(resample[lo] * (1 - frac) + resample[hi] * frac)
vars.push(v)
}
vars.sort((a, b) => a - b)
// Percentile-bootstrap mean, SD, and 95% CI from the sorted draws.
const bsMean = vars.reduce((a, b) => a + b, 0) / nBoot
const bsSD = Math.sqrt(vars.reduce((a, b) => a + (b - bsMean) ** 2, 0) / (nBoot - 1))
const ci95lo = vars[Math.floor(nBoot * 0.025)]
const ci95hi = vars[Math.floor(nBoot * 0.975)]
return { vars, bsMean, bsSD, ci95lo, ci95hi }
}{
// Show the VaR point estimate with CI band across different sample sizes
const currentN = Math.min(seN, nObs)
// Candidate sample sizes, deduplicated and capped at the available data.
const sizes = [...new Set([100, 200, 250, 500, 750, 1000, 1500, 2000, currentN])].filter(s => s <= nObs).sort((a, b) => a - b)
const ciData = sizes.map(n => {
const sample = retValues.slice(nObs - n)
const sorted = sample.slice().sort((a, b) => a - b)
const p = 1 - seConf
const pos = fpRound(n * p)
const lo = Math.max(0, Math.floor(pos) - 1)
const hi = Math.ceil(pos) - 1
const frac = pos - Math.floor(pos)
const v = lo === hi ? -sorted[lo] : -(sorted[lo] * (1 - frac) + sorted[hi] * frac)
// KDE-based density at the quantile, mirroring seResult.
const mu = sample.reduce((a, b) => a + b, 0) / n
const sd = Math.sqrt(sample.reduce((a, b) => a + (b - mu) ** 2, 0) / (n - 1))
const h = 1.06 * sd * Math.pow(n, -0.2)
const xq = -v
let fx = 0
for (let i = 0; i < n; i++) fx += normalPDF((xq - sample[i]) / h)
fx /= (n * h)
const se = (1 / fx) * Math.sqrt(seConf * (1 - seConf) / n)
return { n, var_: v, se, ciLo: v - 1.96 * se, ciHi: v + 1.96 * se, current: n === Math.min(seN, nObs) }
})
return Plot.plot({
height: 360,
marginLeft: 60,
marginRight: 20,
x: { label: "Sample size n", grid: false },
y: { label: "VaR estimate", grid: true, tickFormat: d => (d * 100).toFixed(1) + "%" },
marks: [
// Vertical 95% CI bars.
Plot.link(ciData, {
x1: "n", y1: "ciLo", x2: "n", y2: "ciHi",
stroke: "#4682b4", strokeWidth: 3, strokeOpacity: 0.3
}),
// Point estimates; the currently selected n is highlighted in red.
Plot.dot(ciData, {
x: "n", y: "var_",
fill: d => d.current ? "#d62728" : "#4682b4",
r: d => d.current ? 6 : 4,
tip: true,
title: d => `n = ${d.n}\nVaR: ${(d.var_ * 100).toFixed(2)}%\nSE: ${(d.se * 100).toFixed(2)}%\n95% CI: [${(d.ciLo * 100).toFixed(2)}%, ${(d.ciHi * 100).toFixed(2)}%]`
})
]
})
}html`<p style="color:#666; font-size:0.85rem;">${(seConf * 100).toFixed(1)}% VaR estimates at different sample sizes with 95% confidence intervals. Red dot: current selection (n = ${seResult.n}). The CI narrows only as √n: quadrupling the sample size merely halves the standard error. Current estimate: VaR = ${(seResult.varHS * 100).toFixed(2)}% ± ${(1.96 * seResult.se * 100).toFixed(2)}%.</p>`{
const nBins = 40
const lo = Math.min(...bsDistribution.vars)
const hi = Math.max(...bsDistribution.vars)
const w = (hi - lo) / nBins
const bins = Array.from({ length: nBins }, (_, i) => ({
x0: lo + i * w, x1: lo + (i + 1) * w, count: 0
}))
for (const v of bsDistribution.vars) {
const idx = Math.min(nBins - 1, Math.floor((v - lo) / w))
bins[idx].count++
}
const total = bsDistribution.vars.length
for (const b of bins) b.density = b.count / (total * w)
return Plot.plot({
height: 340,
marginLeft: 60,
marginRight: 20,
x: { label: "Bootstrap VaR estimate", grid: false, tickFormat: d => (d * 100).toFixed(1) + "%" },
y: { label: "Density", grid: true },
marks: [
Plot.rectY(bins, {
x1: "x0", x2: "x1", y: "density",
fill: "#4682b4", fillOpacity: 0.6
}),
Plot.ruleX([seResult.varHS], {
stroke: "#d62728", strokeWidth: 2
}),
Plot.ruleX([bsDistribution.ci95lo], {
stroke: "#ff7f0e", strokeWidth: 1.5, strokeDasharray: "6 3"
}),
Plot.ruleX([bsDistribution.ci95hi], {
stroke: "#ff7f0e", strokeWidth: 1.5, strokeDasharray: "6 3"
})
]
})
}{
// Combined legend and caption for the bootstrap histogram.
const l = legend([
{ label: "Point estimate", color: "#d62728", type: "line" },
{ label: "Bootstrap 95% CI", color: "#ff7f0e", type: "dashed" }
])
const p = html`<p style="color:#666; font-size:0.85rem;">Distribution of VaR from 500 bootstrap resamples (n = ${seResult.n}). Bootstrap mean: ${(bsDistribution.bsMean * 100).toFixed(2)}%, SD: ${(bsDistribution.bsSD * 100).toFixed(2)}%, 95% CI: [${(bsDistribution.ci95lo * 100).toFixed(2)}%, ${(bsDistribution.ci95hi * 100).toFixed(2)}%]. Click "New bootstrap sample" to generate a different set of resamples.</p>`
return html`${l}${p}`
}{
// Table: how many tail observations pin down VaR at each confidence level.
const confLevels = [0.90, 0.95, 0.975, 0.99, 0.995, 0.999]
const n = seResult.n
const rows = confLevels.map(q => {
const tailObs = Math.ceil(fpRound(n * (1 - q)))
// Highlight the row matching the currently selected confidence level.
const highlight = Math.abs(q - seConf) < 0.001
return `<tr${highlight ? ' style="background:#fff3e0; font-weight:700;"' : ''}>
<td>${(q * 100).toFixed(1)}%</td>
<td>${tailObs}</td>
<td>${(tailObs / n * 100).toFixed(2)}%</td>
<td>${tailObs <= 2 ? "Very poor" : tailObs <= 5 ? "Poor" : tailObs <= 10 ? "Moderate" : tailObs <= 25 ? "Adequate" : "Good"}</td>
</tr>`
}).join("")
return html`<table class="table" style="width:100%;">
<thead>
<tr><th colspan="4">Number of observations determining VaR (n = ${n})</th></tr>
<tr><th>Confidence</th><th>Tail observations</th><th>% of sample</th><th>Precision</th></tr>
</thead>
<tbody>${rows}</tbody>
</table>
<p style="color:#666; font-size:0.85rem;">At high confidence levels, VaR depends on very few observations. A 99% VaR from 500 observations relies on just 5 data points. This is the fundamental precision problem of historical simulation: increasing confidence requires exponentially more data.</p>`
}7. Volatility-scaled Historical Simulation
Volatility scaling adjusts historical returns by the ratio of current to historical volatility. If current volatility is higher than it was at the time of a historical scenario, that scenario is scaled up proportionally (see Hull 2023, sec. 12.3.2):
\[ \text{Adjusted return}_i = R_i \times \frac{\sigma_{n+1}}{\sigma_i} \]
where \(\sigma_i\) is the volatility estimate at time \(i\), computed using the RiskMetrics EWMA model:
\[ \sigma^2_{i+1} = \lambda \, \sigma^2_i + (1 - \lambda) \, R_i^2 \]
seeded with the full-sample variance. Note that the EWMA formula uses \(R_i^2\) rather than \((R_i - \mu)^2\), which is equivalent to assuming \(\mu = 0\). This is standard in the RiskMetrics approach, since for daily returns the mean is negligibly small relative to volatility.
This method can produce VaR estimates exceeding any historical loss, which is impossible with basic HS.
Tip
How to experiment
Adjust the EWMA λ to control how responsive the volatility estimates are. Lower λ means more volatile scaling ratios. Compare the volatility-scaled VaR (which can exceed all historical losses) with the basic HS VaR (which is bounded by the worst historical return).
// Compute EWMA volatility for each day
vsEwma = {
const n = retValues.length
const sig2 = new Array(n)
sig2[0] = retValues.reduce((a, b) => a + b ** 2, 0) / n
for (let i = 1; i < n; i++) {
sig2[i] = vsLambda * sig2[i - 1] + (1 - vsLambda) * retValues[i - 1] ** 2
}
return sig2.map(s => Math.sqrt(s))
}// Compute volatility-scaled HS VaR
vsRolling = {
const m = Math.min(vsM, nObs)
const n = retValues.length
const p = 1 - vsConf
const result = []
for (let i = m; i < n; i++) {
const sigCurrent = vsEwma[i]
// Scale each return in the window by sigma_current / sigma_i
const scaledRets = []
for (let j = i - m; j < i; j++) {
const ratio = vsEwma[j] > 0 ? sigCurrent / vsEwma[j] : 1
scaledRets.push(retValues[j] * ratio)
}
// Basic (unscaled) window
const basicRets = retValues.slice(i - m, i).slice().sort((a, b) => a - b)
// Scaled VaR
scaledRets.sort((a, b) => a - b)
const pos = fpRound(m * p)
const lo = Math.max(0, Math.floor(pos) - 1)
const hi = Math.ceil(pos) - 1
const frac = pos - Math.floor(pos)
const varScaled = lo === hi ? -scaledRets[lo] : -(scaledRets[lo] * (1 - frac) + scaledRets[hi] * frac)
const varBasic = lo === hi ? -basicRets[lo] : -(basicRets[lo] * (1 - frac) + basicRets[hi] * frac)
// Max historical loss in window
const maxHistLoss = -basicRets[0]
result.push({
date: returnsRaw[i].date,
ret: retValues[i],
varScaled: Math.max(0, varScaled),
varBasic: Math.max(0, varBasic),
sigCurrent: sigCurrent,
maxHistLoss,
exceedsMax: varScaled > maxHistLoss
})
}
return result
}// Scaling ratios for display
vsScalingRatios = {
const m = Math.min(vsM, nObs)
const n = retValues.length
const ratios = []
const currentSig = vsEwma[n - 1]
const windowStart = n - m
for (let j = windowStart; j < n; j++) {
ratios.push({
date: returnsRaw[j].date,
ratio: vsEwma[j] > 0 ? currentSig / vsEwma[j] : 1,
tau: n - j
})
}
return ratios
}Plot.plot({
// Time series of the scaling ratio applied to each window observation.
height: 300,
marginLeft: 60,
marginRight: 20,
x: { label: "Date", type: "time", grid: false },
y: { label: "Scaling ratio σ_current / σ_i", grid: true },
marks: [
Plot.line(vsScalingRatios, {
x: "date", y: "ratio", stroke: "#d62728", strokeWidth: 1.5
}),
// Reference at ratio = 1: observations used unchanged.
Plot.ruleY([1], { stroke: "#888", strokeWidth: 1, strokeDasharray: "4 4" }),
Plot.text([{ x: vsScalingRatios[0].date, y: 1, label: "No scaling (ratio = 1)" }], {
x: "x", y: "y", text: "label", fill: "#888", fontSize: 10, dy: -10, textAnchor: "start"
})
]
})html`<p style="color:#666; font-size:0.85rem;">Ratio of current EWMA volatility to historical EWMA volatility for each observation in the window (λ = ${vsLambda.toFixed(2)}). Ratios above 1 mean scenarios are scaled up (current volatility is higher); below 1 means scaled down. Periods of past high volatility get damped; past calm periods get amplified.</p>`viewof vsLegend = legend([
// Toggleable legend for the volatility-scaled comparison chart.
{ key: "returns", label: "Daily return", color: "#4682b4", type: "dot" },
{ key: "breachBasic", label: "Basic HS breach only", color: "#ff7f0e", type: "dot" },
{ key: "breachScaled", label: "Vol-scaled breach only", color: "#d62728", type: "dot" },
{ key: "breachBoth", label: "Both breach", color: "#9467bd", type: "dot" },
{ key: "varBasic", label: "−VaR basic HS", color: "#ff7f0e", type: "line" },
{ key: "varScaled", label: "−VaR volatility-scaled HS", color: "#d62728", type: "line" }
]){
// Chart: daily returns vs. basic and volatility-scaled HS VaR lines,
// with breach days color-coded by which VaR was violated.
const noBreach = vsRolling.filter(d => d.ret >= -d.varBasic && d.ret >= -d.varScaled)
const allBreaches = vsRolling.filter(d => d.ret < -d.varBasic || d.ret < -d.varScaled).map(d => {
const bB = d.ret < -d.varBasic, bS = d.ret < -d.varScaled
return { ...d,
color: bB && bS ? "#9467bd" : bB ? "#ff7f0e" : "#d62728",
r: bB && bS ? 3 : 2.5,
bkey: bB && bS ? "breachBoth" : bB ? "breachBasic" : "breachScaled",
tipText: `${bB && bS ? "Both breach" : bB ? "Basic HS breach only" : "Vol-scaled breach only"}\nDate: ${d.date.toISOString().slice(0, 10)}\nReturn: ${(d.ret * 100).toFixed(2)}%${bB ? `\nVaR basic: ${(d.varBasic * 100).toFixed(2)}%` : ""}${bS ? `\nVaR scaled: ${(d.varScaled * 100).toFixed(2)}%` : ""}`
}
})
// Legend keys currently hidden by the user.
const h = vsLegend
const visibleBreaches = allBreaches.filter(d => !h.has(d.bkey))
return Plot.plot({
height: 400,
marginLeft: 60,
marginRight: 20,
x: { label: "Date", type: "time", grid: false },
y: { label: "Daily return / VaR", grid: true, tickFormat: d => (d * 100).toFixed(0) + "%" },
marks: [
...(!h.has("returns") ? [Plot.dot(noBreach, {
x: "date", y: "ret", fill: "#4682b4", fillOpacity: 0.2, r: 1
})] : []),
...(visibleBreaches.length > 0 ? [
Plot.dot(visibleBreaches, {
x: "date", y: "ret", fill: "color", r: "r", fillOpacity: 0.8
}),
Plot.tip(visibleBreaches, Plot.pointer({
x: "date", y: "ret",
title: "tipText"
}))
] : []),
// VaR drawn as −VaR so breaches fall below the lines.
...(!h.has("varBasic") ? [Plot.line(vsRolling, {
x: "date", y: d => -d.varBasic, stroke: "#ff7f0e", strokeWidth: 1.5
})] : []),
...(!h.has("varScaled") ? [Plot.line(vsRolling, {
x: "date", y: d => -d.varScaled, stroke: "#d62728", strokeWidth: 1.8
})] : []),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})
}{
// Caption: breach tallies and how often scaled VaR exceeded the worst
// historical loss in the window.
const bBasic = vsRolling.filter(d => d.ret < -d.varBasic && d.ret >= -d.varScaled).length
const bScaled = vsRolling.filter(d => d.ret >= -d.varBasic && d.ret < -d.varScaled).length
const bBoth = vsRolling.filter(d => d.ret < -d.varBasic && d.ret < -d.varScaled).length
const exceedsCount = vsRolling.filter(d => d.exceedsMax).length
return html`<p style="color:#666; font-size:0.85rem;">Volatility-scaled VaR (red) adapts to current volatility conditions, while basic HS VaR (orange) is bounded by historical observations. Breach counts: basic HS only ${bBasic}, vol-scaled only ${bScaled}, both ${bBoth}. On ${exceedsCount} days (${(exceedsCount / vsRolling.length * 100).toFixed(1)}%), the scaled VaR exceeded the maximum historical loss in the window, which is impossible with basic HS.</p>`
}{
// Summary table: basic vs. volatility-scaled HS — average/max VaR,
// breach rates, and days where scaled VaR exceeded the window maximum.
const breachBasic = vsRolling.filter(d => d.ret < -d.varBasic).length
const breachScaled = vsRolling.filter(d => d.ret < -d.varScaled).length
const avgBasic = vsRolling.reduce((a, d) => a + d.varBasic, 0) / vsRolling.length
const avgScaled = vsRolling.reduce((a, d) => a + d.varScaled, 0) / vsRolling.length
const maxBasic = Math.max(...vsRolling.map(d => d.varBasic))
const maxScaled = Math.max(...vsRolling.map(d => d.varScaled))
const exceedsCount = vsRolling.filter(d => d.exceedsMax).length
const expected = ((1 - vsConf) * 100).toFixed(1)
return html`<table class="table" style="width:100%;">
<thead><tr><th>Metric</th><th>Basic HS</th><th>Volatility-scaled HS</th></tr></thead>
<tbody>
<tr><td style="font-weight:500;">Average VaR</td><td>${(avgBasic * 100).toFixed(2)}%</td><td>${(avgScaled * 100).toFixed(2)}%</td></tr>
<tr><td style="font-weight:500;">Maximum VaR</td><td>${(maxBasic * 100).toFixed(2)}%</td><td style="font-weight:700;">${(maxScaled * 100).toFixed(2)}%</td></tr>
<tr><td style="font-weight:500;">VaR breaches</td><td>${breachBasic} (${(breachBasic / vsRolling.length * 100).toFixed(1)}%)</td><td>${breachScaled} (${(breachScaled / vsRolling.length * 100).toFixed(1)}%)</td></tr>
<tr><td style="font-weight:500;">Expected breach rate</td><td colspan="2">${expected}%</td></tr>
<tr style="background:#fff3e0;"><td style="font-weight:700;">Days VaR exceeds max historical loss</td><td>0 (impossible)</td><td style="font-weight:700;">${exceedsCount} (${(exceedsCount / vsRolling.length * 100).toFixed(1)}%)</td></tr>
</tbody>
</table>
<p style="color:#666; font-size:0.85rem;">Volatility scaling addresses HS's key limitation: inability to produce VaR estimates beyond historical experience. By scaling scenarios to current volatility conditions, it can signal higher risk than any past observation suggested, which is crucial during unprecedented market stress.</p>`
}References
Christoffersen, Peter F. 2012. Elements of Financial Risk Management. 2nd ed. Academic Press.
Hull, John. 2023. Risk Management and Financial Institutions. 6th ed. John Wiley & Sons.