// ============================================================
// MATH UTILITIES: Normal distribution
// ============================================================
// Standard normal CDF (Abramowitz & Stegun approximation)
normalCDF = x => {
// Abramowitz & Stegun 7.1.26 rational approximation to erf; |error| < 1.5e-7.
const a1 = 0.254829592, a2 = -0.284496736, a3 = 1.421413741
const a4 = -1.453152027, a5 = 1.061405429, p = 0.3275911
// Exploit symmetry Phi(-x) = 1 - Phi(x): work with |x| and flip at the end
const sign = x < 0 ? -1 : 1
const z = Math.abs(x) / Math.sqrt(2)
const t = 1.0 / (1.0 + p * z)
// Horner evaluation of the degree-5 polynomial in t times exp(-z^2): approximates erf(z)
const y = 1 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * Math.exp(-z * z)
// Phi(x) = (1 + sign(x) * erf(|x| / sqrt(2))) / 2
return 0.5 * (1 + sign * y)
}Parametric VaR and ES
Interactive exploration of Cornish-Fisher, Standardized t, and Asymmetric t approaches to Value-at-Risk and Expected Shortfall
When portfolio returns are not normally distributed, using the normal quantile to compute VaR and ES can systematically underestimate risk. Financial returns typically exhibit fat tails (excess kurtosis) and often negative skewness, meaning large losses occur more frequently than the normal distribution predicts (see Christoffersen 2012, chap. 6).
This page explores three parametric alternatives that capture these features. Each method models the distribution of standardized returns \(z_t = R_{PF,t} / \sigma_{PF}\), where \(\sigma_{PF}\) is the portfolio volatility:
\[ R_{PF,t} = \sigma_{PF} \, z_t, \quad z_t \sim D(0,1) \]
The question is: what distribution \(D(0,1)\) best describes the standardized returns?
// Inverse normal CDF (Acklam's algorithm, max |error| ~ 1.15e-9)
qnorm = {
// Rational-approximation coefficients for the central region (numerator a*, denominator b*)
const a1 = -3.969683028665376e+01, a2 = 2.209460984245205e+02
const a3 = -2.759285104469687e+02, a4 = 1.383577518672690e+02
const a5 = -3.066479806614716e+01, a6 = 2.506628277459239e+00
const b1 = -5.447609879822406e+01, b2 = 1.615858368580409e+02
const b3 = -1.556989798598866e+02, b4 = 6.680131188771972e+01
const b5 = -1.328068155288572e+01
// Coefficients for the tail approximation (numerator c*, denominator d*)
const c1 = -7.784894002430293e-03, c2 = -3.223964580411365e-01
const c3 = -2.400758277161838e+00, c4 = -2.549732539343734e+00
const c5 = 4.374664141464968e+00, c6 = 2.938163982698783e+00
const d1 = 7.784695709041462e-03, d2 = 3.224671290700398e-01
const d3 = 2.445134137142996e+00, d4 = 3.754408661907416e+00
// Breakpoints between the tail and central approximations
const pLow = 0.02425, pHigh = 1 - pLow
// Returned function: maps p in (0, 1) to the standard normal quantile
return p => {
// Degenerate probabilities map to the infinite quantiles
if (p <= 0) return -Infinity
if (p >= 1) return Infinity
if (p < pLow) {
// Lower tail: rational function in q = sqrt(-2 ln p)
const q = Math.sqrt(-2 * Math.log(p))
return (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q+c6) / ((((d1*q+d2)*q+d3)*q+d4)*q+1)
}
if (p <= pHigh) {
// Central region: rational function in r = (p - 1/2)^2, odd in q = p - 1/2
const q = p - 0.5, r = q * q
return (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r+a6)*q / (((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r+1)
}
// Upper tail: mirror image of the lower-tail branch with p -> 1 - p
const q = Math.sqrt(-2 * Math.log(1 - p))
return -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q+c6) / ((((d1*q+d2)*q+d3)*q+d4)*q+1)
}
}// ============================================================
// MATH UTILITIES: Log-gamma and Student-t distribution
// ============================================================
// Log-gamma function (Lanczos approximation, g=7)
// Wrapped as a returned function to avoid OJS circular dependency on self-recursion
lgamma = {
const g = 7
// Lanczos coefficients for g = 7 (9 terms, indices 0..8)
const coef = [
0.99999999999980993, 676.5203681218851, -1259.1392167224028,
771.32342877765313, -176.61502916214059, 12.507343278686905,
-0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7
]
// log Gamma(x); returns Infinity for x <= 0 (poles / undefined here)
function _lgamma(x) {
if (x <= 0) return Infinity
if (x < 0.5) {
// Reflection formula: Gamma(x) Gamma(1-x) = pi / sin(pi x)
return Math.log(Math.PI / Math.sin(Math.PI * x)) - _lgamma(1 - x)
}
x -= 1
// Lanczos series a = c0 + sum_{i=1..8} c_i / (x + i)
let a = coef[0]
const t = x + g + 0.5
for (let i = 1; i < g + 2; i++) a += coef[i] / (x + i)
// ln Gamma = ln sqrt(2 pi) + (x + 1/2) ln t - t + ln a
return 0.5 * Math.log(2 * Math.PI) + (x + 0.5) * Math.log(t) - t + Math.log(a)
}
return _lgamma
}// Regularized incomplete beta function (Numerical Recipes algorithm)
// Wrapped to avoid OJS circular dependency on self-recursion
regIncBeta = {
// Continued fraction for incomplete beta (Numerical Recipes betacf)
// Evaluated with the modified Lentz method; TINY guards divisions by ~0.
function betacf(a, b, x) {
const qab = a + b, qap = a + 1, qam = a - 1
const TINY = 1e-30, EPS = 1e-14, MAXITER = 200
let c = 1
let d = 1 - qab * x / qap
if (Math.abs(d) < TINY) d = TINY
d = 1 / d
let h = d
for (let m = 1; m <= MAXITER; m++) {
const m2 = 2 * m
// Even step
let aa = m * (b - m) * x / ((qam + m2) * (a + m2))
d = 1 + aa * d; if (Math.abs(d) < TINY) d = TINY
c = 1 + aa / c; if (Math.abs(c) < TINY) c = TINY
d = 1 / d; h *= d * c
// Odd step
aa = -(a + m) * (qab + m) * x / ((a + m2) * (qap + m2))
d = 1 + aa * d; if (Math.abs(d) < TINY) d = TINY
c = 1 + aa / c; if (Math.abs(c) < TINY) c = TINY
d = 1 / d
const del = d * c
h *= del
// Converged when the multiplicative update is within EPS of 1
if (Math.abs(del - 1) < EPS) break
}
return h
}
// Regularized incomplete beta I_x(a, b): CDF building block for Student-t
function _rib(a, b, x) {
if (x < 0 || x > 1) return NaN
if (x === 0) return 0
if (x === 1) return 1
// Use symmetry relation for numerical stability
// (the continued fraction converges fast only for x < (a+1)/(a+b+2))
if (x > (a + 1) / (a + b + 2)) {
return 1 - _rib(b, a, 1 - x)
}
// Prefactor x^a (1-x)^b / (a B(a,b)), computed in log space to avoid overflow
const lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)
const front = Math.exp(Math.log(x) * a + Math.log(1 - x) * b - lbeta) / a
return front * betacf(a, b, x)
}
return _rib
}// Student-t quantile via Newton-Raphson
qt = (p, d) => {
  // Student-t quantile: invert tCDF(., d) with Newton-Raphson.
  // p: probability in (0, 1); d: degrees of freedom.
  if (p <= 0) return -Infinity
  if (p >= 1) return Infinity
  if (p === 0.5) return 0
  // Start from the normal quantile, a good guess for moderate d
  let q = qnorm(p)
  for (let iter = 0; iter < 50; iter++) {
    const residual = tCDF(q, d) - p
    const slope = tPDF(q, d)
    // Bail out if the density underflows (flat tail): Newton step undefined
    if (Math.abs(slope) < 1e-30) break
    const step = residual / slope
    q -= step
    // Relative convergence test on the step size
    if (Math.abs(step) < 1e-12 * (1 + Math.abs(q))) break
  }
  return q
}
// Asymmetric t PDF
asymTPDF = (z, d1, d2) => {
  // Asymmetric t density: two scaled t kernels pasted at z = -A/B.
  // d1 > 2 controls tail fatness; -1 < d2 < 1 controls asymmetry.
  // The two branches share the same kernel and differ only in the
  // scale applied to u: (1 - d2) left of the paste point, (1 + d2) right.
  const { A, B, C } = asymTConsts(d1, d2)
  const scale = z < -A / B ? 1 - d2 : 1 + d2
  const u = (B * z + A) / scale
  return B * C * Math.pow(1 + (u * u) / (d1 - 2), -(1 + d1) / 2)
}
// Asymmetric t quantile (inverse CDF)
asymTQuantile = (p, d1, d2) => {
  // Asymmetric t quantile (inverse CDF), piecewise: maps p back into a
  // standard Student-t quantile on the appropriate side of the paste point.
  // Fix: drop the unused `C` binding from the destructured constants.
  const { A, B } = asymTConsts(d1, d2)
  // CDF mass lying to the left of the paste point z = -A/B
  const threshold = (1 - d2) / 2
  if (p < threshold) {
    // Left piece: rescale p into the left-side t kernel
    const q = p / (1 - d2)
    return (1 / B) * ((1 - d2) * Math.sqrt((d1 - 2) / d1) * qt(q, d1) - A)
  }
  // Right piece: shift p past the threshold and rescale into (0.5, 1)
  const q = (p - threshold) / (1 + d2) + 0.5
  return (1 / B) * ((1 + d2) * Math.sqrt((d1 - 2) / d1) * qt(q, d1) - A)
}// Asymmetric t CDF
asymTCDF = (z, d1, d2) => {
  // Asymmetric t CDF, piecewise around the paste point z = -A/B,
  // expressed through the standard Student-t CDF on each side.
  // Fix: drop the unused `C` binding from the destructured constants.
  const { A, B } = asymTConsts(d1, d2)
  const cutoff = -A / B
  // Converts the standardized-t argument back to a standard-t argument
  const stdScale = Math.sqrt((d1 - 2) / d1)
  if (z < cutoff) {
    // Left piece carries total mass (1 - d2) / 2 up to the cutoff
    const u = (B * z + A) / ((1 - d2) * stdScale)
    return (1 - d2) * tCDF(u, d1)
  }
  // Right piece: mass at the cutoff plus the rescaled right-tail t mass
  const u = (B * z + A) / ((1 + d2) * stdScale)
  return (1 - d2) / 2 + (1 + d2) * (tCDF(u, d1) - 0.5)
}// Asymmetric t moments
asymTMoments = (d1, d2) => {
// Skewness and excess kurtosis of the asymmetric t, computed from the raw
// moments m2..m4 of the unshifted variable and the location/scale constants A, B.
const { A, B, C } = asymTConsts(d1, d2)
const m2 = 1 + 3 * d2 * d2
// Third raw moment exists only for d1 > 3, fourth only for d1 > 4 (else NaN)
const m3 = d1 > 3 ? 16 * C * d2 * (1 + d2 * d2) * (d1 - 2) * (d1 - 2) / ((d1 - 1) * (d1 - 3)) : NaN
const m4 = d1 > 4 ? 3 * (d1 - 2) / (d1 - 4) * (1 + 10 * d2 * d2 + 5 * Math.pow(d2, 4)) : NaN
// Central skewness and kurtosis of the standardized variable (B z + A form)
const skew = (m3 - 3 * A * m2 + 2 * A * A * A) / (B * B * B)
// Note: kurt is EXCESS kurtosis (normal = 0), hence the trailing "- 3"
const kurt = (m4 - 4 * A * m3 + 6 * A * A * m2 - 3 * Math.pow(A, 4)) / Math.pow(B, 4) - 3
return { skew, kurt }
}// Modified ES from Boudt, Peterson and Croux (2008, Journal of Risk).
// Integrates z * dG_2(z) (second-order Edgeworth density) up to the CF quantile g.
// The I^q integrals simplify so all Phi(g) terms cancel.
// Matches PerformanceAnalytics::ES(method="modified").
// Operational floor (min with g) ensures ES >= VaR.
// p: tail probability; zeta1: skewness; zeta2: excess kurtosis;
// sigma: portfolio volatility (fraction). Returns ES as a positive fraction.
// Fix: removed dead `const z = qnorm(p)` — computed but never used.
cfES = (p, zeta1, zeta2, sigma) => {
  const g = cfQuantile(p, zeta1, zeta2)
  const phi_g = normalPDF(g)
  const s = zeta1, k = zeta2
  // Conditional expectation of z below g under the Edgeworth expansion G_2
  const eg = -(phi_g / p) * (1
    + (s / 6) * g * g * g
    + (k / 24) * (g * g * g * g - 2 * g * g - 1)
    + (s * s / 72) * (Math.pow(g, 6) - 9 * Math.pow(g, 4) + 9 * g * g + 3))
  // min with g: never report an ES smaller than the CF VaR
  return -sigma * Math.min(eg, g)
}// Asymmetric t ES (as a positive number, for p < (1-d2)/2)
asymTES = (p, d1, d2, sigma) => {
// Expected Shortfall for the asymmetric t, valid when the quantile falls in
// the left piece (p < (1-d2)/2, per the header above). Integrates z * f(z)
// up to the quantile Q; returned as a positive fraction of the portfolio.
const { A, B, C } = asymTConsts(d1, d2)
const Q = asymTQuantile(p, d1, d2)
// First integral term
// (closed form of the kernel integral; note the continuation lines below
// are one expression — the leading "*" continues the const initializer)
const u = (B * Q + A) / (1 - d2)
const term1 = C * (1 - d2) * (1 - d2) / (B * p)
* Math.pow(1 + u * u / (d1 - 2), (1 - d1) / 2)
* (d1 - 2) / (d1 - 1)
// Second integral term (involves Student-t CDF)
// tArg rescales u to a standard Student-t argument; the exp(lgamma - lgamma)
// factor is the reciprocal of the t normalizing constant, in log space
const tArg = Math.sqrt(d1 / (d1 - 2)) * u
const term2 = A * C * (1 - d2) / (B * p)
* Math.sqrt(Math.PI * (d1 - 2)) * Math.exp(lgamma(d1 / 2) - lgamma((d1 + 1) / 2))
* tCDF(tArg, d1)
return sigma * (term1 + term2)
}// PRNG: Seeded pseudo-random number generator (Mulberry32)
// Bundled as an object so all functions are accessible from other cells
rng_utils = {
// Mulberry32: fast 32-bit PRNG; returns a closure yielding floats in [0, 1)
function mulberry32(seed) {
return function() {
seed |= 0; seed = seed + 0x6D2B79F5 | 0
let t = Math.imul(seed ^ seed >>> 15, 1 | seed)
t = t + Math.imul(t ^ t >>> 7, 61 | t) ^ t
return ((t ^ t >>> 14) >>> 0) / 4294967296
}
}
// Standard normal draw via Box-Muller (uses one of the two available variates)
// NOTE(review): if rng() returns exactly 0, Math.log(0) = -Infinity; extremely
// rare but unguarded — confirm acceptable for simulation use
function boxMuller(rng) {
const u1 = rng(), u2 = rng()
return Math.sqrt(-2 * Math.log(u1)) * Math.cos(2 * Math.PI * u2)
}
// Gamma(shape, 1) sampler via the Marsaglia-Tsang squeeze method
function gammaSample(rng, shape) {
if (shape < 1) {
// Boost shape < 1 using Gamma(shape) = Gamma(shape + 1) * U^(1/shape)
return gammaSample(rng, shape + 1) * Math.pow(rng(), 1 / shape)
}
const d = shape - 1 / 3
const c = 1 / Math.sqrt(9 * d)
while (true) {
let x, v
do {
x = boxMuller(rng)
v = 1 + c * x
} while (v <= 0)
v = v * v * v
const u = rng()
// Fast squeeze acceptance test first, then the exact log test
if (u < 1 - 0.0331 * (x * x) * (x * x)) return d * v
if (Math.log(u) < 0.5 * x * x + d * (1 - v + Math.log(v))) return d * v
}
}
// Chi-squared(d) = 2 * Gamma(d/2, 1)
function chiSquaredSample(rng, d) {
return 2 * gammaSample(rng, d / 2)
}
// Standardized Student-t draw: t variate scaled by sqrt((d-2)/d) so the
// result has unit variance (requires d > 2)
function stdTSample(rng, d) {
const normal = boxMuller(rng)
const chi2 = chiSquaredSample(rng, d)
return normal / Math.sqrt(chi2 / d) * Math.sqrt((d - 2) / d)
}
// Only these three are consumed by other cells; gamma/chi2 stay internal
return { mulberry32, boxMuller, stdTSample }
}1. QQ plot explorer
The quantile-quantile (QQ) plot compares the empirical quantiles of a sample against the theoretical quantiles of a reference distribution. If the data follows the reference distribution, the points lie on the 45-degree line. Systematic deviations reveal fat tails, skewness, or other departures from the reference.
For standardized returns \(z_i\) (sorted in ascending order), the QQ plot against the normal is:
\[ \{X_i, Y_i\} = \left\{ \Phi^{-1}_{(i-0.5)/T}, \; z_i \right\} \]
Tip
How to experiment
Select different distributions and adjust the degrees of freedom. With the standardized t, watch the S-shaped departure emerge as \(d\) decreases (fatter tails). With the asymmetric t, set \(d_2 < 0\) to see the left tail deviate more than the right, which is the typical pattern for equity returns.
// Generate sample
// Draws qqN observations from the selected distribution, sorted ascending
// for quantile-quantile plotting. Reruns whenever qqReseedCount changes.
qqSample = {
qqReseedCount // trigger reactivity on reseed
// Deterministic seed offset by the reseed counter: each reseed gives a new stream
const rng = rng_utils.mulberry32(42 + qqReseedCount)
const n = qqN
const samples = []
for (let i = 0; i < n; i++) {
if (qqDist === "Normal") {
samples.push(rng_utils.boxMuller(rng))
} else if (qqDist === "Standardized t") {
samples.push(rng_utils.stdTSample(rng, qqDfEff))
} else {
// Asymmetric t via inverse CDF
samples.push(asymTQuantile(rng(), qqDfEff, qqD2Eff))
}
}
// Numeric comparator required: default sort is lexicographic
return samples.sort((a, b) => a - b)
}qqStats = {
// Sample moments of qqSample plus the matching theoretical values.
const n = qqSample.length
const mean = qqSample.reduce((a, b) => a + b, 0) / n
// Unbiased sample variance (n - 1 denominator)
const variance = qqSample.reduce((a, b) => a + (b - mean) ** 2, 0) / (n - 1)
const sd = Math.sqrt(variance)
// Standardize, then take third/fourth moments for skewness and (raw) kurtosis
const z = qqSample.map(x => (x - mean) / sd)
const skew = z.reduce((a, b) => a + b ** 3, 0) / n
const kurt = z.reduce((a, b) => a + b ** 4, 0) / n
// Theoretical values
let thSkew = 0, thKurt = 0
if (qqDist === "Normal") {
thSkew = 0; thKurt = 3
} else if (qqDist === "Standardized t") {
thSkew = 0
// Kurtosis 6/(d-4) + 3 exists only for d > 4; infinite otherwise
thKurt = qqDfEff > 4 ? 6 / (qqDfEff - 4) + 3 : Infinity
} else {
// asymTMoments returns EXCESS kurtosis, so add 3 for raw kurtosis
const mom = asymTMoments(qqDfEff, qqD2Eff)
thSkew = mom.skew
thKurt = mom.kurt + 3
}
return { mean, variance, sd, skew, kurt, thSkew, thKurt }
}{
// QQ plot: symmetric axis limit covering the most extreme sample and
// theoretical quantiles (never below 4), with a 5% margin.
const lim = Math.max(
4,
Math.max(Math.abs(qqPoints[0].empirical), Math.abs(qqPoints[qqPoints.length - 1].empirical)),
Math.max(Math.abs(qqPoints[0].theoretical), Math.abs(qqPoints[qqPoints.length - 1].theoretical))
) * 1.05
return Plot.plot({
height: 400,
marginLeft: 55,
marginRight: 20,
x: { label: "Normal theoretical quantiles", grid: true, domain: [-lim, lim] },
y: { label: "Empirical quantiles", grid: true, domain: [-lim, lim] },
marks: [
// Dashed 45-degree reference line: points fall on it if the data is normal
Plot.line([{ x: -lim, y: -lim }, { x: lim, y: lim }], {
x: "x", y: "y", stroke: "#d62728", strokeWidth: 2, strokeDasharray: "6 3"
}),
// One translucent dot per sorted observation
Plot.dot(qqPoints, {
x: "theoretical", y: "empirical", fill: "#4682b4", r: 1.5, fillOpacity: 0.4
})
]
})
}html`<div style="display:flex; gap:18px; font-size:0.85rem; margin-top:-6px; margin-bottom:2px; flex-wrap:wrap;">
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="2" stroke-dasharray="6 3"/></svg> 45° reference</span>
<span><svg width="10" height="10"><circle cx="5" cy="5" r="3" fill="#4682b4" opacity="0.6"/></svg> Sample quantiles</span>
</div>`html`<p style="color:#666; font-size:0.85rem;">QQ plot of ${qqN} simulated observations from the ${qqDist} distribution${qqDist === "Standardized t" ? ` (d = ${qqDfEff})` : qqDist === "Asymmetric t" ? ` (d₁ = ${qqDfEff}, d₂ = ${qqD2Eff.toFixed(2)})` : ""} against the standard normal. Points above the line in the left tail indicate a fatter left tail than normal; points below the line in the right tail indicate a fatter right tail.</p>`qqDensityData = {
const pts = []
for (let z = -5; z <= 5; z += 0.02) {
const normalD = normalPDF(z)
let chosenD
if (qqDist === "Normal") {
chosenD = normalD
} else if (qqDist === "Standardized t") {
chosenD = stdTPDF(z, qqDfEff)
} else {
chosenD = asymTPDF(z, qqDfEff, qqD2Eff)
}
pts.push({ z, normal: normalD, chosen: chosenD })
}
return pts
}Plot.plot({
height: 360,
marginLeft: 55,
marginRight: 20,
x: { label: "z (standardized return)", grid: false },
y: { label: "Density", grid: true },
marks: [
Plot.line(qqDensityData, {
x: "z", y: "normal", stroke: "#888", strokeWidth: 1.5, strokeDasharray: "6 3"
}),
Plot.line(qqDensityData, {
x: "z", y: "chosen", stroke: "#d62728", strokeWidth: 2.5
}),
Plot.text([{ x: 2.5, y: normalPDF(2.5) + 0.01, label: "Normal" }], {
x: "x", y: "y", text: "label", fill: "#888", fontSize: 11
}),
Plot.text([{ x: -2.5, y: (qqDist === "Normal" ? normalPDF(-2.5) : qqDist === "Standardized t" ? stdTPDF(-2.5, qqDfEff) : asymTPDF(-2.5, qqDfEff, qqD2Eff)) + 0.01, label: qqDist }], {
x: "x", y: "y", text: "label", fill: "#d62728", fontSize: 11, fontWeight: "bold"
}),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})html`<div style="display:flex; gap:18px; font-size:0.85rem; margin-top:-6px; margin-bottom:2px; flex-wrap:wrap;">
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#888" stroke-width="1.5" stroke-dasharray="6 3"/></svg> Normal</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="2.5"/></svg> ${qqDist}</span>
</div>`qqLeftTailData = {
const pts = []
for (let z = -6; z <= -1.5; z += 0.02) {
const normalD = normalPDF(z)
let chosenD
if (qqDist === "Normal") {
chosenD = normalD
} else if (qqDist === "Standardized t") {
chosenD = stdTPDF(z, qqDfEff)
} else {
chosenD = asymTPDF(z, qqDfEff, qqD2Eff)
}
pts.push({ z, normal: normalD, chosen: chosenD })
}
return pts
}Plot.plot({
height: 360,
marginLeft: 55,
marginRight: 20,
x: { label: "z (standardized return)", grid: false, domain: [-6, -1.5] },
y: { label: "Density", grid: true },
marks: [
Plot.areaY(qqLeftTailData, {
x: "z", y: "normal", fill: "#888", fillOpacity: 0.1
}),
Plot.areaY(qqLeftTailData, {
x: "z", y: "chosen", fill: "#d62728", fillOpacity: 0.1
}),
Plot.line(qqLeftTailData, {
x: "z", y: "normal", stroke: "#888", strokeWidth: 1.5, strokeDasharray: "6 3"
}),
Plot.line(qqLeftTailData, {
x: "z", y: "chosen", stroke: "#d62728", strokeWidth: 2.5
}),
Plot.text([{ x: -2, y: normalPDF(-2) + 0.003, label: "Normal" }], {
x: "x", y: "y", text: "label", fill: "#888", fontSize: 11
}),
Plot.text([{ x: -3.5, y: (qqDist === "Normal" ? normalPDF(-3.5) : qqDist === "Standardized t" ? stdTPDF(-3.5, qqDfEff) : asymTPDF(-3.5, qqDfEff, qqD2Eff)) + 0.003, label: qqDist }], {
x: "x", y: "y", text: "label", fill: "#d62728", fontSize: 11, fontWeight: "bold"
}),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})html`<div style="display:flex; gap:18px; font-size:0.85rem; margin-top:-6px; margin-bottom:2px; flex-wrap:wrap;">
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#888" stroke-width="1.5" stroke-dasharray="6 3"/></svg> Normal</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="2.5"/></svg> ${qqDist}</span>
</div>`html`<table class="table" style="width:100%;">
<thead><tr><th>Statistic</th><th>Sample</th><th>Theoretical</th></tr></thead>
<tbody>
<tr><td style="font-weight:500;">Mean</td><td>${qqStats.mean.toFixed(4)}</td><td>0</td></tr>
<tr><td style="font-weight:500;">Variance</td><td>${qqStats.variance.toFixed(4)}</td><td>1</td></tr>
<tr><td style="font-weight:500;">Skewness (ζ₁)</td><td>${qqStats.skew.toFixed(4)}</td><td>${isFinite(qqStats.thSkew) ? qqStats.thSkew.toFixed(4) : "∞"}</td></tr>
<tr><td style="font-weight:500;">Kurtosis</td><td style="font-weight:700;">${qqStats.kurt.toFixed(4)}</td><td style="font-weight:700;">${isFinite(qqStats.thKurt) ? qqStats.thKurt.toFixed(4) : "∞"}</td></tr>
<tr><td style="font-weight:500;">Excess kurtosis (ζ₂)</td><td>${(qqStats.kurt - 3).toFixed(4)}</td><td>${isFinite(qqStats.thKurt) ? (qqStats.thKurt - 3).toFixed(4) : "∞"}</td></tr>
</tbody></table>
<p style="color:#666; font-size:0.85rem;">The normal distribution has kurtosis = 3 (excess kurtosis = 0) and skewness = 0. Higher excess kurtosis means fatter tails. Negative skewness means the left tail is heavier than the right. For the Student-t family, the fourth moment (and hence kurtosis) only exists when d > 4; for d ≤ 4 the theoretical kurtosis is ∞.</p>`2. Cornish-Fisher VaR and ES
The Cornish-Fisher (CF) approximation adjusts normal distribution quantiles for skewness (\(\zeta_1\)) and excess kurtosis (\(\zeta_2\)) using a Taylor expansion:
\[ CF_p^{-1} = \Phi_p^{-1} + \frac{\zeta_1}{6}\left[(\Phi_p^{-1})^2 - 1\right] + \frac{\zeta_2}{24}\left[(\Phi_p^{-1})^3 - 3\Phi_p^{-1}\right] - \frac{\zeta_1^2}{36}\left[2(\Phi_p^{-1})^3 - 5\Phi_p^{-1}\right] \]
\[ VaR^p = -\sigma_{PF} \cdot CF_p^{-1} \]
When \(\zeta_1 = \zeta_2 = 0\), the CF quantile reduces to the normal quantile. The ES uses the modified Expected Shortfall from Boudt et al. (2008), which integrates the second-order Edgeworth density up to the CF quantile:
\[ ES^p = -\sigma_{PF} \cdot \min\left\{E_{G_2},\; CF_p^{-1}\right\} \]
where, with \(g = CF_p^{-1}\):
\[ E_{G_2} = \frac{-\phi(g)}{p}\left[1 + \frac{\zeta_1}{6}g^3 + \frac{\zeta_2}{24}\left(g^4 - 2g^2 - 1\right) + \frac{\zeta_1^2}{72}\left(g^6 - 9g^4 + 9g^2 + 3\right)\right] \]
The \(\min\) operator ensures \(ES \geq VaR\).
Tip
How to experiment
Start with zero skewness and kurtosis (normal case), then increase excess kurtosis to see VaR grow. Next, add negative skewness to see a further increase. The waterfall chart in the “Quantile breakdown” tab shows exactly how each moment contributes to the adjustment.
// Cornish-Fisher quantile decomposition plus VaR/ES for the current controls
// (cfP: tail probability, cfSigma: volatility %, cfZeta1/2: skew, excess kurt).
cfCalc = {
// Volatility as a fraction of the portfolio
const s = cfSigma / 100
const z = qnorm(cfP)
// CF quantile components
// (Taylor-expansion terms; they sum to the CF-adjusted quantile cfQ)
const baseline = z
const skewTerm = (cfZeta1 / 6) * (z * z - 1)
const kurtTerm = (cfZeta2 / 24) * (z * z * z - 3 * z)
const skewSqTerm = -(cfZeta1 * cfZeta1 / 36) * (2 * z * z * z - 5 * z)
const cfQ = baseline + skewTerm + kurtTerm + skewSqTerm
// VaR is minus sigma times the (negative) left-tail quantile
const normalVaR = -s * z
const cfVaR = -s * cfQ
const nES = normalES(cfP, s)
const cES = cfES(cfP, cfZeta1, cfZeta2, s)
return {
z, cfQ, normalVaR, cfVaR, nES, cES, s,
baseline, skewTerm, kurtTerm, skewSqTerm,
// Percentage differences of CF vs normal, for the summary table
varPctDiff: (cfVaR / normalVaR - 1) * 100,
esPctDiff: (cES / nES - 1) * 100
}
}Plot.plot({
height: 360,
marginLeft: 55,
marginRight: 20,
x: { label: "Return (%)", grid: false },
y: { label: "Density", grid: true },
marks: [
Plot.areaY(cfTailCF, {
x: "x", y: "density",
fill: "#d62728", fillOpacity: 0.15,
curve: "linear"
}),
Plot.line(cfDensityData, {
x: "x", y: "density", stroke: "#4682b4", strokeWidth: 2.5
}),
Plot.ruleX([-cfCalc.normalVaR * 100], {
stroke: "#4682b4", strokeWidth: 2.5, strokeDasharray: "6 3"
}),
Plot.ruleX([-cfCalc.cfVaR * 100], {
stroke: "#d62728", strokeWidth: 2.5
}),
Plot.ruleX([-cfCalc.nES * 100], {
stroke: "#4682b4", strokeWidth: 1.5, strokeDasharray: "3 3"
}),
Plot.ruleX([-cfCalc.cES * 100], {
stroke: "#d62728", strokeWidth: 1.5, strokeDasharray: "3 3"
}),
Plot.text([{ x: -cfCalc.normalVaR * 100, label: "Normal VaR" }], {
x: "x", y: cfDensityData.reduce((m, d) => Math.max(m, d.density), 0) * 0.95,
text: "label", fill: "#4682b4", fontWeight: "bold", fontSize: 11, dx: 35
}),
Plot.text([{ x: -cfCalc.cfVaR * 100, label: "CF VaR" }], {
x: "x", y: cfDensityData.reduce((m, d) => Math.max(m, d.density), 0) * 0.85,
text: "label", fill: "#d62728", fontWeight: "bold", fontSize: 11, dx: -25
}),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})html`<div style="display:flex; gap:18px; font-size:0.85rem; margin-top:-6px; margin-bottom:2px; flex-wrap:wrap;">
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#4682b4" stroke-width="2.5" stroke-dasharray="6 3"/></svg> Normal VaR</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="2.5"/></svg> CF VaR</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#4682b4" stroke-width="1.5" stroke-dasharray="3 3"/></svg> Normal ES</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="1.5" stroke-dasharray="3 3"/></svg> CF ES</span>
</div>`html`<p style="color:#666; font-size:0.85rem;">Normal return distribution (σ = ${cfSigma.toFixed(1)}%) with VaR and ES for both normal and Cornish-Fisher (ζ₁ = ${cfZeta1.toFixed(1)}, ζ₂ = ${cfZeta2.toFixed(1)}) approaches. The shaded area represents the tail probability p = ${(cfP * 100).toFixed(1)}% at the CF quantile.</p>`cfWaterfall = [
{ term: "Normal Φ⁻¹(p)", value: cfCalc.baseline, cumulative: cfCalc.baseline },
{ term: "+ Skewness", value: cfCalc.skewTerm, cumulative: cfCalc.baseline + cfCalc.skewTerm },
{ term: "+ Kurtosis", value: cfCalc.kurtTerm, cumulative: cfCalc.baseline + cfCalc.skewTerm + cfCalc.kurtTerm },
{ term: "+ Skew²", value: cfCalc.skewSqTerm, cumulative: cfCalc.cfQ },
{ term: "= CF⁻¹(p)", value: cfCalc.cfQ, cumulative: cfCalc.cfQ }
]Plot.plot({
height: 340,
marginLeft: 100,
marginRight: 40,
marginBottom: 40,
x: { label: "Quantile value", grid: true },
y: { label: null, domain: cfWaterfall.map(d => d.term), padding: 0.3 },
marks: [
Plot.barX(cfWaterfall.slice(0, 4), {
y: "term", x: "value",
fill: d => d.value < 0 ? "#d62728" : "#2ca02c",
fillOpacity: 0.7
}),
Plot.barX([cfWaterfall[4]], {
y: "term", x: "value",
fill: "#4682b4", fillOpacity: 0.9
}),
// Component labels (first 4 bars): negative values to the left, positive to the right
Plot.text(cfWaterfall.slice(0, 4).filter(d => d.value < 0), {
y: "term", x: "value",
text: d => d.value.toFixed(4),
dx: -5, textAnchor: "end",
fontSize: 11, fontWeight: "bold"
}),
Plot.text(cfWaterfall.slice(0, 4).filter(d => d.value >= 0), {
y: "term", x: "value",
text: d => d.value.toFixed(4),
dx: 5, textAnchor: "start",
fontSize: 11, fontWeight: "bold"
}),
// Total bar label: place inside the bar to avoid overlapping y-axis label
Plot.text([cfWaterfall[4]], {
y: "term", x: "value",
text: d => d.value.toFixed(4),
dx: 5, textAnchor: "start",
fill: "white", fontSize: 11, fontWeight: "bold"
}),
Plot.ruleX([0], { stroke: "#888" })
]
})html`<p style="color:#666; font-size:0.85rem;">Decomposition of the CF quantile into its four components. The baseline is the normal quantile Φ⁻¹(${(cfP * 100).toFixed(1)}%) = ${cfCalc.baseline.toFixed(4)}. Negative skewness pushes the quantile further left (red); excess kurtosis also pushes it left; the squared-skewness term provides a correction. The final CF quantile is ${cfCalc.cfQ.toFixed(4)} vs. the normal ${cfCalc.baseline.toFixed(4)}.</p>`html`<table class="table" style="width:100%;">
<thead><tr><th>Measure</th><th>Normal</th><th>Cornish-Fisher</th><th>% Difference</th></tr></thead>
<tbody>
<tr><td style="font-weight:500;">Quantile</td><td>${cfCalc.z.toFixed(4)}</td><td>${cfCalc.cfQ.toFixed(4)}</td><td>---</td></tr>
<tr><td style="font-weight:500;">VaR (% of portfolio)</td><td>${(cfCalc.normalVaR * 100).toFixed(4)}%</td><td style="font-weight:700;">${(cfCalc.cfVaR * 100).toFixed(4)}%</td><td style="font-weight:700; color:${cfCalc.varPctDiff > 0 ? '#d62728' : '#2ca02c'};">${cfCalc.varPctDiff > 0 ? "+" : ""}${cfCalc.varPctDiff.toFixed(1)}%</td></tr>
<tr><td style="font-weight:500;">ES (% of portfolio)</td><td>${(cfCalc.nES * 100).toFixed(4)}%</td><td style="font-weight:700;">${(cfCalc.cES * 100).toFixed(4)}%</td><td style="font-weight:700; color:${cfCalc.esPctDiff > 0 ? '#d62728' : '#2ca02c'};">${cfCalc.esPctDiff > 0 ? "+" : ""}${cfCalc.esPctDiff.toFixed(1)}%</td></tr>
<tr><td style="font-weight:500;">ES / VaR ratio</td><td>${(cfCalc.nES / cfCalc.normalVaR).toFixed(4)}</td><td>${(cfCalc.cES / cfCalc.cfVaR).toFixed(4)}</td><td>---</td></tr>
</tbody></table>
<p style="color:#666; font-size:0.85rem;">With ζ₁ = ${cfZeta1.toFixed(1)} and ζ₂ = ${cfZeta2.toFixed(1)}, the Cornish-Fisher VaR is ${Math.abs(cfCalc.varPctDiff).toFixed(1)}% ${cfCalc.varPctDiff > 0 ? "higher" : "lower"} than the normal VaR. Negative skewness and positive excess kurtosis both increase VaR, reflecting the fatter left tail.</p>`3. Standardized t distribution: VaR and ES
The standardized \(\tilde{t}(d)\) distribution has zero mean and unit variance, with fatter tails than the normal controlled by the degrees of freedom parameter \(d\):
\[ f_{\tilde{t}(d)}(z;d) = C(d)\left(1 + \frac{z^2}{d-2}\right)^{-(1+d)/2}, \quad d > 2 \]
where \(C(d)\) is the normalizing constant:
\[ C(d) = \frac{\Gamma((d+1)/2)}{\Gamma(d/2)\sqrt{\pi(d-2)}} \]
The VaR and ES formulas are:
\[ VaR^p = -\sigma_{PF} \sqrt{\frac{d-2}{d}} \cdot t_p^{-1}(d), \qquad ES^p = -\sigma_{PF} \cdot \frac{C(d)}{p}\left[\left(1 + \frac{t_p^{-1}(d)^2}{d}\right)^{\frac{1-d}{2}} \frac{d-2}{1-d}\right] \]
The degrees of freedom can be estimated from the excess kurtosis via method of moments: \(\hat{d} = 6/\hat{\zeta}_2 + 4\).
Tip
How to experiment
Lower degrees of freedom means fatter tails. At \(d = 5\), the VaR is dramatically higher than normal. As \(d\) increases toward 50, the standardized t converges to the normal. The “VaR and ES vs d” tab shows this convergence path.
// Normal vs standardized-t VaR and ES for the current controls
// (tP: tail probability, tSigma: volatility %, tDf: degrees of freedom).
tCalc = {
// Volatility as a fraction of the portfolio
const s = tSigma / 100
const nVaR = -s * qnorm(tP)
const tVaR = -s * stdTQuantile(tP, tDf)
const nES = normalES(tP, s)
const tES = stdTES(tP, tDf, s)
// Excess kurtosis 6/(d-4) exists only for d > 4; infinite otherwise
const exKurt = tDf > 4 ? 6 / (tDf - 4) : Infinity
return {
s, nVaR, tVaR, nES, tES, exKurt,
// Percentage increase of the t-based measures over the normal ones
varPctDiff: (tVaR / nVaR - 1) * 100,
esPctDiff: (tES / nES - 1) * 100
}
}{
const nVaRz = qnorm(tP)
const tVaRz = stdTQuantile(tP, tDf)
return Plot.plot({
height: 360,
marginLeft: 55,
marginRight: 20,
x: { label: "z (standardized return)", grid: false },
y: { label: "Density", grid: true },
marks: [
Plot.areaY(tDensityData.filter(d => d.z <= tVaRz), {
x: "z", y: "studentT",
fill: "#d62728", fillOpacity: 0.15
}),
Plot.line(tDensityData, {
x: "z", y: "normal", stroke: "#4682b4", strokeWidth: 2, strokeDasharray: "6 3"
}),
Plot.line(tDensityData, {
x: "z", y: "studentT", stroke: "#d62728", strokeWidth: 2.5
}),
Plot.ruleX([nVaRz], {
stroke: "#4682b4", strokeWidth: 2, strokeDasharray: "6 3"
}),
Plot.ruleX([tVaRz], {
stroke: "#d62728", strokeWidth: 2.5
}),
Plot.text([{ x: nVaRz + 0.15, label: "Normal" }], {
x: "x", y: 0.02, text: "label", fill: "#4682b4", fontSize: 11, fontWeight: "bold"
}),
Plot.text([{ x: tVaRz - 0.15, label: "t̃(d)" }], {
x: "x", y: 0.04, text: "label", fill: "#d62728", fontSize: 11, fontWeight: "bold", textAnchor: "end"
}),
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})
}html`<div style="display:flex; gap:18px; font-size:0.85rem; margin-top:-6px; margin-bottom:2px; flex-wrap:wrap;">
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#4682b4" stroke-width="2" stroke-dasharray="6 3"/></svg> Normal (density & VaR)</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="2.5"/></svg> Standardized t̃(${tDf.toFixed(1)}) (density & VaR)</span>
</div>`Plot.plot({
height: 360,
marginLeft: 55,
marginRight: 20,
x: { label: "Degrees of freedom d", grid: false },
y: { label: "Risk measure (% of portfolio)", grid: true },
color: {
domain: ["VaR", "ES"],
range: ["#ff7f0e", "#d62728"],
legend: true
},
marks: [
Plot.ruleY([tCurveData.nVaR], {
stroke: "#ff7f0e", strokeWidth: 1.5, strokeDasharray: "6 3"
}),
Plot.ruleY([tCurveData.nES], {
stroke: "#d62728", strokeWidth: 1.5, strokeDasharray: "6 3"
}),
Plot.text([{ y: tCurveData.nVaR, label: "Normal VaR" }], {
x: 48, y: "y", text: "label", fill: "#ff7f0e", fontSize: 10, dy: -8
}),
Plot.text([{ y: tCurveData.nES, label: "Normal ES" }], {
x: 48, y: "y", text: "label", fill: "#d62728", fontSize: 10, dy: -8
}),
Plot.line(tCurveData.pts, {
x: "d", y: "VaR", stroke: "#ff7f0e", strokeWidth: 2.5
}),
Plot.line(tCurveData.pts, {
x: "d", y: "ES", stroke: "#d62728", strokeWidth: 2.5
}),
Plot.dot([{ d: tDf, VaR: tCalc.tVaR * 100 }], {
x: "d", y: "VaR", fill: "#ff7f0e", r: 6, stroke: "white", strokeWidth: 2
}),
Plot.dot([{ d: tDf, ES: tCalc.tES * 100 }], {
x: "d", y: "ES", fill: "#d62728", r: 6, stroke: "white", strokeWidth: 2
})
]
})html`<div style="display:flex; gap:18px; font-size:0.85rem; margin-top:-6px; margin-bottom:2px; flex-wrap:wrap;">
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#ff7f0e" stroke-width="2.5"/></svg> VaR (t̃)</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="2.5"/></svg> ES (t̃)</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#ff7f0e" stroke-width="1.5" stroke-dasharray="6 3"/></svg> Normal VaR</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="1.5" stroke-dasharray="6 3"/></svg> Normal ES</span>
</div>`html`<table class="table" style="width:100%;">
<thead><tr><th colspan="3">Method of moments: d̂ = 6/ζ̂₂ + 4</th></tr>
<tr><th>d</th><th>Excess kurtosis ζ₂ = 6/(d−4)</th><th>Tail behaviour</th></tr></thead>
<tbody>
${momTable.map(d => `<tr${d.highlighted ? ' style="background:#fff3cd;"' : ''}>
<td style="font-weight:500;">${d.d}</td>
<td>${d.exKurt}</td>
<td>${d.d <= 6 ? "Very fat tails" : d.d <= 10 ? "Fat tails" : d.d <= 20 ? "Moderate tails" : "Near normal"}</td>
</tr>`).join("")}
</tbody></table>
<p style="color:#666; font-size:0.85rem;">Current selection: d = ${tDf.toFixed(1)} → excess kurtosis ζ₂ = ${tCalc.exKurt === Infinity ? "∞" : tCalc.exKurt.toFixed(4)}. Conversely, if sample excess kurtosis is known, invert: d̂ = 6/ζ̂₂ + 4. The distribution requires d > 4 for finite kurtosis.</p>`html`<table class="table" style="width:100%;">
<thead><tr><th>Measure</th><th>Normal</th><th>Standardized t̃(${tDf.toFixed(1)})</th><th>% Difference</th></tr></thead>
<tbody>
<tr><td style="font-weight:500;">VaR (% of portfolio)</td><td>${(tCalc.nVaR * 100).toFixed(4)}%</td><td style="font-weight:700;">${(tCalc.tVaR * 100).toFixed(4)}%</td><td style="font-weight:700; color:#d62728;">+${tCalc.varPctDiff.toFixed(1)}%</td></tr>
<tr><td style="font-weight:500;">ES (% of portfolio)</td><td>${(tCalc.nES * 100).toFixed(4)}%</td><td style="font-weight:700;">${(tCalc.tES * 100).toFixed(4)}%</td><td style="font-weight:700; color:#d62728;">+${tCalc.esPctDiff.toFixed(1)}%</td></tr>
<tr><td style="font-weight:500;">ES / VaR ratio</td><td>${(tCalc.nES / tCalc.nVaR).toFixed(4)}</td><td>${(tCalc.tES / tCalc.tVaR).toFixed(4)}</td><td>---</td></tr>
<tr><td style="font-weight:500;">Excess kurtosis ζ₂</td><td>0</td><td>${tCalc.exKurt === Infinity ? "∞" : tCalc.exKurt.toFixed(4)}</td><td>---</td></tr>
<tr><td style="font-weight:500;">Skewness ζ₁</td><td>0</td><td>0 (symmetric)</td><td>---</td></tr>
</tbody></table>
<p style="color:#666; font-size:0.85rem;">The standardized t distribution captures fat tails but is symmetric, so it cannot capture skewness. If returns are negatively skewed, the symmetric t will compromise the right tail fit to improve the left. For p = ${(tP * 100).toFixed(1)}%, the t̃(${tDf.toFixed(1)}) VaR is ${tCalc.varPctDiff.toFixed(1)}% higher than the normal VaR.</p>`4. Asymmetric t distribution
The asymmetric t distribution extends the standardized t to capture both fat tails and skewness by pasting two scaled t distributions at a point \(-A/B\):
- \(d_1 > 2\): controls tail fatness (kurtosis)
- \(-1 < d_2 < 1\): controls asymmetry (skewness); \(d_2 = 0\) gives the symmetric t as a special case
\[ VaR^p = -\sigma_{PF} \cdot F_{asyt}^{-1}(p;\,d_1, d_2) \]
where the quantile function is piecewise, using the standard Student-t quantile \(t_q^{-1}(d_1)\) on each side with different scaling.
Tip
How to experiment
Set \(d_2 = 0\) to recover the symmetric t. Make \(d_2\) negative to fatten the left tail (typical for equities), and observe the VaR increase relative to both the normal and symmetric t. The “Skewness and kurtosis” tab reproduces the textbook’s Figure 6.5, showing how \(d_1\) and \(d_2\) jointly control these moments.
// Normal vs symmetric-t vs asymmetric-t VaR and ES for the current controls
// (asymP: tail probability, asymSigma: volatility %, asymD1/asymD2: t params).
asymCalc = {
// Volatility as a fraction of the portfolio
const s = asymSigma / 100
const nVaR = -s * qnorm(asymP)
const symVaR = -s * stdTQuantile(asymP, asymD1)
const asymVaR = -s * asymTQuantile(asymP, asymD1, asymD2)
const nES = normalES(asymP, s)
const symES = stdTES(asymP, asymD1, s)
const aES = asymTES(asymP, asymD1, asymD2, s)
// Theoretical skew / excess kurtosis implied by (d1, d2)
const mom = asymTMoments(asymD1, asymD2)
// Symmetric-t excess kurtosis for comparison; exists only for d1 > 4
const symExKurt = asymD1 > 4 ? 6 / (asymD1 - 4) : Infinity
return {
s, nVaR, symVaR, asymVaR, nES, symES, aES,
skew: mom.skew, kurt: mom.kurt, symExKurt,
// Percentage differences for the summary table
varPctNorm: (asymVaR / nVaR - 1) * 100,
varPctSym: (asymVaR / symVaR - 1) * 100,
esPctNorm: (aES / nES - 1) * 100
}
}{
// Density comparison: normal vs symmetric t vs asymmetric t, with each
// distribution's p-quantile (the VaR point) marked by a vertical rule.
const nQ = qnorm(asymP)
const symQ = stdTQuantile(asymP, asymD1)
const asymQ = asymTQuantile(asymP, asymD1, asymD2)
return Plot.plot({
height: 380,
marginLeft: 55,
marginRight: 20,
x: { label: "z (standardized return)", grid: false, domain: [-5.5, 5.5] },
y: { label: "Density", grid: true },
marks: [
// Dashed grey: standard normal density
Plot.line(asymDensityData, {
x: "z", y: "normal", stroke: "#888", strokeWidth: 1.5, strokeDasharray: "6 3"
}),
// Dotted blue: symmetric standardized t
Plot.line(asymDensityData, {
x: "z", y: "symT", stroke: "#4682b4", strokeWidth: 1.5, strokeDasharray: "3 3"
}),
// Solid red: asymmetric t (the focus of this section), drawn last so it sits on top
Plot.line(asymDensityData, {
x: "z", y: "asymT", stroke: "#d62728", strokeWidth: 2.5
}),
// Quantile rules, styled to match their density curves
Plot.ruleX([nQ], { stroke: "#888", strokeWidth: 1.5, strokeDasharray: "6 3" }),
Plot.ruleX([symQ], { stroke: "#4682b4", strokeWidth: 1.5, strokeDasharray: "3 3" }),
Plot.ruleX([asymQ], { stroke: "#d62728", strokeWidth: 2.5 }),
// Inline curve labels positioned just above each density at z = +/-3.5
Plot.text([{ x: 3.5, y: normalPDF(3.5) + 0.005, label: "Normal" }], {
x: "x", y: "y", text: "label", fill: "#888", fontSize: 10
}),
Plot.text([{ x: 3.5, y: stdTPDF(3.5, asymD1) + 0.005, label: "Sym. t" }], {
x: "x", y: "y", text: "label", fill: "#4682b4", fontSize: 10
}),
// Asym. t label goes in the LEFT tail, where it differs most when d2 < 0
Plot.text([{ x: -3.5, y: asymTPDF(-3.5, asymD1, asymD2) + 0.005, label: "Asym. t" }], {
x: "x", y: "y", text: "label", fill: "#d62728", fontSize: 10, fontWeight: "bold"
}),
// Faint baseline at density zero
Plot.ruleY([0], { stroke: "#888", strokeOpacity: 0.3 })
]
})
}html`<div style="display:flex; gap:18px; font-size:0.85rem; margin-top:-6px; margin-bottom:2px; flex-wrap:wrap;">
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#888" stroke-width="1.5" stroke-dasharray="6 3"/></svg> Normal</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#4682b4" stroke-width="1.5" stroke-dasharray="3 3"/></svg> Sym. t̃(${asymD1.toFixed(1)})</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="2.5"/></svg> Asym. t(${asymD1.toFixed(1)}, ${asymD2.toFixed(2)})</span>
</div>`{
// Two side-by-side panels in the spirit of Christoffersen (2012) Figure 6.5:
// skewness as a function of d2 (left) and excess kurtosis as a function of d1
// (right). A dot on each curve marks the currently selected parameter values.
const pw = Math.floor((width - 30) / 2)
const skewPlot = Plot.plot({
height: 300,
width: pw,
marginLeft: 55,
marginRight: 15,
x: { label: "d₂ (asymmetry parameter)", grid: true },
y: { label: "Skewness ζ₁", grid: true },
marks: [
Plot.line(skewCurveData, {
x: "d2", y: "skew", stroke: "#d62728", strokeWidth: 2.5
}),
// Current (d2, skew) selection
Plot.dot([{ d2: asymD2, skew: asymCalc.skew }], {
x: "d2", y: "skew", fill: "#d62728", r: 6, stroke: "white", strokeWidth: 2
}),
// Crosshairs through the symmetric case (d2 = 0, skew = 0)
Plot.ruleY([0], { stroke: "#888", strokeDasharray: "4 4" }),
Plot.ruleX([0], { stroke: "#888", strokeDasharray: "4 4" }),
// Annotation of the fixed d1; y is taken from the curve's last point so the
// label lands near the right end of the curve (0 if the curve is empty)
Plot.text([{ x: 0.5, y: skewCurveData.length > 0 ? skewCurveData[skewCurveData.length - 1].skew * 0.9 : 0, label: `d₁ = ${asymD1.toFixed(1)}` }], {
x: "x", y: "y", text: "label", fill: "#d62728", fontSize: 11
})
]
})
const kurtPlot = Plot.plot({
height: 300,
width: pw,
marginLeft: 55,
marginRight: 15,
x: { label: "d₁ (degrees of freedom)", grid: true },
y: { label: "Excess kurtosis ζ₂", grid: true },
marks: [
Plot.line(kurtCurveData, {
x: "d1", y: "kurt", stroke: "#4682b4", strokeWidth: 2.5
}),
// Current (d1, kurt) selection
Plot.dot([{ d1: asymD1, kurt: asymCalc.kurt }], {
x: "d1", y: "kurt", fill: "#4682b4", r: 6, stroke: "white", strokeWidth: 2
}),
Plot.ruleY([0], { stroke: "#888", strokeDasharray: "4 4" }),
// Annotation of the fixed d2; y picked ~70% along the curve to sit beside it
Plot.text([{ x: 20, y: kurtCurveData.length > 0 ? kurtCurveData[Math.floor(kurtCurveData.length * 0.7)].kurt : 0, label: `d₂ = ${asymD2.toFixed(2)}` }], {
x: "x", y: "y", text: "label", fill: "#4682b4", fontSize: 11
})
]
})
// Lay the two panels out horizontally in a flex container
const container = html`<div style="display:flex; gap:10px;"></div>`
container.appendChild(skewPlot)
container.appendChild(kurtPlot)
return container
}html`<p style="color:#666; font-size:0.85rem;"><strong>Left:</strong> Skewness as a function of d₂ for d₁ = ${asymD1.toFixed(1)}. Negative d₂ produces negative skewness (fatter left tail). <strong>Right:</strong> Excess kurtosis as a function of d₁ for d₂ = ${asymD2.toFixed(2)}. Lower d₁ means fatter tails. The dots mark the current parameter values. Compare with Christoffersen (2012), Figure 6.5.</p>`html`<table class="table" style="width:100%;">
<thead><tr><th>Measure</th><th>Normal</th><th>Sym. t̃(${asymD1.toFixed(1)})</th><th>Asym. t(${asymD1.toFixed(1)}, ${asymD2.toFixed(2)})</th></tr></thead>
<tbody>
<tr><td style="font-weight:500;">VaR (%)</td><td>${(asymCalc.nVaR * 100).toFixed(4)}%</td><td>${(asymCalc.symVaR * 100).toFixed(4)}%</td><td style="font-weight:700;">${(asymCalc.asymVaR * 100).toFixed(4)}%</td></tr>
<tr><td style="font-weight:500;">VaR vs Normal</td><td>---</td><td style="color:#d62728;">+${((asymCalc.symVaR / asymCalc.nVaR - 1) * 100).toFixed(1)}%</td><td style="font-weight:700; color:#d62728;">+${asymCalc.varPctNorm.toFixed(1)}%</td></tr>
<tr><td style="font-weight:500;">ES (%)</td><td>${(asymCalc.nES * 100).toFixed(4)}%</td><td>${(asymCalc.symES * 100).toFixed(4)}%</td><td style="font-weight:700;">${(asymCalc.aES * 100).toFixed(4)}%</td></tr>
<tr><td style="font-weight:500;">ES / VaR ratio</td><td>${(asymCalc.nES / asymCalc.nVaR).toFixed(4)}</td><td>${(asymCalc.symES / asymCalc.symVaR).toFixed(4)}</td><td>${(asymCalc.aES / asymCalc.asymVaR).toFixed(4)}</td></tr>
<tr><td style="font-weight:500;">Skewness ζ₁</td><td>0</td><td>0</td><td>${asymCalc.skew.toFixed(4)}</td></tr>
<tr><td style="font-weight:500;">Excess kurtosis ζ₂</td><td>0</td><td>${asymCalc.symExKurt === Infinity ? "∞" : asymCalc.symExKurt.toFixed(4)}</td><td>${isFinite(asymCalc.kurt) ? asymCalc.kurt.toFixed(4) : "∞"}</td></tr>
</tbody></table>
<p style="color:#666; font-size:0.85rem;">The asymmetric t captures both fat tails and skewness. With d₂ = ${asymD2.toFixed(2)} (${asymD2 < 0 ? "negative skewness" : asymD2 > 0 ? "positive skewness" : "symmetric"}), the VaR is ${asymCalc.varPctSym > 0 ? asymCalc.varPctSym.toFixed(1) + "% higher" : Math.abs(asymCalc.varPctSym).toFixed(1) + "% lower"} than the symmetric t. The symmetric t cannot distinguish between left and right tail risk.</p>`5. Grand comparison
This section provides a side-by-side comparison of all four parametric approaches for the same set of distributional assumptions. The skewness and excess kurtosis parameters are used directly by the Cornish-Fisher method; the standardized t degrees of freedom are estimated from the excess kurtosis via the method of moments (\(\hat{d} = 6/\hat{\zeta}_2 + 4\)).
Note
On the asymmetric t in this comparison
Matching the asymmetric t parameters to given skewness and kurtosis requires solving nonlinear equations numerically. For simplicity, this comparison uses the method-of-moments \(d_1\) from excess kurtosis and sets \(d_2\) to approximate the target skewness. The exact match would require iterative optimization.
gcCalc = {
const s = gcSigma / 100
const dMoM = Math.max(4.1, 6 / gcZeta2 + 4)
// Normal
const nVaR = -s * qnorm(gcP)
const nES = normalES(gcP, s)
// Cornish-Fisher
const cfQ = cfQuantile(gcP, gcZeta1, gcZeta2)
const cVaR = -s * cfQ
const cES = cfES(gcP, gcZeta1, gcZeta2, s)
// Standardized t (using method of moments d)
const tVaR = -s * stdTQuantile(gcP, dMoM)
const tES = stdTES(gcP, dMoM, s)
// Asymmetric t: use d1 from MoM, search d2 for skewness
// Simple bisection to find d2 that matches target skewness
let d2est = 0
if (Math.abs(gcZeta1) > 0.01 && dMoM > 3) {
let lo = -0.8, hi = 0.8
const target = gcZeta1
for (let iter = 0; iter < 50; iter++) {
const mid = (lo + hi) / 2
const mom = asymTMoments(dMoM, mid)
if (mom.skew < target) lo = mid
else hi = mid
}
d2est = (lo + hi) / 2
}
const aVaR = -s * asymTQuantile(gcP, dMoM, d2est)
const aES = asymTES(gcP, dMoM, d2est, s)
const aMom = asymTMoments(dMoM, d2est)
return {
nVaR, nES, cVaR, cES, tVaR, tES, aVaR, aES,
dMoM, d2est, s,
aSkew: aMom.skew, aKurt: aMom.kurt
}
}gcBarData = [
{ method: "Normal", measure: "VaR", value: gcCalc.nVaR * 100 },
{ method: "Cornish-Fisher", measure: "VaR", value: gcCalc.cVaR * 100 },
{ method: "Std. t", measure: "VaR", value: gcCalc.tVaR * 100 },
{ method: "Asym. t", measure: "VaR", value: gcCalc.aVaR * 100 },
{ method: "Normal", measure: "ES", value: gcCalc.nES * 100 },
{ method: "Cornish-Fisher", measure: "ES", value: gcCalc.cES * 100 },
{ method: "Std. t", measure: "ES", value: gcCalc.tES * 100 },
{ method: "Asym. t", measure: "ES", value: gcCalc.aES * 100 }
]Plot.plot({
height: 340,
marginLeft: 110,
marginRight: 40,
marginBottom: 40,
// Fixed method colors, shared with the ES bar chart below for consistency
color: {
domain: ["Normal", "Cornish-Fisher", "Std. t", "Asym. t"],
range: ["#888", "#ff7f0e", "#4682b4", "#d62728"],
legend: true
},
x: { label: "VaR (% of portfolio)", grid: true },
y: { label: null, domain: ["Normal", "Cornish-Fisher", "Std. t", "Asym. t"], padding: 0.3 },
marks: [
// Horizontal bars: one per method, VaR rows only
Plot.barX(gcBarData.filter(d => d.measure === "VaR"), {
y: "method", x: "value",
fill: "method",
fillOpacity: 0.8
}),
// Numeric value printed just past the end of each bar
Plot.text(gcBarData.filter(d => d.measure === "VaR"), {
y: "method", x: "value",
text: d => d.value.toFixed(3) + "%",
dx: 5, textAnchor: "start", fontSize: 11, fontWeight: "bold"
}),
// Zero baseline
Plot.ruleX([0], { stroke: "#888" })
]
})html`<p style="color:#666; font-size:0.85rem;">VaR at p = ${(gcP * 100).toFixed(1)}% for each method. The normal VaR serves as the baseline. Cornish-Fisher uses ζ₁ = ${gcZeta1.toFixed(1)}, ζ₂ = ${gcZeta2.toFixed(1)} directly. The standardized t uses d = ${gcCalc.dMoM.toFixed(1)} from the method of moments. The asymmetric t uses d₁ = ${gcCalc.dMoM.toFixed(1)}, d₂ = ${gcCalc.d2est.toFixed(3)}.</p>`Plot.plot({
height: 340,
marginLeft: 110,
marginRight: 40,
marginBottom: 40,
// Same method colors as the VaR bar chart above
color: {
domain: ["Normal", "Cornish-Fisher", "Std. t", "Asym. t"],
range: ["#888", "#ff7f0e", "#4682b4", "#d62728"],
legend: true
},
x: { label: "ES (% of portfolio)", grid: true },
y: { label: null, domain: ["Normal", "Cornish-Fisher", "Std. t", "Asym. t"], padding: 0.3 },
marks: [
// Horizontal bars: one per method, ES rows only
Plot.barX(gcBarData.filter(d => d.measure === "ES"), {
y: "method", x: "value",
fill: "method",
fillOpacity: 0.8
}),
// Numeric value printed just past the end of each bar
Plot.text(gcBarData.filter(d => d.measure === "ES"), {
y: "method", x: "value",
text: d => d.value.toFixed(3) + "%",
dx: 5, textAnchor: "start", fontSize: 11, fontWeight: "bold"
}),
// Zero baseline
Plot.ruleX([0], { stroke: "#888" })
]
}){
// Left-tail densities on a log y-scale, which makes the contrast between the
// normal's fast decay and the t distributions' heavier tails visible.
// Vertical rules mark each method's VaR quantile.
const nQ = qnorm(gcP)
const tQ = stdTQuantile(gcP, gcCalc.dMoM)
const aQ = asymTQuantile(gcP, gcCalc.dMoM, gcCalc.d2est)
return Plot.plot({
height: 340,
marginLeft: 55,
marginRight: 20,
x: { label: "z (standardized return)", grid: false, domain: [-6, -1] },
// Log scale with explicit decade ticks, formatted in scientific notation
y: { label: "Density (log scale)", grid: true, type: "log", domain: [1e-5, 0.2],
ticks: [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
tickFormat: d => d.toExponential(0) },
marks: [
// Dashed grey: normal tail
Plot.line(gcTailData, {
x: "z", y: "normal", stroke: "#888", strokeWidth: 2, strokeDasharray: "6 3"
}),
// Blue: symmetric standardized t tail
Plot.line(gcTailData, {
x: "z", y: "stdT", stroke: "#4682b4", strokeWidth: 2
}),
// Red: asymmetric t tail, drawn last so it sits on top
Plot.line(gcTailData, {
x: "z", y: "asymT", stroke: "#d62728", strokeWidth: 2.5
}),
// VaR quantiles, styled to match the curves
Plot.ruleX([nQ], { stroke: "#888", strokeWidth: 1.5, strokeDasharray: "6 3" }),
Plot.ruleX([tQ], { stroke: "#4682b4", strokeWidth: 1.5 }),
Plot.ruleX([aQ], { stroke: "#d62728", strokeWidth: 2 }),
// Curve labels stacked vertically near the right edge of the window
Plot.text([{ x: -1.5, y: 0.1, label: "Normal" }], {
x: "x", y: "y", text: "label", fill: "#888", fontSize: 10
}),
Plot.text([{ x: -1.5, y: 0.06, label: "Std. t" }], {
x: "x", y: "y", text: "label", fill: "#4682b4", fontSize: 10
}),
Plot.text([{ x: -1.5, y: 0.035, label: "Asym. t" }], {
x: "x", y: "y", text: "label", fill: "#d62728", fontSize: 10, fontWeight: "bold"
})
]
})
}html`<div style="display:flex; gap:18px; font-size:0.85rem; margin-top:-6px; margin-bottom:2px; flex-wrap:wrap;">
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#888" stroke-width="2" stroke-dasharray="6 3"/></svg> Normal</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#4682b4" stroke-width="2"/></svg> Std. t̃(${gcCalc.dMoM.toFixed(1)})</span>
<span><svg width="24" height="10"><line x1="0" y1="5" x2="24" y2="5" stroke="#d62728" stroke-width="2.5"/></svg> Asym. t(${gcCalc.dMoM.toFixed(1)}, ${gcCalc.d2est.toFixed(2)})</span>
</div>`html`<p style="color:#666; font-size:0.85rem;">Left tail of the density on a <strong>log scale</strong>, which makes the tail differences dramatically visible. The normal density drops off exponentially, while the t distributions drop off as a power law, much slower. The asymmetric t has even more mass in the left tail when d₂ < 0. Vertical lines mark each method's VaR quantile.</p>`html`<table class="table" style="width:100%;">
<thead><tr><th>Method</th><th>VaR (%)</th><th>ES (%)</th><th>ES/VaR</th><th>VaR vs Normal</th></tr></thead>
<tbody>
<tr><td style="font-weight:500;">Normal</td><td>${(gcCalc.nVaR * 100).toFixed(4)}%</td><td>${(gcCalc.nES * 100).toFixed(4)}%</td><td>${(gcCalc.nES / gcCalc.nVaR).toFixed(3)}</td><td>---</td></tr>
<tr><td style="font-weight:500;">Cornish-Fisher</td><td>${(gcCalc.cVaR * 100).toFixed(4)}%</td><td>${(gcCalc.cES * 100).toFixed(4)}%</td><td>${(gcCalc.cES / gcCalc.cVaR).toFixed(3)}</td><td style="color:#d62728; font-weight:700;">+${((gcCalc.cVaR / gcCalc.nVaR - 1) * 100).toFixed(1)}%</td></tr>
<tr><td style="font-weight:500;">Standardized t̃(${gcCalc.dMoM.toFixed(1)})</td><td>${(gcCalc.tVaR * 100).toFixed(4)}%</td><td>${(gcCalc.tES * 100).toFixed(4)}%</td><td>${(gcCalc.tES / gcCalc.tVaR).toFixed(3)}</td><td style="color:#d62728; font-weight:700;">+${((gcCalc.tVaR / gcCalc.nVaR - 1) * 100).toFixed(1)}%</td></tr>
<tr><td style="font-weight:500;">Asym. t(${gcCalc.dMoM.toFixed(1)}, ${gcCalc.d2est.toFixed(2)})</td><td style="font-weight:700;">${(gcCalc.aVaR * 100).toFixed(4)}%</td><td style="font-weight:700;">${(gcCalc.aES * 100).toFixed(4)}%</td><td>${(gcCalc.aES / gcCalc.aVaR).toFixed(3)}</td><td style="color:#d62728; font-weight:700;">+${((gcCalc.aVaR / gcCalc.nVaR - 1) * 100).toFixed(1)}%</td></tr>
</tbody></table>
<p style="color:#666; font-size:0.85rem;">Parameters: σ = ${gcSigma.toFixed(1)}%, p = ${(gcP * 100).toFixed(1)}%, ζ₁ = ${gcZeta1.toFixed(1)}, ζ₂ = ${gcZeta2.toFixed(1)}. The symmetric t uses d = 6/ζ₂ + 4 = ${gcCalc.dMoM.toFixed(1)} (method of moments). The asymmetric t uses d₁ = ${gcCalc.dMoM.toFixed(1)} and d₂ = ${gcCalc.d2est.toFixed(3)} (matched to target skewness ζ₁ = ${gcZeta1.toFixed(1)}, achieved: ${gcCalc.aSkew.toFixed(3)}). All non-normal methods produce higher VaR than the normal, reflecting the fat tails and negative skewness.</p>`References
Boudt, Kris, Brian Peterson, and Christophe Croux. 2008. “Estimation and Decomposition of Downside Risk for Portfolios with Non-Normal Returns.” The Journal of Risk 11 (2): 79–103.
Christoffersen, Peter F. 2012. Elements of Financial Risk Management. 2nd ed. Academic Press.