Premchan369 committed on
Commit
01cc2df
·
verified ·
1 Parent(s): b6c23e5

Add risk management engine: VaR/CVaR, stress testing, compliance monitor

Browse files
Files changed (1) hide show
  1. risk_management.py +703 -0
risk_management.py ADDED
@@ -0,0 +1,703 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Risk Management Engine: VaR, CVaR, Stress Testing, Compliance Layer
2
+
3
+ THE MOST IMPORTANT MODULE IN ALPHA FORGE.
4
+
5
+ A trader without risk management is a trader who will blow up.
6
+ This is not optional. This is the difference between compounding wealth
7
+ and losing everything in one bad week.
8
+ """
9
+ import numpy as np
10
+ import pandas as pd
11
+ from scipy import stats
12
+ from typing import Dict, List, Optional, Tuple, Callable
13
+ from dataclasses import dataclass
14
+ import warnings
15
+ warnings.filterwarnings('ignore')
16
+
17
+
18
@dataclass
class RiskLimits:
    """Risk limit configuration — THE CONTRACT WITH YOURSELF

    All thresholds are fractions of portfolio value (0.20 == 20%), except
    ``min_holding_days`` (an integer number of days) and the leverage /
    turnover multiples (``2.0`` == 200%).
    """

    # Position-level limits
    max_position_pct: float = 0.20          # No single position > 20%
    max_sector_pct: float = 0.40            # No sector > 40%
    max_leverage: float = 2.0               # Gross exposure < 200%

    # Portfolio-level limits
    max_portfolio_volatility: float = 0.15  # Annual vol < 15%
    max_drawdown_limit: float = 0.15        # Circuit breaker at 15%
    daily_loss_limit: float = 0.03          # Stop if down 3% in a day
    monthly_loss_limit: float = 0.06        # Stop if down 6% in a month

    # VaR/CVaR limits
    daily_var_limit: float = 0.02           # 1-day 95% VaR < 2%
    daily_cvar_limit: float = 0.03          # 1-day 95% CVaR < 3%

    # Compliance / regulatory
    max_turnover_annual: float = 3.0        # Max 300% annual turnover
    min_holding_days: int = 1               # No day-trading (if applicable)
    # Blacklist of symbols that must never be traded.  The default is None
    # (not []) so the mutable default is never shared between instances;
    # __post_init__ swaps in a fresh empty list per instance.
    restricted_symbols: Optional[List[str]] = None

    def __post_init__(self):
        # Normalize the sentinel so callers can always iterate the list.
        if self.restricted_symbols is None:
            self.restricted_symbols = []
45
+
46
+
47
class ValueAtRisk:
    """
    Value at Risk (VaR) and Conditional VaR (CVaR) Engine.

    VaR answers: "What is the maximum loss I can expect with X% confidence?"
    CVaR answers: "If I lose more than VaR, how bad does it get on average?"

    Three methods:
    1. Historical Simulation (non-parametric, uses actual returns)
    2. Parametric (assumes normal/t distribution)
    3. Monte Carlo (simulates many scenarios)

    Sign convention: `var`/`cvar` are quoted as returns, so a 2% loss
    threshold is reported as -0.02 (and `var_pct` as -2.0).
    """

    def __init__(self, confidence: float = 0.95,
                 method: str = 'historical',
                 distribution: str = 't'):
        """
        Args:
            confidence: Confidence level (e.g., 0.95 = 95%)
            method: 'historical', 'parametric', 'monte_carlo'
            distribution: For parametric: 'normal' or 't'
        """
        self.confidence = confidence
        self.method = method
        self.distribution = distribution
        self.alpha = 1 - confidence  # tail probability, e.g. 0.05 for 95%

    def historical_var(self, returns: np.ndarray,
                       weights: Optional[np.ndarray] = None) -> Dict:
        """
        Historical Simulation VaR.

        Advantages: No distribution assumption, captures fat tails
        Disadvantages: Requires lots of history, assumes past repeats

        Args:
            returns: 1-D portfolio return series, or 2-D (time x assets)
                array when `weights` is supplied.
            weights: Portfolio weights aligned with the asset axis.

        Returns:
            Dict with var/cvar (as returns and in percent), the in-sample
            breach count/rate, and the sample size.
        """
        if weights is not None:
            port_returns = returns @ weights
        else:
            port_returns = returns

        # VaR is the (1-confidence) percentile of the return distribution.
        var = np.percentile(port_returns, self.alpha * 100)

        # CVaR (Expected Shortfall): average of returns at or beyond VaR.
        # Fall back to VaR itself if the tail happens to be empty
        # (degenerate tiny samples).
        cvar = port_returns[port_returns <= var].mean() if (port_returns <= var).any() else var

        var_pct = var * 100
        cvar_pct = cvar * 100

        # Breaches: how often did in-sample losses exceed VaR?  A well
        # calibrated model breaches at roughly `alpha` frequency.
        breaches = np.sum(port_returns < var)
        breach_rate = breaches / len(port_returns)

        return {
            'method': 'historical',
            'confidence': self.confidence,
            'var': var,
            'var_pct': var_pct,
            'cvar': cvar,
            'cvar_pct': cvar_pct,
            'breaches': breaches,
            'breach_rate': breach_rate,
            'expected_breach_rate': self.alpha,
            'n_observations': len(port_returns)
        }

    def parametric_var(self, returns: np.ndarray,
                       weights: Optional[np.ndarray] = None,
                       cov_matrix: Optional[np.ndarray] = None) -> Dict:
        """
        Parametric (variance-covariance) VaR.

        Assumes returns follow a known distribution (normal or Student's t).

        Advantages: Computationally fast, works with small samples
        Disadvantages: Assumes distribution, misses fat tails

        Args:
            returns: 1-D series, or 2-D (time x assets) array.
            weights: Portfolio weights for 2-D `returns`.
            cov_matrix: Asset covariance matrix; when given together with
                `weights`, portfolio variance is w' Σ w.

        Raises:
            ValueError: If `self.distribution` is not 'normal' or 't'.
        """
        if weights is not None and cov_matrix is not None:
            # Multi-asset portfolio: analytic mean and variance.
            port_mean = weights @ returns.mean(axis=0)
            port_var = weights @ cov_matrix @ weights
            port_std = np.sqrt(port_var)
        elif weights is not None:
            port_returns = returns @ weights
            port_mean = port_returns.mean()
            port_std = port_returns.std()
        else:
            port_mean = returns.mean()
            port_std = returns.std()

        if self.distribution == 'normal':
            z = stats.norm.ppf(self.alpha)
            var = port_mean + z * port_std
            # CVaR for normal: mu - sigma * phi(z) / alpha
            cvar = port_mean - port_std * stats.norm.pdf(z) / self.alpha

        elif self.distribution == 't':
            # Fit Student's t to the *portfolio* return series.
            # BUGFIX: the original ignored `weights` here and always
            # equal-weighted the assets; now the supplied weights are used
            # when returns are 2-D.
            if returns.ndim == 2:
                series = returns @ weights if weights is not None else returns.mean(axis=1)
            else:
                series = returns
            df, loc, scale = stats.t.fit(series)
            t_alpha = stats.t.ppf(self.alpha, df, loc=loc, scale=scale)
            var = t_alpha

            # CVaR for t distribution (closed-form approximation):
            # ES = loc - (scale * (df + t_alpha^2) / (df - 1)) * pdf(t_alpha) / alpha
            pdf_at_alpha = stats.t.pdf(t_alpha, df, loc=loc, scale=scale)
            cvar = loc - (scale * (df + t_alpha**2) / (df - 1)) * pdf_at_alpha / self.alpha

        else:
            raise ValueError(f"Unknown distribution: {self.distribution}")

        return {
            'method': 'parametric',
            'distribution': self.distribution,
            'confidence': self.confidence,
            'var': var,
            'var_pct': var * 100,
            'cvar': cvar,
            'cvar_pct': cvar * 100,
            'port_mean': port_mean,
            'port_std': port_std
        }

    def monte_carlo_var(self, returns: np.ndarray,
                        weights: Optional[np.ndarray] = None,
                        cov_matrix: Optional[np.ndarray] = None,
                        n_simulations: int = 100000) -> Dict:
        """
        Monte Carlo Simulation VaR.

        Simulates many future scenarios using fitted distributions.

        Advantages: Captures complex distributions, can include correlations
        Disadvantages: Computationally expensive, model-dependent

        Note: draws from `np.random`; seed externally for reproducibility.
        """
        if weights is not None and cov_matrix is not None:
            # Multi-asset simulation
            means = returns.mean(axis=0)

            # Cholesky decomposition for correlated random variables
            try:
                L = np.linalg.cholesky(cov_matrix)
            except np.linalg.LinAlgError:
                # Add small diagonal jitter to force positive definiteness
                L = np.linalg.cholesky(cov_matrix + np.eye(len(cov_matrix)) * 1e-8)

            # Simulate correlated draws centred on the historical means.
            # BUGFIX: the original discarded `means`, producing zero-mean
            # scenarios (inconsistent with the single-asset branch below).
            n_assets = len(means)
            random_normals = np.random.randn(n_simulations, n_assets)
            simulated_returns = means + random_normals @ L.T

            # Portfolio returns
            port_sim = simulated_returns @ weights
        else:
            # Single asset: collapse 2-D input to an equal-weight series.
            if returns.ndim == 2:
                returns = returns.mean(axis=1)

            mean = returns.mean()
            std = returns.std()

            simulated_returns = np.random.normal(mean, std, n_simulations)
            port_sim = simulated_returns

        # Calculate VaR/CVaR from the simulated distribution
        var = np.percentile(port_sim, self.alpha * 100)
        cvar = port_sim[port_sim <= var].mean() if (port_sim <= var).any() else var

        return {
            'method': 'monte_carlo',
            'confidence': self.confidence,
            'var': var,
            'var_pct': var * 100,
            'cvar': cvar,
            'cvar_pct': cvar * 100,
            'n_simulations': n_simulations
        }

    def calculate(self, returns: np.ndarray,
                  weights: Optional[np.ndarray] = None,
                  cov_matrix: Optional[np.ndarray] = None,
                  n_simulations: int = 100000) -> Dict:
        """Calculate VaR using the configured method.

        Raises:
            ValueError: If `self.method` is not one of the three methods.
        """
        if self.method == 'historical':
            return self.historical_var(returns, weights)
        elif self.method == 'parametric':
            return self.parametric_var(returns, weights, cov_matrix)
        elif self.method == 'monte_carlo':
            return self.monte_carlo_var(returns, weights, cov_matrix, n_simulations)
        else:
            raise ValueError(f"Unknown method: {self.method}")

    def backtest_var(self, returns: np.ndarray,
                     window: int = 252,
                     weights: Optional[np.ndarray] = None) -> pd.DataFrame:
        """
        Backtest VaR: rolling window VaR calculation and breach testing.

        If VaR at 95% confidence is breached more than 5% of the time,
        your model is underestimating risk.

        Args:
            returns: Return history (1-D series or 2-D time x assets).
            window: Look-back length in observations (252 ≈ one year).
            weights: Portfolio weights for 2-D `returns`.

        Returns:
            DataFrame with rolling VaR, per-day breach indicators, the
            excess loss beyond VaR on breach days, and a rolling breach rate.
        """
        results = []
        n = len(returns)

        for i in range(window, n):
            hist = returns[i - window:i]
            actual = returns[i]

            # Out-of-sample test: VaR estimated on the window, compared
            # against the next observation.
            var_result = self.historical_var(hist, weights)
            var_threshold = var_result['var']

            port_return = actual if weights is None else actual @ weights

            results.append({
                'date': i,
                'actual_return': port_return,
                'var': var_threshold,
                'breach': port_return < var_threshold,
                'excess_loss': max(0, -(port_return - var_threshold))
            })

        df = pd.DataFrame(results)
        df['breach_rate'] = df['breach'].rolling(window).mean()

        return df
274
+
275
+
276
class StressTesting:
    """
    Stress Testing Engine.

    Tests portfolio under extreme but plausible scenarios.
    The goal: "Can I survive 2008? March 2020? Flash Crash?"

    Scenarios:
    1. Historical events (replay actual market crises)
    2. Factor shocks (move one risk factor dramatically)
    3. Custom scenarios (user-defined)
    """

    # Library of crisis scenarios.  Only 'sp500_shock' (and optionally
    # 'recovery_days') are read by run_scenario(); the other keys are
    # descriptive metadata kept for reporting/extension.
    HISTORICAL_SCENARIOS = {
        'black_monday_1987': {
            'sp500_shock': -0.20,
            'volatility_multiplier': 5.0,
            'correlation_spike': 0.9,
            'description': 'Black Monday — S&P 500 drops 20% in one day, vol explodes'
        },
        'financial_crisis_2008': {
            'sp500_shock': -0.50,
            'duration_months': 18,
            'credit_spread_widening': 0.05,
            'description': '2008 Financial Crisis — 50% drawdown over 18 months'
        },
        'covid_crash_2020': {
            'sp500_shock': -0.34,
            'duration_days': 23,
            'vix_spike': 80,
            'recovery_days': 150,
            'description': 'March 2020 COVID crash — 34% in 23 days, VIX to 82'
        },
        'flash_crash_2010': {
            'sp500_shock': -0.09,
            'duration_minutes': 36,
            'liquidity_evaporation': True,
            'description': 'May 6, 2010 — 9% drop in 36 minutes, liquidity vanished'
        },
        'russian_default_1998': {
            'sp500_shock': -0.15,
            'flight_to_quality': True,
            'emergency_rate_cut': 0.005,
            'description': '1998 Russian default/LTCM — 15% drop, flight to Treasuries'
        }
    }

    def __init__(self, portfolio_returns: pd.Series,
                 benchmark_returns: Optional[pd.Series] = None,
                 current_weights: Optional[np.ndarray] = None):
        """
        Args:
            portfolio_returns: Historical portfolio return series.
            benchmark_returns: Market proxy used for beta estimation;
                if None, beta is assumed to be 1.0.
            current_weights: Portfolio weights used by custom_scenario().
        """
        self.portfolio_returns = portfolio_returns
        self.benchmark_returns = benchmark_returns
        self.current_weights = current_weights

    def run_scenario(self, scenario_name: str) -> Dict:
        """Run a predefined historical scenario.

        Scales the scenario's market shock by the portfolio's beta to the
        benchmark and returns a summary dict including survival flags.

        Raises:
            ValueError: If `scenario_name` is not in HISTORICAL_SCENARIOS.
        """
        if scenario_name not in self.HISTORICAL_SCENARIOS:
            raise ValueError(f"Unknown scenario: {scenario_name}")

        scenario = self.HISTORICAL_SCENARIOS[scenario_name]

        # Estimate portfolio beta to S&P 500
        if self.benchmark_returns is not None:
            # Align the two series on their shared index before covariance.
            aligned = pd.concat([self.portfolio_returns, self.benchmark_returns], axis=1).dropna()
            cov = np.cov(aligned.iloc[:, 0], aligned.iloc[:, 1])
            # beta = cov(port, bench) / var(bench); fall back to 1.0 when
            # the benchmark has zero variance.
            beta = cov[0, 1] / cov[1, 1] if cov[1, 1] > 0 else 1.0
        else:
            beta = 1.0

        # Projected portfolio shock
        shock = scenario['sp500_shock'] * beta

        # Drawdown from shock (the shock is applied as a one-shot loss)
        drawdown = shock

        # Time to recover (based on historical averages; 500 trading days
        # is the default for scenarios without an explicit recovery_days)
        recovery_days = scenario.get('recovery_days', 500)
        recovery_months = recovery_days / 21  # Trading days per month

        return {
            'scenario': scenario_name,
            'description': scenario['description'],
            'projected_shock': shock,
            'projected_drawdown': drawdown,
            'estimated_recovery_months': recovery_months,
            'survival_check': abs(drawdown) < 0.50,  # Can you survive?
            'would_blow_up': abs(drawdown) > 0.90  # Would you be ruined?
        }

    def run_all_scenarios(self) -> pd.DataFrame:
        """Run all predefined historical scenarios.

        Returns:
            DataFrame with one row per scenario (columns as produced by
            run_scenario()).
        """
        results = []
        for name in self.HISTORICAL_SCENARIOS:
            result = self.run_scenario(name)
            results.append(result)

        return pd.DataFrame(results)

    def factor_shock(self, factor_returns: pd.DataFrame,
                     shock_factors: Dict[str, float]) -> Dict:
        """
        Apply factor shocks and project portfolio impact.

        Args:
            factor_returns: Factor returns DataFrame (columns = factors)
            shock_factors: Dict of {factor_name: shock_magnitude}

        Returns:
            Projected portfolio impact

        NOTE(review): requires scikit-learn at call time (imported lazily
        below); unknown factor names in `shock_factors` are silently ignored.
        """
        # Regress portfolio returns on factors
        from sklearn.linear_model import LinearRegression

        aligned = pd.concat([self.portfolio_returns, factor_returns], axis=1).dropna()
        # 100 observations is the minimum deemed acceptable for a stable
        # multi-factor regression here.
        if len(aligned) < 100:
            return {'error': 'Insufficient data for factor regression'}

        X = aligned.iloc[:, 1:].values
        y = aligned.iloc[:, 0].values

        reg = LinearRegression().fit(X, y)

        # Apply shocks
        shock_vector = np.zeros(len(factor_returns.columns))
        for factor, shock in shock_factors.items():
            if factor in factor_returns.columns:
                idx = list(factor_returns.columns).index(factor)
                shock_vector[idx] = shock

        # Linear projection: impact = sum(beta_i * shock_i)
        projected_impact = reg.coef_ @ shock_vector

        return {
            'projected_return': projected_impact,
            'projected_drawdown': projected_impact,
            'factor_exposures': dict(zip(factor_returns.columns, reg.coef_)),
            'r_squared': reg.score(X, y)
        }

    def custom_scenario(self,
                        shock_returns: Dict[str, float],
                        weights: Optional[np.ndarray] = None) -> Dict:
        """
        Run a custom scenario with specific asset shocks.

        Args:
            shock_returns: Dict of {asset_name: return_shock}
            weights: Portfolio weights (if None, use current_weights)

        NOTE(review): shocks are matched to weights by dict insertion
        order, not by asset name — the caller must pass `shock_returns`
        in the same order as the weight vector.  TODO confirm against
        callers before relying on asset-name alignment.
        """
        if weights is None:
            weights = self.current_weights

        if weights is None:
            return {'error': 'No weights provided'}

        assets = list(shock_returns.keys())
        shocks = np.array([shock_returns.get(a, 0) for a in assets])

        # Dot the leading weights against the shock vector.
        portfolio_impact = weights[:len(shocks)] @ shocks

        return {
            'scenario': 'custom',
            'shocked_assets': shock_returns,
            'portfolio_impact': portfolio_impact,
            'projected_drawdown': portfolio_impact
        }
441
+
442
+
443
class ComplianceMonitor:
    """
    Real-time compliance monitoring.

    Checks every decision against risk limits.

    Outputs:
    - GREEN: Within all limits
    - YELLOW: Approaching limits (warning)
    - RED: Limit breached (HARD STOP)
    """

    def __init__(self, limits: RiskLimits):
        self.limits = limits
        self.violations = []  # history of RED check results
        self.warnings = []    # history of YELLOW check results

    def check_position_limits(self, weights: np.ndarray,
                              sector_map: Optional[Dict[int, str]] = None) -> Dict:
        """Check if positions comply with limits"""
        report = {'status': 'GREEN', 'violations': [], 'warnings': []}
        limits = self.limits

        # Per-name concentration: absolute weight must stay under the cap.
        for idx, weight in enumerate(weights):
            if abs(weight) <= limits.max_position_pct:
                continue
            report['violations'].append(
                f"Position {idx}: {weight*100:.1f}% > max {limits.max_position_pct*100:.0f}%"
            )
            report['status'] = 'RED'

        # Sector concentration: sum of |weights| per mapped sector.
        if sector_map is not None:
            exposure = {}
            for idx, sector in sector_map.items():
                if idx < len(weights):
                    exposure[sector] = exposure.get(sector, 0) + abs(weights[idx])

            for sector, gross in exposure.items():
                if gross <= limits.max_sector_pct:
                    continue
                report['violations'].append(
                    f"Sector {sector}: {gross*100:.1f}% > max {limits.max_sector_pct*100:.0f}%"
                )
                report['status'] = 'RED'

        # Gross leverage: sum of absolute weights across the book.
        gross_leverage = np.sum(np.abs(weights))
        if gross_leverage > limits.max_leverage:
            report['violations'].append(
                f"Gross leverage: {gross_leverage:.2f}x > max {limits.max_leverage:.1f}x"
            )
            report['status'] = 'RED'

        return report

    def check_var_limits(self, var_result: Dict) -> Dict:
        """Check if VaR is within limits"""
        report = {'status': 'GREEN', 'violations': [], 'warnings': []}

        # var_pct/cvar_pct are negative (losses); compare magnitudes.
        var_frac = abs(var_result.get('var_pct', 0)) / 100
        cvar_frac = abs(var_result.get('cvar_pct', 0)) / 100

        if var_frac > self.limits.daily_var_limit:
            report['status'] = 'RED'
            report['violations'].append(
                f"Daily VaR: {var_frac*100:.2f}% > limit {self.limits.daily_var_limit*100:.0f}%"
            )
        elif var_frac > self.limits.daily_var_limit * 0.8:
            # Within 80% of the limit: early warning zone.
            report['warnings'].append(
                f"Daily VaR: {var_frac*100:.2f}% approaching limit"
            )
            if report['status'] == 'GREEN':
                report['status'] = 'YELLOW'

        if cvar_frac > self.limits.daily_cvar_limit:
            report['status'] = 'RED'
            report['violations'].append(
                f"Daily CVaR: {cvar_frac*100:.2f}% > limit {self.limits.daily_cvar_limit*100:.0f}%"
            )

        return report

    def check_drawdown(self, current_drawdown: float) -> Dict:
        """Check if drawdown is within limits"""
        report = {'status': 'GREEN', 'violations': [], 'warnings': []}
        # Drawdowns are negative fractions; the hard stop is the negated limit.
        hard_stop = -self.limits.max_drawdown_limit

        if current_drawdown < hard_stop:
            report['status'] = 'RED'
            report['violations'].append(
                f"Drawdown: {current_drawdown*100:.1f}% > limit {self.limits.max_drawdown_limit*100:.0f}%"
            )
        elif current_drawdown < hard_stop * 0.7:
            # Past 70% of the drawdown budget: warn before the breaker trips.
            report['warnings'].append(
                f"Drawdown: {current_drawdown*100:.1f}% approaching limit"
            )
            if report['status'] == 'GREEN':
                report['status'] = 'YELLOW'

        return report

    def full_compliance_check(self,
                              weights: np.ndarray,
                              var_result: Dict,
                              current_drawdown: float,
                              sector_map: Optional[Dict[int, str]] = None) -> Dict:
        """Run all compliance checks and return overall status"""
        checks = {
            'position_check': self.check_position_limits(weights, sector_map),
            'var_check': self.check_var_limits(var_result),
            'drawdown_check': self.check_drawdown(current_drawdown),
        }

        # Aggregate violations/warnings across all sub-checks (in order).
        all_violations = []
        all_warnings = []
        for chk in checks.values():
            all_violations.extend(chk.get('violations', []))
            all_warnings.extend(chk.get('warnings', []))

        # Worst sub-check wins: any RED → RED, else any YELLOW → YELLOW.
        statuses = [chk.get('status') for chk in checks.values()]
        if 'RED' in statuses:
            overall_status = 'RED'
        elif 'YELLOW' in statuses:
            overall_status = 'YELLOW'
        else:
            overall_status = 'GREEN'

        result = {
            'overall_status': overall_status,
            'position_check': checks['position_check'],
            'var_check': checks['var_check'],
            'drawdown_check': checks['drawdown_check'],
            'violations': all_violations,
            'warnings': all_warnings,
            'can_trade': overall_status != 'RED',
            'should_reduce': overall_status == 'YELLOW'
        }

        # Keep an audit trail of non-GREEN outcomes.
        if overall_status == 'RED':
            self.violations.append(result)
        elif overall_status == 'YELLOW':
            self.warnings.append(result)

        return result
586
+
587
+
588
def run_full_risk_assessment(returns: pd.DataFrame,
                             weights: np.ndarray,
                             current_drawdown: float = 0.0,
                             sector_map: Optional[Dict[int, str]] = None,
                             confidence: float = 0.95,
                             risk_free_rate: float = 0.04) -> Dict:
    """
    Complete risk assessment — run this EVERY DAY before trading.

    Args:
        returns: Daily asset returns, one column per asset.
        weights: Portfolio weights aligned with `returns` columns.
        current_drawdown: Current drawdown as a negative fraction (e.g. -0.05).
        sector_map: Optional {asset_index: sector_name} for sector limits.
        confidence: VaR/CVaR confidence level.
        risk_free_rate: Annual risk-free rate for the Sharpe ratio
            (default 0.04, matching the previously hard-coded value).

    Returns:
        Comprehensive risk report with VaR, stress tests, and compliance status
    """
    print("=" * 70)
    print(" DAILY RISK ASSESSMENT")
    print("=" * 70)

    # 1. VaR Calculation
    print("\n[1/4] Value at Risk Analysis...")
    var_engine = ValueAtRisk(confidence=confidence, method='historical')
    var_result = var_engine.calculate(returns.values, weights=weights)

    var_engine_param = ValueAtRisk(confidence=confidence, method='parametric', distribution='t')
    var_param = var_engine_param.calculate(returns.values, weights=weights)

    print(f"   Historical VaR ({confidence*100:.0f}%): {var_result['var_pct']:.2f}%")
    print(f"   Historical CVaR ({confidence*100:.0f}%): {var_result['cvar_pct']:.2f}%")
    print(f"   Parametric VaR (Student-t): {var_param['var_pct']:.2f}%")
    print(f"   Parametric CVaR (Student-t): {var_param['cvar_pct']:.2f}%")

    # 2. Stress Testing
    print("\n[2/4] Stress Testing...")
    port_returns = (returns.values @ weights) if returns.values.ndim == 2 else returns.values
    # NOTE(review): the first asset column is used as the beta benchmark —
    # a market-index proxy; confirm this matches the caller's convention.
    stress = StressTesting(
        pd.Series(port_returns),
        benchmark_returns=returns.iloc[:, 0] if len(returns.columns) > 0 else None
    )
    stress_results = stress.run_all_scenarios()

    for _, row in stress_results.iterrows():
        icon = '✓' if not row['would_blow_up'] else '✗'
        print(f"   {icon} {row['scenario']}: {row['projected_drawdown']*100:.1f}% drawdown, "
              f"recovery {row['estimated_recovery_months']:.0f} months")

    # 3. Compliance
    print("\n[3/4] Compliance Check...")
    limits = RiskLimits()
    compliance = ComplianceMonitor(limits)
    compliance_result = compliance.full_compliance_check(
        weights, var_result, current_drawdown, sector_map
    )

    status_icon = {'GREEN': '✓', 'YELLOW': '⚠', 'RED': '✗'}
    icon = status_icon.get(compliance_result['overall_status'], '?')
    print(f"   {icon} OVERALL STATUS: {compliance_result['overall_status']}")

    if compliance_result['violations']:
        print(f"   VIOLATIONS:")
        for v in compliance_result['violations']:
            print(f"     - {v}")

    if compliance_result['warnings']:
        print(f"   WARNINGS:")
        for w in compliance_result['warnings']:
            print(f"     - {w}")

    # 4. Risk Summary
    print("\n[4/4] Risk Summary...")

    # Portfolio statistics, annualized over 252 trading days.
    port_mean = np.mean(port_returns) * 252
    port_vol = np.std(port_returns) * np.sqrt(252)
    sharpe = (port_mean - risk_free_rate) / port_vol if port_vol > 0 else 0

    # BUGFIX: max drawdown is the largest *relative* decline from the
    # running peak of the equity curve.  The original subtracted wealth
    # levels (cumprod minus running max), which yields an absolute gap in
    # wealth units yet was reported as a percentage.  The equity curve is
    # also now computed once instead of twice.
    equity_curve = np.cumprod(1 + port_returns)
    running_peak = np.maximum.accumulate(equity_curve)
    max_dd = (equity_curve / running_peak - 1).min()

    summary = {
        'daily_var_95': var_result['var_pct'],
        'daily_cvar_95': var_result['cvar_pct'],
        'annual_return': port_mean,
        'annual_volatility': port_vol,
        'sharpe_ratio': sharpe,
        'max_drawdown': max_dd,
        'compliance_status': compliance_result['overall_status'],
        'can_trade': compliance_result['can_trade'],
        'should_reduce': compliance_result['should_reduce'],
        'stress_tests': stress_results.to_dict('records'),
        'violations': compliance_result['violations'],
        'warnings': compliance_result['warnings']
    }

    print(f"\n   Annual Return: {port_mean*100:.1f}%")
    print(f"   Annual Volatility: {port_vol*100:.1f}%")
    print(f"   Sharpe Ratio: {sharpe:.2f}")
    print(f"   Max Drawdown: {max_dd*100:.1f}%")
    print(f"   Daily VaR (95%): {var_result['var_pct']:.2f}%")
    print(f"   Daily CVaR (95%): {var_result['cvar_pct']:.2f}%")
    print(f"\n   {'='*70}")
    print(f"   CAN TRADE TODAY: {compliance_result['can_trade']}")
    print(f"   {'='*70}")

    return summary
687
+
688
+
689
if __name__ == '__main__':
    # Smoke test: build a synthetic equal-weight 5-asset portfolio and
    # run the full daily risk assessment on it.
    np.random.seed(42)

    n_days, n_assets = 1000, 5
    synthetic_returns = np.random.randn(n_days, n_assets) * 0.02
    equal_weights = np.full(n_assets, 0.20)

    asset_names = [f'Asset_{i}' for i in range(n_assets)]
    returns_df = pd.DataFrame(synthetic_returns, columns=asset_names)

    summary = run_full_risk_assessment(
        returns_df, equal_weights, current_drawdown=-0.05
    )