Initial commit — Trading AI Secure project complete

Architecture Docker (8 services), FastAPI, TimescaleDB, Redis, Streamlit.
Stratégies : scalping, intraday, swing. MLEngine + RegimeDetector (HMM).
BacktestEngine + WalkForwardAnalyzer + Optuna optimizer.
Routes API complètes dont /optimize async.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Tika
2026-03-08 17:38:09 +00:00
commit da30ef19ed
111 changed files with 31723 additions and 0 deletions

11
tests/__init__.py Normal file
View File

@@ -0,0 +1,11 @@
"""
Tests Package - Suite de Tests Complète.
Ce package contient tous les tests pour Trading AI Secure:
- unit/: Tests unitaires
- integration/: Tests d'intégration
- e2e/: Tests end-to-end
- fixtures/: Fixtures et données de test
"""
__version__ = "0.1.0-alpha"

96
tests/conftest.py Normal file
View File

@@ -0,0 +1,96 @@
"""
Pytest Configuration - Fixtures Globales.
Ce fichier contient les fixtures pytest partagées par tous les tests.
"""
import pytest
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from typing import Dict
from src.core.risk_manager import RiskManager
from src.core.strategy_engine import StrategyEngine
@pytest.fixture
def sample_config() -> Dict:
    """Shared test configuration covering risk limits and strategy params.

    Returns:
        A complete configuration dict as expected by RiskManager and
        StrategyEngine initialization.
    """
    global_limits = {
        'max_portfolio_risk': 0.05,
        'max_position_size': 0.10,
        'max_drawdown': 0.15,
        'max_daily_loss': 0.03,
        'max_correlation': 0.7,
    }
    strategy_limits = {
        'scalping': {'risk_per_trade': 0.01, 'max_trades_per_day': 50},
        'intraday': {'risk_per_trade': 0.02, 'max_trades_per_day': 20},
    }
    scalping_strategy = {
        'name': 'scalping',
        'timeframe': '5m',
        'risk_per_trade': 0.01,
        'max_holding_time': 1800,
        'max_trades_per_day': 50,
        'adaptive_params': {
            'bb_period': 20,
            'rsi_period': 14,
            'min_confidence': 0.65,
        },
    }
    return {
        'risk_limits': {
            'initial_capital': 10000.0,
            'global_limits': global_limits,
            'strategy_limits': strategy_limits,
        },
        'strategy_params': {'scalping_strategy': scalping_strategy},
    }
@pytest.fixture
def risk_manager(sample_config) -> RiskManager:
    """Provide a RiskManager already initialized with the test risk limits."""
    manager = RiskManager()
    manager.initialize(sample_config['risk_limits'])
    return manager
@pytest.fixture
def sample_ohlcv_data() -> pd.DataFrame:
    """Deterministic 100-bar OHLCV frame built from a seeded random walk."""
    index = pd.date_range(start='2024-01-01', periods=100, freq='1H')
    np.random.seed(42)
    # Geometric walk around a 1.1000 base price.
    step_returns = np.random.normal(0.0001, 0.01, 100)
    close = 1.1000 * np.exp(np.cumsum(step_returns))
    frame = pd.DataFrame(index=index)
    frame['close'] = close
    frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
    # High/low wrap the open-close body with small random wicks.
    frame['high'] = frame[['open', 'close']].max(axis=1) * (1 + np.random.uniform(0, 0.001, 100))
    frame['low'] = frame[['open', 'close']].min(axis=1) * (1 - np.random.uniform(0, 0.001, 100))
    frame['volume'] = np.random.randint(1000, 10000, 100)
    return frame
@pytest.fixture(autouse=True)
def reset_singletons():
    """Clear the RiskManager singleton before and after every test."""
    RiskManager._instance = None
    try:
        yield
    finally:
        # Teardown: never leak a fitted singleton into the next test.
        RiskManager._instance = None

1
tests/unit/__init__.py Normal file
View File

@@ -0,0 +1 @@
"""Tests unitaires."""

View File

@@ -0,0 +1,205 @@
"""
Tests Unitaires - DataValidator.
Tests de validation et nettoyage des données.
"""
import pytest
import pandas as pd
import numpy as np
from src.data.data_validator import DataValidator
class TestDataValidation:
    """Checks performed by DataValidator.validate()."""

    def test_validate_valid_data(self, sample_ohlcv_data):
        """A well-formed OHLCV frame passes with no errors."""
        dv = DataValidator()
        ok, problems = dv.validate(sample_ohlcv_data)
        assert ok is True
        assert len(problems) == 0

    def test_validate_empty_dataframe(self):
        """An empty DataFrame is rejected with an 'empty' error message."""
        dv = DataValidator()
        ok, problems = dv.validate(pd.DataFrame())
        assert ok is False
        assert len(problems) > 0
        assert 'empty' in problems[0].lower()

    def test_validate_missing_columns(self):
        """A frame lacking required OHLCV columns is rejected."""
        dv = DataValidator()
        partial = pd.DataFrame({
            'open': [1.1, 1.2],
            'close': [1.15, 1.25]
            # high, low and volume intentionally absent
        })
        ok, problems = dv.validate(partial)
        assert ok is False
        assert any('missing columns' in msg.lower() for msg in problems)

    def test_validate_price_inconsistency(self):
        """Rows where low exceeds high are flagged as inconsistent."""
        dv = DataValidator()
        bad = pd.DataFrame({
            'open': [1.1, 1.2, 1.3],
            'high': [1.15, 1.25, 1.35],
            'low': [1.2, 1.3, 1.4],  # every low is above its high: invalid
            'close': [1.12, 1.22, 1.32],
            'volume': [1000, 2000, 3000]
        })
        ok, problems = dv.validate(bad)
        assert ok is False
        assert any('high < low' in msg.lower() for msg in problems)

    def test_validate_excessive_missing_values(self):
        """NaN density above max_missing_pct causes rejection."""
        dv = DataValidator(config={'max_missing_pct': 0.05})
        sparse = pd.DataFrame({
            'open': [1.1, np.nan, 1.3, np.nan, 1.5] * 10,
            'high': [1.15, 1.25, np.nan, 1.45, 1.55] * 10,
            'low': [1.05, 1.15, 1.25, np.nan, 1.45] * 10,
            'close': [1.12, 1.22, 1.32, 1.42, np.nan] * 10,
            'volume': [1000] * 50
        })
        ok, problems = dv.validate(sparse)
        assert ok is False
        assert any('missing values' in msg.lower() for msg in problems)
class TestDataCleaning:
    """Behaviour of DataValidator.clean()."""

    @staticmethod
    def _flat_frame(dates):
        """Constant-price 10-row OHLCV frame over the given index."""
        return pd.DataFrame({
            'open': [1.1] * 10,
            'high': [1.15] * 10,
            'low': [1.05] * 10,
            'close': [1.12] * 10,
            'volume': [1000] * 10
        }, index=dates)

    def test_clean_removes_duplicates(self):
        """Duplicate index rows are dropped."""
        dv = DataValidator()
        dates = pd.date_range('2024-01-01', periods=10, freq='1H')
        frame = self._flat_frame(dates)
        frame = pd.concat([frame, frame.iloc[[5]]])  # inject one duplicate row
        assert len(frame) == 11
        cleaned = dv.clean(frame)
        assert len(cleaned) == 10

    def test_clean_sorts_chronologically(self):
        """Rows come back sorted by timestamp."""
        dv = DataValidator()
        dates = pd.date_range('2024-01-01', periods=10, freq='1H')
        shuffled = self._flat_frame(dates).sample(frac=1)
        cleaned = dv.clean(shuffled)
        assert cleaned.index.is_monotonic_increasing

    def test_clean_interpolates_missing_values(self):
        """NaN gaps in price columns are filled."""
        dv = DataValidator()
        gappy = pd.DataFrame({
            'open': [1.1, np.nan, 1.3, 1.4, 1.5],
            'high': [1.15, 1.25, np.nan, 1.45, 1.55],
            'low': [1.05, 1.15, 1.25, np.nan, 1.45],
            'close': [1.12, 1.22, 1.32, 1.42, 1.52],
            'volume': [1000, 2000, 3000, 4000, 5000]
        })
        cleaned = dv.clean(gappy)
        for col in ('open', 'high', 'low'):
            assert cleaned[col].isna().sum() == 0

    def test_clean_fixes_price_inconsistencies(self):
        """After cleaning, high/low bracket open and close on every row."""
        dv = DataValidator()
        broken = pd.DataFrame({
            'open': [1.1, 1.2, 1.3],
            'high': [1.05, 1.15, 1.25],  # high below open: invalid
            'low': [1.15, 1.25, 1.35],   # low above open: invalid
            'close': [1.12, 1.22, 1.32],
            'volume': [1000, 2000, 3000]
        })
        cleaned = dv.clean(broken)
        assert (cleaned['high'] >= cleaned['low']).all()
        assert (cleaned['high'] >= cleaned['open']).all()
        assert (cleaned['high'] >= cleaned['close']).all()
        assert (cleaned['low'] <= cleaned['open']).all()
        assert (cleaned['low'] <= cleaned['close']).all()
class TestDataQualityReport:
    """Content of DataValidator.get_data_quality_report()."""

    def test_generate_quality_report(self, sample_ohlcv_data):
        """The report carries the expected top-level keys and row count."""
        report = DataValidator().get_data_quality_report(sample_ohlcv_data)
        for key in ('total_rows', 'date_range', 'missing_values', 'is_valid', 'price_stats'):
            assert key in report
        assert report['total_rows'] == len(sample_ohlcv_data)
        assert report['is_valid'] is True

    def test_report_includes_statistics(self, sample_ohlcv_data):
        """Close-price statistics are present and positive."""
        report = DataValidator().get_data_quality_report(sample_ohlcv_data)
        stats = report['price_stats']
        for key in ('mean_close', 'std_close', 'min_close', 'max_close'):
            assert key in stats
        assert stats['mean_close'] > 0
        assert stats['std_close'] > 0

View File

@@ -0,0 +1 @@
"""Tests unitaires pour le module ML."""

View File

@@ -0,0 +1,523 @@
"""
Tests Unitaires - FeatureEngineering.
Tests de la création de features pour ML.
"""
import pytest
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from src.ml.feature_engineering import FeatureEngineering
class TestFeatureEngineeringInitialization:
    """Constructor behaviour of FeatureEngineering."""

    def test_initialization_default(self):
        """With no arguments the config is empty and no features are named."""
        eng = FeatureEngineering()
        assert eng.config == {}
        assert len(eng.feature_names) == 0

    def test_initialization_with_config(self):
        """A config dict passed to the constructor is stored as-is."""
        cfg = {'param1': 'value1'}
        eng = FeatureEngineering(cfg)
        assert eng.config == cfg
class TestFeatureCreation:
    """End-to-end feature creation via create_all_features()."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 300-bar OHLCV random walk with random wick sizes."""
        idx = pd.date_range(start='2024-01-01', periods=300, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 300)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * (1 + np.random.uniform(0, 0.001, 300))
        frame['low'] = frame[['open', 'close']].min(axis=1) * (1 - np.random.uniform(0, 0.001, 300))
        frame['volume'] = np.random.randint(1000, 10000, 300)
        return frame

    def test_create_all_features(self, sample_data):
        """A non-empty feature frame is produced and names are recorded."""
        eng = FeatureEngineering()
        feats = eng.create_all_features(sample_data)
        assert isinstance(feats, pd.DataFrame)
        assert len(feats) > 0
        assert len(eng.feature_names) > 0

    def test_features_count(self, sample_data):
        """At least 100 distinct features are generated."""
        eng = FeatureEngineering()
        eng.create_all_features(sample_data)
        assert len(eng.feature_names) >= 100

    def test_no_nan_in_features(self, sample_data):
        """The returned frame contains no NaN after the internal dropna."""
        eng = FeatureEngineering()
        feats = eng.create_all_features(sample_data)
        assert feats.isna().sum().sum() == 0
class TestPriceFeatures:
    """Price-derived features from _create_price_features()."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 300-bar OHLCV random walk with fixed-size wicks."""
        idx = pd.date_range(start='2024-01-01', periods=300, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 300)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 300)
        return frame

    def test_price_features_created(self, sample_data):
        """All raw price-derived columns are added."""
        out = FeatureEngineering()._create_price_features(sample_data.copy())
        for col in ('returns', 'log_returns', 'high_low_ratio',
                    'close_open_ratio', 'price_position'):
            assert col in out.columns

    def test_returns_calculation(self, sample_data):
        """'returns' equals close.pct_change()."""
        out = FeatureEngineering()._create_price_features(sample_data.copy())
        expected = sample_data['close'].pct_change()
        pd.testing.assert_series_equal(
            out['returns'].dropna(),
            expected.dropna(),
            check_names=False
        )

    def test_price_position_range(self, sample_data):
        """price_position is bounded to [0, 1]."""
        out = FeatureEngineering()._create_price_features(sample_data.copy())
        pos = out['price_position'].dropna()
        assert (pos >= 0).all()
        assert (pos <= 1).all()
class TestTechnicalIndicators:
    """Indicator columns from _create_technical_indicators()."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 300-bar OHLCV random walk with fixed-size wicks."""
        idx = pd.date_range(start='2024-01-01', periods=300, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 300)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 300)
        return frame

    def test_moving_averages_created(self, sample_data):
        """SMA and EMA columns exist for every standard period."""
        out = FeatureEngineering()._create_technical_indicators(sample_data.copy())
        for period in (5, 10, 20, 50, 100, 200):
            assert f'sma_{period}' in out.columns
            assert f'ema_{period}' in out.columns

    def test_rsi_calculation(self, sample_data):
        """RSI columns exist and values stay within [0, 100]."""
        out = FeatureEngineering()._create_technical_indicators(sample_data.copy())
        for period in (7, 14, 21):
            assert f'rsi_{period}' in out.columns
            rsi = out[f'rsi_{period}'].dropna()
            assert (rsi >= 0).all()
            assert (rsi <= 100).all()

    def test_macd_calculation(self, sample_data):
        """MACD line, signal and histogram columns exist."""
        out = FeatureEngineering()._create_technical_indicators(sample_data.copy())
        for col in ('macd', 'macd_signal', 'macd_hist'):
            assert col in out.columns

    def test_bollinger_bands(self, sample_data):
        """Band columns exist and bands are correctly ordered."""
        out = FeatureEngineering()._create_technical_indicators(sample_data.copy())
        for period in (20, 50):
            for part in ('upper', 'middle', 'lower', 'width', 'position'):
                assert f'bb_{part}_{period}' in out.columns
            upper = out[f'bb_upper_{period}'].dropna()
            middle = out[f'bb_middle_{period}'].dropna()
            lower = out[f'bb_lower_{period}'].dropna()
            assert (upper >= middle).all()
            assert (middle >= lower).all()

    def test_atr_calculation(self, sample_data):
        """ATR columns exist and values are strictly positive."""
        out = FeatureEngineering()._create_technical_indicators(sample_data.copy())
        for period in (7, 14, 21):
            assert f'atr_{period}' in out.columns
            assert (out[f'atr_{period}'].dropna() > 0).all()
class TestStatisticalFeatures:
    """Rolling-statistics columns from _create_statistical_features()."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 300-bar OHLCV random walk with fixed-size wicks."""
        idx = pd.date_range(start='2024-01-01', periods=300, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 300)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 300)
        return frame

    def test_statistical_features_created(self, sample_data):
        """Mean/std/skew/kurt/zscore columns exist for every window."""
        out = FeatureEngineering()._create_statistical_features(sample_data.copy())
        for period in (10, 20, 50):
            for stat in ('mean', 'std', 'skew', 'kurt', 'zscore'):
                assert f'{stat}_{period}' in out.columns

    def test_zscore_calculation(self, sample_data):
        """Rolling z-score stays near zero mean and unit spread."""
        out = FeatureEngineering()._create_statistical_features(sample_data.copy())
        z = out['zscore_20'].dropna()
        assert abs(z.mean()) < 0.5
        assert abs(z.std() - 1.0) < 0.5
class TestVolatilityFeatures:
    """Volatility columns from _create_volatility_features()."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 300-bar OHLCV random walk with fixed-size wicks."""
        idx = pd.date_range(start='2024-01-01', periods=300, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 300)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 300)
        return frame

    def test_volatility_features_created(self, sample_data):
        """Rolling, Parkinson and Garman-Klass columns are all added."""
        frame = sample_data.copy()
        frame['returns'] = frame['close'].pct_change()  # prerequisite column
        out = FeatureEngineering()._create_volatility_features(frame)
        for period in (10, 20, 50):
            assert f'volatility_{period}' in out.columns
        for col in ('parkinson_vol', 'gk_vol', 'vol_ratio'):
            assert col in out.columns

    def test_volatility_positive(self, sample_data):
        """Rolling volatility values are strictly positive."""
        frame = sample_data.copy()
        frame['returns'] = frame['close'].pct_change()
        out = FeatureEngineering()._create_volatility_features(frame)
        assert (out['volatility_20'].dropna() > 0).all()
class TestVolumeFeatures:
    """Volume-derived columns from _create_volume_features()."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 300-bar OHLCV random walk with fixed-size wicks."""
        idx = pd.date_range(start='2024-01-01', periods=300, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 300)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 300)
        return frame

    def test_volume_features_created(self, sample_data):
        """Volume MAs plus ratio/change/OBV/VWAP columns are added."""
        out = FeatureEngineering()._create_volume_features(sample_data.copy())
        for period in (5, 10, 20):
            assert f'volume_ma_{period}' in out.columns
        for col in ('volume_ratio', 'volume_change', 'obv', 'vwap'):
            assert col in out.columns
class TestTimeFeatures:
    """Calendar features from _create_time_features()."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 300-bar OHLCV frame with an hourly DatetimeIndex."""
        idx = pd.date_range(start='2024-01-01', periods=300, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 300)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 300)
        return frame

    def test_time_features_created(self, sample_data):
        """Raw and sin/cos-encoded calendar columns are all added."""
        out = FeatureEngineering()._create_time_features(sample_data.copy())
        for col in ('hour', 'hour_sin', 'hour_cos',
                    'day_of_week', 'dow_sin', 'dow_cos',
                    'month', 'month_sin', 'month_cos'):
            assert col in out.columns

    def test_cyclic_encoding_range(self, sample_data):
        """Every cyclic encoding stays within [-1, 1]."""
        out = FeatureEngineering()._create_time_features(sample_data.copy())
        for col in ('hour_sin', 'hour_cos', 'dow_sin', 'dow_cos', 'month_sin', 'month_cos'):
            vals = out[col].dropna()
            assert (vals >= -1).all()
            assert (vals <= 1).all()
class TestFeatureImportance:
    """Feature-importance ranking and top-k selection."""

    @pytest.fixture
    def sample_features(self):
        """1000x20 frame of seeded Gaussian features."""
        np.random.seed(42)
        return pd.DataFrame(
            np.random.randn(1000, 20),
            columns=[f'feature_{i}' for i in range(20)]
        )

    @pytest.fixture
    def sample_target(self):
        """Seeded Gaussian target series of length 1000."""
        np.random.seed(42)
        return pd.Series(np.random.randn(1000))

    def test_get_feature_importance(self, sample_features, sample_target):
        """Mutual-information ranking covers every input column."""
        ranking = FeatureEngineering().get_feature_importance(
            sample_features,
            sample_target,
            method='mutual_info'
        )
        assert isinstance(ranking, pd.DataFrame)
        assert 'feature' in ranking.columns
        assert 'importance' in ranking.columns
        assert len(ranking) == len(sample_features.columns)

    def test_select_top_features(self, sample_features, sample_target):
        """Exactly n_features known columns are returned."""
        chosen = FeatureEngineering().select_top_features(
            sample_features,
            sample_target,
            n_features=10
        )
        assert isinstance(chosen, list)
        assert len(chosen) == 10
        assert all(name in sample_features.columns for name in chosen)
class TestFeatureEngineeringIntegration:
    """Full create -> rank -> select workflow."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 500-bar OHLCV random walk with random wick sizes."""
        idx = pd.date_range(start='2024-01-01', periods=500, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 500)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * (1 + np.random.uniform(0, 0.001, 500))
        frame['low'] = frame[['open', 'close']].min(axis=1) * (1 - np.random.uniform(0, 0.001, 500))
        frame['volume'] = np.random.randint(1000, 10000, 500)
        return frame

    def test_full_workflow(self, sample_data):
        """Features build cleanly, rank by correlation and select top 50."""
        eng = FeatureEngineering()
        # Step 1: build every feature and sanity-check the result.
        feats = eng.create_all_features(sample_data)
        assert len(feats) > 0
        assert len(eng.feature_names) >= 100
        assert feats.isna().sum().sum() == 0
        # Step 2: next-bar return as the supervised target; the last row
        # has no label so it is dropped from the feature side too.
        target = feats['returns'].shift(-1).dropna()
        labeled = feats.iloc[:-1]
        # Step 3: rank and select.
        ranking = eng.get_feature_importance(
            labeled[eng.feature_names],
            target,
            method='correlation'
        )
        assert len(ranking) > 0
        chosen = eng.select_top_features(
            labeled[eng.feature_names],
            target,
            n_features=50
        )
        assert len(chosen) == 50

View File

@@ -0,0 +1,473 @@
"""
Tests Unitaires - RegimeDetector.
Tests de la détection de régimes de marché avec HMM.
"""
import pytest
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from src.ml.regime_detector import RegimeDetector
class TestRegimeDetectorInitialization:
    """Constructor defaults and the regime-name table."""

    def test_initialization_default(self):
        """Defaults: 4 regimes, seed 42, unfitted, no features yet."""
        rd = RegimeDetector()
        assert rd.n_regimes == 4
        assert rd.random_state == 42
        assert rd.is_fitted is False
        assert len(rd.feature_names) == 0

    def test_initialization_custom_regimes(self):
        """A custom regime count is honoured."""
        assert RegimeDetector(n_regimes=3).n_regimes == 3

    def test_regime_names_defined(self):
        """All four canonical regime labels are declared."""
        names = RegimeDetector().REGIME_NAMES
        assert len(names) == 4
        for label in ('Trending Up', 'Trending Down', 'Ranging', 'High Volatility'):
            assert label in names.values()
class TestRegimeDetectorFitting:
    """Training behaviour of RegimeDetector.fit()."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 200-bar OHLCV frame used to train the detector."""
        dates = pd.date_range(start='2024-01-01', periods=200, freq='1H')
        np.random.seed(42)
        returns = np.random.normal(0.0001, 0.01, 200)
        prices = 1.1000 * np.exp(np.cumsum(returns))
        df = pd.DataFrame(index=dates)
        df['close'] = prices
        df['open'] = df['close'].shift(1).fillna(df['close'].iloc[0])
        df['high'] = df[['open', 'close']].max(axis=1) * 1.001
        df['low'] = df[['open', 'close']].min(axis=1) * 0.999
        df['volume'] = np.random.randint(1000, 10000, 200)
        return df

    def test_fit_success(self, sample_data):
        """Fitting on sufficient data marks the detector as fitted."""
        detector = RegimeDetector()
        detector.fit(sample_data)
        assert detector.is_fitted is True
        assert len(detector.feature_names) > 0

    def test_fit_creates_features(self, sample_data):
        """fit() derives the expected regime features from raw OHLCV."""
        detector = RegimeDetector()
        detector.fit(sample_data)
        expected_features = ['returns', 'volatility', 'trend', 'range', 'volume_change', 'momentum']
        for feature in expected_features:
            assert feature in detector.feature_names

    def test_fit_with_insufficient_data(self):
        """fit() on far too little data must raise or stay consistent.

        The previous version wrapped fit() in try/except-pass and asserted
        nothing, so the test could never fail. Now: raising is accepted as
        a valid contract; if fit() does not raise, the detector must report
        a coherent fitted state (fitted implies feature names exist).
        """
        detector = RegimeDetector()
        dates = pd.date_range(start='2024-01-01', periods=10, freq='1H')
        df = pd.DataFrame({
            'close': np.random.randn(10),
            'open': np.random.randn(10),
            'high': np.random.randn(10),
            'low': np.random.randn(10),
            'volume': np.random.randint(1000, 10000, 10)
        }, index=dates)
        try:
            detector.fit(df)
        except Exception:
            # Raising on insufficient data is an acceptable contract.
            return
        # No exception: the detector must not claim to be fitted without
        # having derived any features.
        if detector.is_fitted:
            assert len(detector.feature_names) > 0
class TestRegimeDetectorPrediction:
    """Prediction APIs of a trained RegimeDetector."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 200-bar OHLCV random walk."""
        idx = pd.date_range(start='2024-01-01', periods=200, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 200)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 200)
        return frame

    @pytest.fixture
    def fitted_detector(self, sample_data):
        """A detector trained on the sample data."""
        rd = RegimeDetector()
        rd.fit(sample_data)
        return rd

    def test_predict_regime_returns_array(self, fitted_detector, sample_data):
        """predict_regime() yields a non-empty numpy array."""
        regimes = fitted_detector.predict_regime(sample_data)
        assert isinstance(regimes, np.ndarray)
        assert len(regimes) > 0

    def test_predict_regime_values_valid(self, fitted_detector, sample_data):
        """All predicted labels lie in [0, n_regimes)."""
        regimes = fitted_detector.predict_regime(sample_data)
        assert (regimes >= 0).all()
        assert (regimes < fitted_detector.n_regimes).all()

    def test_predict_current_regime(self, fitted_detector, sample_data):
        """The current regime is a valid integer label."""
        current = fitted_detector.predict_current_regime(sample_data)
        assert isinstance(current, (int, np.integer))
        assert 0 <= current < fitted_detector.n_regimes

    def test_predict_without_fitting(self, sample_data):
        """Predicting before fit() raises a 'not fitted' ValueError."""
        with pytest.raises(ValueError, match="not fitted"):
            RegimeDetector().predict_regime(sample_data)

    def test_get_regime_probabilities(self, fitted_detector, sample_data):
        """Per-row probabilities have one column per regime and sum to 1."""
        probs = fitted_detector.get_regime_probabilities(sample_data)
        assert isinstance(probs, np.ndarray)
        assert probs.shape[1] == fitted_detector.n_regimes
        row_sums = probs.sum(axis=1)
        np.testing.assert_array_almost_equal(row_sums, np.ones(len(row_sums)), decimal=5)
class TestRegimeDetectorStatistics:
    """Regime naming and aggregate statistics."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 200-bar OHLCV random walk."""
        idx = pd.date_range(start='2024-01-01', periods=200, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 200)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 200)
        return frame

    @pytest.fixture
    def fitted_detector(self, sample_data):
        """A detector trained on the sample data."""
        rd = RegimeDetector()
        rd.fit(sample_data)
        return rd

    def test_get_regime_name(self, fitted_detector):
        """Every regime index maps to a non-empty string label."""
        for regime in range(fitted_detector.n_regimes):
            label = fitted_detector.get_regime_name(regime)
            assert isinstance(label, str)
            assert len(label) > 0

    def test_get_regime_statistics(self, fitted_detector, sample_data):
        """Statistics dict is complete and percentages sum to one."""
        stats = fitted_detector.get_regime_statistics(sample_data)
        for key in ('regime_counts', 'regime_percentages',
                    'current_regime', 'current_regime_name'):
            assert key in stats
        total_pct = sum(stats['regime_percentages'].values())
        assert abs(total_pct - 1.0) < 0.01
class TestRegimeDetectorAdaptation:
    """Parameter adaptation and trade gating per regime."""

    @staticmethod
    def _base_params():
        """Fresh copy of the baseline strategy parameters."""
        return {'min_confidence': 0.6, 'risk_per_trade': 0.02}

    def test_adapt_strategy_parameters(self):
        """Every regime yields a modified parameter set with both keys."""
        rd = RegimeDetector()
        base = self._base_params()
        for regime in range(4):
            adapted = rd.adapt_strategy_parameters(regime, base)
            assert 'min_confidence' in adapted
            assert 'risk_per_trade' in adapted
            assert adapted != base

    def test_adapt_trending_up(self):
        """Regime 0 (Trending Up) loosens confidence and raises risk."""
        rd = RegimeDetector()
        base = self._base_params()
        adapted = rd.adapt_strategy_parameters(0, base)
        assert adapted['min_confidence'] < base['min_confidence']
        assert adapted['risk_per_trade'] > base['risk_per_trade']

    def test_adapt_high_volatility(self):
        """Regime 3 (High Volatility) tightens confidence and cuts risk."""
        rd = RegimeDetector()
        base = self._base_params()
        adapted = rd.adapt_strategy_parameters(3, base)
        assert adapted['min_confidence'] > base['min_confidence']
        assert adapted['risk_per_trade'] < base['risk_per_trade']

    def test_should_trade_in_regime(self):
        """Strategy/regime gating matrix."""
        rd = RegimeDetector()
        # Scalping trades in Ranging (2) but not in High Volatility (3).
        assert rd.should_trade_in_regime(2, 'scalping') is True
        assert rd.should_trade_in_regime(3, 'scalping') is False
        # Intraday trades in both trends (0, 1) but not in Ranging (2).
        assert rd.should_trade_in_regime(0, 'intraday') is True
        assert rd.should_trade_in_regime(1, 'intraday') is True
        assert rd.should_trade_in_regime(2, 'intraday') is False
class TestRegimeDetectorFeatures:
    """Internal feature computation and normalization."""

    @pytest.fixture
    def sample_data(self):
        """Seeded 200-bar OHLCV random walk."""
        idx = pd.date_range(start='2024-01-01', periods=200, freq='1H')
        np.random.seed(42)
        close = 1.1000 * np.exp(np.cumsum(np.random.normal(0.0001, 0.01, 200)))
        frame = pd.DataFrame(index=idx)
        frame['close'] = close
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = np.random.randint(1000, 10000, 200)
        return frame

    def test_calculate_features(self, sample_data):
        """_calculate_features() returns a frame with every regime input."""
        feats = RegimeDetector()._calculate_features(sample_data)
        assert isinstance(feats, pd.DataFrame)
        assert len(feats) > 0
        for name in ('returns', 'volatility', 'trend', 'range', 'volume_change', 'momentum'):
            assert name in feats.columns

    def test_features_no_nan(self, sample_data):
        """After internal cleanup no NaN values remain."""
        feats = RegimeDetector()._calculate_features(sample_data)
        assert feats.isna().sum().sum() == 0

    def test_normalize_features(self):
        """Normalized features are roughly zero-mean, unit-variance."""
        raw = np.random.randn(100, 6)
        normed = RegimeDetector()._normalize_features(raw)
        assert abs(normed.mean()) < 0.1
        assert abs(normed.std() - 1.0) < 0.1
class TestRegimeDetectorEdgeCases:
    """Edge-case behaviour of RegimeDetector."""

    def test_with_missing_columns(self):
        """Fitting a frame missing open/high/low/volume raises KeyError."""
        detector = RegimeDetector()
        incomplete = pd.DataFrame({
            'close': np.random.randn(100)
            # open, high, low, volume deliberately absent
        })
        with pytest.raises(KeyError):
            detector.fit(incomplete)

    def test_with_constant_prices(self):
        """Constant prices either fit cleanly or fail with an exception.

        Bug fix: the original put ``assert detector.is_fitted`` inside the
        ``try`` block, so a failing assertion (AssertionError is an
        Exception) was silently swallowed and the check could never fail.
        The assertion now lives in ``else`` so it only runs — and can
        fail — when fit() itself succeeded.
        """
        detector = RegimeDetector()
        idx = pd.date_range(start='2024-01-01', periods=200, freq='1H')
        flat = pd.DataFrame(index=idx)
        flat['close'] = 1.1000  # zero-variance series
        flat['open'] = 1.1000
        flat['high'] = 1.1000
        flat['low'] = 1.1000
        flat['volume'] = 1000
        try:
            detector.fit(flat)
        except Exception:
            # A degenerate (zero-variance) series may legitimately be rejected.
            pass
        else:
            assert detector.is_fitted

    def test_regime_name_invalid(self):
        """An out-of-range regime id falls back to a generic 'Regime' label."""
        detector = RegimeDetector()
        name = detector.get_regime_name(999)
        assert 'Regime' in name
class TestRegimeDetectorIntegration:
    """End-to-end workflow tests for RegimeDetector."""

    @pytest.fixture
    def sample_data(self):
        """500-bar OHLCV frame stitched from four synthetic phases:
        trending up, trending down, ranging, high volatility.

        Fix: the original also accumulated a ``regimes`` list that was
        never read; it has been removed. RNG call order is unchanged so
        the generated data is byte-identical.
        """
        dates = pd.date_range(start='2024-01-01', periods=500, freq='1H')
        np.random.seed(42)
        base_price = 1.1000
        prices = []
        for i in range(500):
            if i < 125:  # Trending up
                price = base_price * (1 + i * 0.0001)
            elif i < 250:  # Trending down
                price = base_price * (1 - (i - 125) * 0.0001)
            elif i < 375:  # Ranging
                price = base_price + 0.001 * np.sin(i / 10)
            else:  # High volatility
                price = base_price + 0.01 * np.random.randn()
            prices.append(price)
        df = pd.DataFrame(index=dates)
        df['close'] = prices
        df['open'] = df['close'].shift(1).fillna(df['close'].iloc[0])
        df['high'] = df[['open', 'close']].max(axis=1) * 1.001
        df['low'] = df[['open', 'close']].min(axis=1) * 0.999
        df['volume'] = np.random.randint(1000, 10000, 500)
        return df

    def test_full_workflow(self, sample_data):
        """Fit -> predict -> stats -> parameter adaptation -> trade gate."""
        detector = RegimeDetector(n_regimes=4)
        # 1. Fit
        detector.fit(sample_data)
        assert detector.is_fitted
        # 2. Predict the regime series
        regimes = detector.predict_regime(sample_data)
        assert len(regimes) > 0
        # 3. Current regime must be a valid id
        current = detector.predict_current_regime(sample_data)
        assert 0 <= current < 4
        # 4. Statistics payload
        stats = detector.get_regime_statistics(sample_data)
        assert 'current_regime' in stats
        # 5. Parameter adaptation keeps the adapted key
        adapted = detector.adapt_strategy_parameters(current, {'min_confidence': 0.6})
        assert 'min_confidence' in adapted
        # 6. Trade gate returns a plain bool
        should_trade = detector.should_trade_in_regime(current, 'intraday')
        assert isinstance(should_trade, bool)

View File

@@ -0,0 +1,314 @@
"""
Tests Unitaires - RiskManager.
Tests complets du Risk Manager incluant:
- Pattern Singleton
- Validation pré-trade
- Gestion positions
- Métriques de risque
- Circuit breakers
"""
import pytest
from datetime import datetime
from src.core.risk_manager import RiskManager, Position, RiskMetrics
class TestRiskManagerSingleton:
    """Singleton-pattern tests for RiskManager."""

    def test_singleton_same_instance(self):
        """Two constructor calls hand back the very same object."""
        first = RiskManager()
        second = RiskManager()
        assert first is second

    def test_singleton_shared_state(self, risk_manager):
        """A mutation through one handle is visible through a fresh one."""
        risk_manager.portfolio_value = 15000.0
        fresh_handle = RiskManager()
        assert fresh_handle.portfolio_value == 15000.0
class TestRiskManagerInitialization:
    """Initialization tests for RiskManager."""

    def test_initialize_with_config(self, sample_config):
        """initialize() seeds capital, portfolio and peak values, no open positions."""
        rm = RiskManager()
        rm.initialize(sample_config['risk_limits'])
        assert rm.initial_capital == 10000.0
        assert rm.portfolio_value == 10000.0
        assert rm.peak_value == 10000.0
        assert len(rm.positions) == 0

    def test_config_loaded_correctly(self, risk_manager, sample_config):
        """The stored configuration matches exactly what was passed in."""
        assert risk_manager.config == sample_config['risk_limits']
class TestTradeValidation:
    """Pre-trade validation tests."""

    def _validate(self, rm, **overrides):
        """Run validate_trade with a sane baseline trade, overridden per test."""
        params = dict(
            symbol='EURUSD',
            quantity=1000,
            price=1.1000,
            stop_loss=1.0950,
            take_profit=1.1100,
            strategy='intraday',
        )
        params.update(overrides)
        return rm.validate_trade(**params)

    def test_validate_trade_success(self, risk_manager):
        """The baseline trade passes validation with no error."""
        ok, error = self._validate(risk_manager)
        assert ok is True
        assert error is None

    def test_validate_trade_no_stop_loss(self, risk_manager):
        """A trade without a stop-loss is rejected."""
        ok, error = self._validate(risk_manager, stop_loss=None)
        assert ok is False
        assert 'stop-loss' in error.lower()

    def test_validate_trade_excessive_risk(self, risk_manager):
        """An oversized position with a distant stop blows the risk budget."""
        ok, error = self._validate(
            risk_manager,
            quantity=100000,
            stop_loss=1.0000,
            take_profit=1.2000,
        )
        assert ok is False
        assert 'risk' in error.lower()

    def test_validate_trade_position_too_large(self, risk_manager):
        """20 000 units exceeds the 10% max position size limit."""
        ok, error = self._validate(risk_manager, quantity=20000)
        assert ok is False
        assert 'position size' in error.lower()

    def test_validate_trade_bad_risk_reward(self, risk_manager):
        """50 pips risk vs 20 pips reward (R:R = 0.4) is rejected."""
        ok, error = self._validate(risk_manager, take_profit=1.1020)
        assert ok is False
        assert 'risk/reward' in error.lower()
class TestPositionManagement:
    """Position lifecycle tests: open, mark-to-market, close.

    Fixes over the original:
    - P&L and portfolio-value assertions now use ``pytest.approx``:
      the values derive from float price arithmetic (e.g.
      ``1.11 - 1.10 == 0.010000000000000009``), so exact ``==`` against
      round literals is brittle.
    - The identical ``Position(...)`` construction, repeated four times,
      is factored into ``_make_position``.
    """

    @staticmethod
    def _make_position(**overrides):
        """Build a standard 1000-unit EURUSD long; any field overridable."""
        fields = dict(
            symbol='EURUSD',
            quantity=1000,
            entry_price=1.1000,
            current_price=1.1000,
            stop_loss=1.0950,
            take_profit=1.1100,
            strategy='intraday',
            entry_time=datetime.now(),
            unrealized_pnl=0.0,
            risk_amount=50.0,
        )
        fields.update(overrides)
        return Position(**fields)

    def test_add_position(self, risk_manager):
        """Adding a position registers it under its symbol and bumps the count."""
        position = self._make_position()
        risk_manager.add_position(position)
        assert 'EURUSD' in risk_manager.positions
        assert risk_manager.positions['EURUSD'] == position
        assert risk_manager.total_trades == 1

    def test_update_position(self, risk_manager):
        """Marking to a new price updates current_price and unrealized P&L."""
        risk_manager.add_position(self._make_position())
        risk_manager.update_position('EURUSD', 1.1050)
        updated = risk_manager.positions['EURUSD']
        assert updated.current_price == 1.1050
        assert updated.unrealized_pnl == pytest.approx(50.0)

    def test_close_position_profit(self, risk_manager):
        """Closing at take-profit books the gain and counts a winner."""
        risk_manager.add_position(self._make_position())
        initial_value = risk_manager.portfolio_value
        pnl = risk_manager.close_position('EURUSD', 1.1100, 'take_profit')
        assert pnl == pytest.approx(100.0)
        assert 'EURUSD' not in risk_manager.positions
        assert risk_manager.portfolio_value == pytest.approx(initial_value + 100.0)
        assert risk_manager.winning_trades == 1

    def test_close_position_loss(self, risk_manager):
        """Closing at the stop books the loss and counts a loser."""
        risk_manager.add_position(self._make_position())
        initial_value = risk_manager.portfolio_value
        pnl = risk_manager.close_position('EURUSD', 1.0950, 'stop_loss')
        assert pnl == pytest.approx(-50.0)
        assert risk_manager.portfolio_value == pytest.approx(initial_value - 50.0)
        assert risk_manager.losing_trades == 1
class TestRiskMetrics:
    """Risk-metric computation tests."""

    def test_get_risk_metrics(self, risk_manager):
        """The metrics object carries sane, bounded values."""
        metrics = risk_manager.get_risk_metrics()
        assert isinstance(metrics, RiskMetrics)
        assert metrics.total_risk >= 0
        assert metrics.current_drawdown >= 0
        assert 0 <= metrics.risk_utilization <= 1

    def test_calculate_drawdown(self, risk_manager):
        """A 12 000 -> 10 800 move is a 10% drawdown.

        Fix: exact ``dd == 0.10`` is brittle — if the implementation
        computes ``1 - value/peak`` the result is 0.09999999999999998,
        not the double 0.1. ``pytest.approx`` accepts either form.
        """
        risk_manager.peak_value = 12000.0
        risk_manager.portfolio_value = 10800.0
        dd = risk_manager._calculate_current_drawdown()
        assert dd == pytest.approx(0.10)

    def test_calculate_var(self, risk_manager):
        """VaR over a mixed P&L history is strictly positive."""
        risk_manager.pnl_history = [100, -50, 75, -30, 120, -80, 90, -40, 110, -60] * 3
        var = risk_manager._calculate_var(confidence=0.95)
        assert var > 0
class TestCircuitBreakers:
    """Circuit-breaker tests."""

    def test_halt_on_max_drawdown(self, risk_manager):
        """A 16% drawdown (limit: 15%) halts trading and records the reason."""
        risk_manager.peak_value = 10000.0
        risk_manager.portfolio_value = 8400.0  # 16% below peak
        risk_manager.check_circuit_breakers()
        assert risk_manager.trading_halted is True
        assert 'drawdown' in risk_manager.halt_reason.lower()

    def test_halt_on_daily_loss(self, risk_manager):
        """A -4% single-day loss (limit: 3%) halts trading."""
        risk_manager.portfolio_value = 10000.0
        risk_manager.daily_trades = [{'time': datetime.now(), 'strategy': 'test'}]
        risk_manager.pnl_history = [-400]  # -4% in one day
        risk_manager.check_circuit_breakers()
        assert risk_manager.trading_halted is True

    def test_resume_trading(self, risk_manager):
        """resume_trading() clears both the halt flag and its reason."""
        risk_manager.halt_trading("Test halt")
        assert risk_manager.trading_halted is True
        risk_manager.resume_trading()
        assert risk_manager.trading_halted is False
        assert risk_manager.halt_reason is None
class TestStatistics:
    """Aggregate-statistics tests."""

    def test_get_statistics(self, risk_manager):
        """The statistics payload exposes all the expected keys."""
        stats = risk_manager.get_statistics()
        for key in ('portfolio_value', 'total_return', 'win_rate', 'total_trades'):
            assert key in stats

    def test_win_rate_calculation(self, risk_manager):
        """6 wins out of 10 trades yields a 60% win rate."""
        risk_manager.winning_trades = 6
        risk_manager.losing_trades = 4
        risk_manager.total_trades = 10
        assert risk_manager.get_statistics()['win_rate'] == 0.6

View File

@@ -0,0 +1,318 @@
"""
Tests Unitaires - Strategies.
Tests des stratégies de trading:
- BaseStrategy
- ScalpingStrategy
- IntradayStrategy
- SwingStrategy
"""
import pytest
import pandas as pd
import numpy as np
from src.strategies.base_strategy import BaseStrategy, Signal
from src.strategies.scalping.scalping_strategy import ScalpingStrategy
from src.strategies.intraday.intraday_strategy import IntradayStrategy
from src.strategies.swing.swing_strategy import SwingStrategy
class TestBaseStrategy:
    """Tests for the abstract BaseStrategy contract."""

    def test_cannot_instantiate_abstract_class(self):
        """Instantiating the ABC directly must raise TypeError."""
        with pytest.raises(TypeError):
            BaseStrategy({})

    def test_position_sizing_kelly(self, sample_config):
        """Kelly-based sizing yields a positive size bounded by the portfolio."""
        strategy = ScalpingStrategy(sample_config['strategy_params']['scalping_strategy'])
        # Seed a plausible track record for the Kelly formula.
        strategy.win_rate = 0.6
        strategy.avg_win = 100
        strategy.avg_loss = -50
        long_signal = Signal(
            symbol='EURUSD',
            direction='LONG',
            entry_price=1.1000,
            stop_loss=1.0950,
            take_profit=1.1100,
            confidence=0.8,
            timestamp=pd.Timestamp.now(),
            strategy='scalping',
            metadata={},
        )
        size = strategy.calculate_position_size(
            signal=long_signal,
            portfolio_value=10000,
            current_volatility=0.02,
        )
        assert size > 0
        assert size < 10000  # never exceeds the whole portfolio
class TestScalpingStrategy:
    """Scalping strategy tests."""

    def test_initialization(self, sample_config):
        """The strategy picks up its name and adaptive parameters."""
        strategy = ScalpingStrategy(sample_config['strategy_params']['scalping_strategy'])
        assert strategy.name == 'scalping'
        assert 'bb_period' in strategy.parameters
        assert 'rsi_period' in strategy.parameters

    def test_calculate_indicators(self, sample_config, sample_ohlcv_data):
        """Every expected indicator column is produced."""
        strategy = ScalpingStrategy(sample_config['strategy_params']['scalping_strategy'])
        enriched = strategy.calculate_indicators(sample_ohlcv_data)
        for indicator in ('bb_upper', 'bb_lower', 'bb_position',
                          'rsi', 'macd', 'macd_hist', 'atr'):
            assert indicator in enriched.columns

    def test_analyze_generates_signal(self, sample_config, sample_ohlcv_data):
        """analyze() returns None or a well-formed Signal."""
        strategy = ScalpingStrategy(sample_config['strategy_params']['scalping_strategy'])
        enriched = strategy.calculate_indicators(sample_ohlcv_data.copy())
        signal = strategy.analyze(enriched)
        if signal is not None:
            assert isinstance(signal, Signal)
            assert signal.direction in ('LONG', 'SHORT')
            assert signal.stop_loss is not None
            assert signal.take_profit is not None
            assert 0 <= signal.confidence <= 1

    def test_oversold_conditions(self, sample_config):
        """A steady decline drives RSI below the midline."""
        strategy = ScalpingStrategy(sample_config['strategy_params']['scalping_strategy'])
        idx = pd.date_range(start='2024-01-01', periods=100, freq='5min')
        frame = pd.DataFrame(index=idx)
        frame['close'] = np.linspace(1.1000, 1.0900, 100)  # monotone sell-off
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = 5000
        frame = strategy.calculate_indicators(frame)
        assert frame['rsi'].iloc[-1] < 50  # persistent selling keeps RSI low
class TestIntradayStrategy:
    """Intraday strategy tests."""

    def test_initialization(self, sample_config):
        """EMA parameters from the config are picked up by the strategy."""
        config = {
            'name': 'intraday',
            'timeframe': '1h',
            'risk_per_trade': 0.02,
            'max_holding_time': 28800,
            'max_trades_per_day': 20,
            'adaptive_params': {
                'ema_fast': 9,
                'ema_slow': 21,
                'ema_trend': 50,
                'adx_threshold': 25,
            },
        }
        strategy = IntradayStrategy(config)
        assert strategy.name == 'intraday'
        assert strategy.parameters['ema_fast'] == 9
        assert strategy.parameters['ema_slow'] == 21

    def test_calculate_adx(self, sample_config, sample_ohlcv_data):
        """ADX and both DI columns exist and stay within [0, 100]."""
        config = {
            'name': 'intraday',
            'timeframe': '1h',
            'adaptive_params': {
                'ema_fast': 9,
                'ema_slow': 21,
                'ema_trend': 50,
            },
        }
        strategy = IntradayStrategy(config)
        enriched = strategy.calculate_indicators(sample_ohlcv_data)
        for column in ('adx', 'pos_di', 'neg_di'):
            assert column in enriched.columns
        adx = enriched['adx'].dropna()
        assert (adx >= 0).all()
        assert (adx <= 100).all()

    def test_ema_crossover_detection(self, sample_config):
        """In a steady uptrend the fast EMA ends above the slow EMA."""
        config = {
            'name': 'intraday',
            'timeframe': '1h',
            'adaptive_params': {
                'ema_fast': 9,
                'ema_slow': 21,
                'ema_trend': 50,
                'adx_threshold': 25,
            },
        }
        strategy = IntradayStrategy(config)
        idx = pd.date_range(start='2024-01-01', periods=100, freq='1H')
        frame = pd.DataFrame(index=idx)
        frame['close'] = np.linspace(1.0900, 1.1100, 100)  # steady uptrend
        frame['open'] = frame['close'].shift(1).fillna(frame['close'].iloc[0])
        frame['high'] = frame[['open', 'close']].max(axis=1) * 1.001
        frame['low'] = frame[['open', 'close']].min(axis=1) * 0.999
        frame['volume'] = 5000
        frame = strategy.calculate_indicators(frame)
        assert frame['ema_fast'].iloc[-1] > frame['ema_slow'].iloc[-1]
class TestSwingStrategy:
    """Swing strategy tests."""

    def test_initialization(self):
        """SMA parameters from the config are picked up by the strategy."""
        config = {
            'name': 'swing',
            'timeframe': '4h',
            'adaptive_params': {
                'sma_short': 20,
                'sma_long': 50,
                'fibonacci_lookback': 50,
            },
        }
        strategy = SwingStrategy(config)
        assert strategy.name == 'swing'
        assert strategy.parameters['sma_short'] == 20
        assert strategy.parameters['sma_long'] == 50

    def test_fibonacci_levels(self, sample_ohlcv_data):
        """All Fibonacci columns exist and are ordered from high down to low."""
        config = {
            'name': 'swing',
            'timeframe': '4h',
            'adaptive_params': {
                'sma_short': 20,
                'sma_long': 50,
                'fibonacci_lookback': 50,
            },
        }
        strategy = SwingStrategy(config)
        enriched = strategy.calculate_indicators(sample_ohlcv_data)
        ordered = ['fib_high', 'fib_236', 'fib_382', 'fib_500',
                   'fib_618', 'fib_786', 'fib_low']
        for level in ordered[1:-1]:
            assert level in enriched.columns
        last = enriched.iloc[-1]
        # Retracement levels must decrease monotonically from high to low.
        for upper, lower in zip(ordered, ordered[1:]):
            assert last[upper] >= last[lower]

    def test_get_strategy_info(self):
        """The info payload carries the identity keys and correct type."""
        config = {
            'name': 'swing',
            'timeframe': '4h',
            'adaptive_params': {}
        }
        strategy = SwingStrategy(config)
        info = strategy.get_strategy_info()
        for key in ('name', 'type', 'timeframe', 'indicators'):
            assert key in info
        assert info['type'] == 'swing'
class TestSignal:
    """Tests for the Signal object."""

    def test_signal_creation(self):
        """All constructor fields are stored as given."""
        signal = Signal(
            symbol='EURUSD',
            direction='LONG',
            entry_price=1.1000,
            stop_loss=1.0950,
            take_profit=1.1100,
            confidence=0.75,
            timestamp=pd.Timestamp.now(),
            strategy='scalping',
            metadata={'rsi': 25, 'bb_position': 0.1}
        )
        assert signal.symbol == 'EURUSD'
        assert signal.direction == 'LONG'
        assert signal.confidence == 0.75
        assert 'rsi' in signal.metadata

    def test_signal_risk_reward(self):
        """50 pips risk vs 100 pips reward gives a 2:1 R:R ratio.

        Fix: the original ``assert rr_ratio == 2.0`` fails in IEEE-754
        arithmetic — ``(1.1100 - 1.1000) / (1.1000 - 1.0950)`` evaluates
        to ~1.9999999999999558, not exactly 2.0, because neither
        difference is exactly representable. ``pytest.approx`` makes the
        intended comparison.
        """
        signal = Signal(
            symbol='EURUSD',
            direction='LONG',
            entry_price=1.1000,
            stop_loss=1.0950,  # 50 pips risk
            take_profit=1.1100,  # 100 pips reward
            confidence=0.75,
            timestamp=pd.Timestamp.now(),
            strategy='scalping',
            metadata={}
        )
        risk = abs(signal.entry_price - signal.stop_loss)
        reward = abs(signal.take_profit - signal.entry_price)
        rr_ratio = reward / risk
        assert rr_ratio == pytest.approx(2.0)  # R:R of 2:1