|
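"""Cryptocurrency market scanner.

Fetches OHLCV data through ccxt, computes technical indicators with the `ta`
library, and logs trend alerts to a results file. A RandomForestClassifier can
optionally be trained on collected samples to confirm detected trends."""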
import sys
import os
import re
import pickle
import argparse
import warnings
from datetime import datetime

import ccxt
import pandas as pd
import numpy as np
import ta
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

warnings.filterwarnings('ignore')

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.expand_frame_repr', True)


class MLTechnicalScanner:
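    """Scan exchange markets for trend signals using technical indicators,
    optionally confirmed by a trained RandomForestClassifier."""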
|
    def __init__(self, training_mode=False):
        self.training_mode = training_mode
        self.model = None
        self.model_file = "technical_ml_model.pkl"
        self.training_data_file = "training_data.csv"
        self.results_file = "results.txt"
        self.min_training_samples = 100
        self.load_ml_model()

        # Instantiate a client for every exchange ccxt supports so any of them
        # can be selected by name at runtime.
        self.exchanges = {}
        for exchange_id in ccxt.exchanges:
            exchange_class = getattr(ccxt, exchange_id)
            self.exchanges[exchange_id] = exchange_class()

        self.feature_columns = [
            'rsi', 'macd', 'bollinger_upper', 'bollinger_lower',
            'volume_ma', 'ema_20', 'ema_50', 'adx'
        ]

        self.performance_history = pd.DataFrame(columns=[
            'timestamp', 'symbol', 'prediction', 'actual', 'profit'
        ])

        self.training_data = pd.DataFrame(columns=self.feature_columns + ['target'])
|
    def init_results_file(self):
        """Initialize results file only when starting a new scan"""
        with open(self.results_file, 'w') as f:
            f.write("Scan Results Log\n")
            f.write("=" * 50 + "\n")
            f.write(f"Scan started at {datetime.now()}\n\n")
|
    def log_result(self, message):
        """Log message to results file"""
        try:
            with open(self.results_file, 'a') as f:
                f.write(message + '\n')
        except Exception as e:
            print(f"Error writing to results file: {str(e)}")
|
    def load_ml_model(self):
        """Load a trained ML model if one exists, otherwise initialize a new one"""
        if os.path.exists(self.model_file):
            with open(self.model_file, 'rb') as f:
                self.model = pickle.load(f)
            msg = "Loaded trained model from file"
            print(msg)
            self.log_result(msg)
        else:
            msg = "Initializing new model"
            print(msg)
            self.log_result(msg)
            self.model = RandomForestClassifier(n_estimators=100, random_state=42)
|
    def save_ml_model(self):
        """Save trained ML model"""
        with open(self.model_file, 'wb') as f:
            pickle.dump(self.model, f)
        msg = "Saved model to file"
        print(msg)
        self.log_result(msg)
|
    def load_training_data(self):
        """Load existing training data if available"""
        if os.path.exists(self.training_data_file):
            self.training_data = pd.read_csv(self.training_data_file)
            msg = f"Loaded {len(self.training_data)} training samples"
            print(msg)
            self.log_result(msg)
|
    def save_training_data(self):
        """Save training data to file"""
        self.training_data.to_csv(self.training_data_file, index=False)
        msg = f"Saved {len(self.training_data)} training samples"
        print(msg)
        self.log_result(msg)
|
    def calculate_features(self, df):
        """Calculate technical indicators"""
        try:
            close = df['close'].astype(float)
            high = df['high'].astype(float)
            low = df['low'].astype(float)
            volume = df['volume'].astype(float)

            # Momentum and trend oscillators
            df['rsi'] = ta.momentum.rsi(close, window=14)
            df['macd'] = ta.trend.macd_diff(close)

            # Bollinger Bands
            bollinger = ta.volatility.BollingerBands(close)
            df['bollinger_upper'] = bollinger.bollinger_hband()
            df['bollinger_lower'] = bollinger.bollinger_lband()

            # 20-period volume moving average
            df['volume_ma'] = volume.rolling(window=20).mean()

            # EMAs and ADX for trend direction and strength
            df['ema_20'] = ta.trend.ema_indicator(close, window=20)
            df['ema_50'] = ta.trend.ema_indicator(close, window=50)
            df['adx'] = ta.trend.adx(high, low, close, window=14)

            return df
        except Exception as e:
            error_msg = f"Error calculating features: {str(e)}"
            print(error_msg)
            self.log_result(error_msg)
            return None
|
    def train_initial_model(self):
        """Train initial model if we have enough data"""
        self.load_training_data()

        if len(self.training_data) >= self.min_training_samples:
            X = self.training_data[self.feature_columns]
            y = self.training_data['target']

            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=0.2, random_state=42
            )

            self.model.fit(X_train, y_train)

            preds = self.model.predict(X_test)
            accuracy = accuracy_score(y_test, preds)
            msg = f"Initial model trained with accuracy: {accuracy:.2f}"
            print(msg)
            self.log_result(msg)

            self.save_ml_model()
            return True
        else:
            msg = (f"Not enough training data ({len(self.training_data)} samples). "
                   f"Need at least {self.min_training_samples}.")
            print(msg)
            self.log_result(msg)
            return False
|
    def predict_direction(self, features):
        """Predict price direction using ML model"""
        try:
            # An unfitted model has no classes_ attribute; return a neutral signal.
            if self.model is None or not hasattr(self.model, 'classes_'):
                return 0

            features = features[self.feature_columns].values.reshape(1, -1)
            return self.model.predict(features)[0]
        except Exception as e:
            error_msg = f"Prediction error: {str(e)}"
            print(error_msg)
            self.log_result(error_msg)
            return 0
|
    def collect_training_sample(self, symbol, exchange, timeframe='1h'):
        """Collect one labelled data sample for training"""
        try:
            ohlcv = exchange.fetch_ohlcv(symbol, timeframe, limit=100)
            if len(ohlcv) < 50:
                return

            df = pd.DataFrame(ohlcv, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
            df = self.calculate_features(df)
            if df is None:
                return

            # Label the second-to-last bar by the direction of the move into the last bar.
            current_price = df['close'].iloc[-2]
            future_price = df['close'].iloc[-1]
            price_change = future_price - current_price
            target = 1 if price_change > 0 else (-1 if price_change < 0 else 0)

            features = df.iloc[-2][self.feature_columns].copy()
            features['target'] = target

            new_row = pd.DataFrame([features])
            self.training_data = pd.concat([self.training_data, new_row], ignore_index=True)
            msg = f"Collected training sample for {symbol}"
            print(msg)
            self.log_result(msg)

            # Persist periodically so progress is not lost on long runs.
            if len(self.training_data) % 10 == 0:
                self.save_training_data()

        except Exception as e:
            error_msg = f"Error collecting training sample: {str(e)}"
            print(error_msg)
            self.log_result(error_msg)
|
    def scan_symbol(self, symbol, exchange, timeframes):
        """Scan symbol for trading opportunities"""
        try:
            primary_tf = timeframes[0]
            ohlcv = exchange.fetch_ohlcv(symbol, primary_tf, limit=100)
            if len(ohlcv) < 50:
                return

            df = pd.DataFrame(ohlcv, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
            df = self.calculate_features(df)
            if df is None:
                return

            if self.training_mode:
                self.collect_training_sample(symbol, exchange, primary_tf)
                return

            latest = df.iloc[-1].copy()
            features = pd.DataFrame([latest[self.feature_columns]])
            prediction = self.predict_direction(features)

            # Simple EMA trend filter, optionally confirmed by the ML prediction.
            ema_20 = df['ema_20'].iloc[-1]
            ema_50 = df['ema_50'].iloc[-1]
            price = df['close'].iloc[-1]

            uptrend = (ema_20 > ema_50) and (price > ema_20)
            downtrend = (ema_20 < ema_50) and (price < ema_20)

            if uptrend and prediction == 1:
                self.alert(symbol, "STRONG UPTREND", timeframes, price)
            elif downtrend and prediction == -1:
                self.alert(symbol, "STRONG DOWNTREND", timeframes, price)
            elif uptrend:
                self.alert(symbol, "UPTREND", timeframes, price)
            elif downtrend:
                self.alert(symbol, "DOWNTREND", timeframes, price)

        except Exception as e:
            error_msg = f"Error scanning {symbol}: {str(e)}"
            print(error_msg)
            self.log_result(error_msg)
|
    def alert(self, symbol, trend_type, timeframes, current_price):
        """Generate alert for detected trend"""
        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        message = f"({trend_type}) detected for {symbol} at price {current_price} on {timeframes} at {timestamp}"
        print(message)
        self.log_result(message)
|
    def compare_results(self, exchange_name):
        """Compare previously logged alerts with current prices"""
        try:
            if not os.path.exists(self.results_file):
                print("No results file found to compare")
                return

            exchange = self.exchanges.get(exchange_name.lower())
            if not exchange:
                print(f"Exchange {exchange_name} not supported")
                return

            # Matches alert lines written by alert(), e.g.
            # "(UPTREND) detected for BTC/USDT at price 60000.0 on ['1h'] at 2024-01-01 00:00:00"
            pattern = r"\((.*?)\) detected for (.*?) at price ([\d.]+) on"

            with open(self.results_file, 'r') as f:
                lines = f.readlines()

            print("\n=== Price Comparison Report ===")
            print(f"Generated at: {datetime.now()}\n")

            for line in lines:
                match = re.search(pattern, line)
                if match:
                    trend_type = match.group(1)
                    symbol = match.group(2)
                    old_price = float(match.group(3))
                    # The timestamp is the final " at ..." segment of the alert line.
                    timestamp = line.split(' at ')[-1].strip()

                    try:
                        ticker = exchange.fetch_ticker(symbol)
                        current_price = ticker['last']
                        price_change = current_price - old_price
                        percent_change = (price_change / old_price) * 100

                        print(f"Symbol: {symbol}")
                        print(f"Previous: {trend_type} at {old_price} ({timestamp})")
                        print(f"Current: {current_price} ({datetime.now().strftime('%Y-%m-%d %H:%M:%S')})")
                        print(f"Change: {price_change:.4f} ({percent_change:.2f}%)")
                        print("-" * 50)
                    except Exception as e:
                        print(f"Error fetching current price for {symbol}: {str(e)}")
                        continue

            print("\n=== End of Report ===")

        except Exception as e:
            print(f"Error comparing results: {str(e)}")
|
|
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--exchange", help="Exchange name", required=False)
    parser.add_argument("-f", "--filter", help="Asset filter", required=False)
    parser.add_argument("-tf", "--timeframes", help="Timeframes to scan (comma separated)", required=False)
    parser.add_argument("--train", help="Run in training mode", action="store_true")
    parser.add_argument("--compare", help="Compare previous results with current prices", action="store_true")
    args = parser.parse_args()

    if args.compare:
        scanner = MLTechnicalScanner()
        if args.exchange:
            scanner.compare_results(args.exchange)
        else:
            print("Please specify an exchange with -e/--exchange when using --compare")
        sys.exit(0)

    if not all([args.exchange, args.filter, args.timeframes]):
        print("Error: --exchange, --filter, and --timeframes are required when not using --compare")
        sys.exit(1)

    scanner = MLTechnicalScanner(training_mode=args.train)

    scanner.init_results_file()

    exchange = scanner.exchanges.get(args.exchange.lower())
    if not exchange:
        error_msg = f"Exchange {args.exchange} not supported"
        print(error_msg)
        scanner.log_result(error_msg)
        sys.exit(1)

    try:
        markets = exchange.fetch_markets()
    except Exception as e:
        error_msg = f"Error fetching markets: {str(e)}"
        print(error_msg)
        scanner.log_result(error_msg)
        sys.exit(1)

    # Use unified ccxt symbols (e.g. BTC/USDT); fetch_ohlcv and fetch_ticker expect them.
    symbols = [
        m['symbol'] for m in markets
        if m['active'] and args.filter in m['symbol']
    ]

    if not symbols:
        error_msg = f"No symbols found matching filter {args.filter}"
        print(error_msg)
        scanner.log_result(error_msg)
        sys.exit(1)

    if args.train:
        train_msg = f"Running in training mode for {len(symbols)} symbols"
        print(train_msg)
        scanner.log_result(train_msg)
        for symbol in symbols:
            scanner.collect_training_sample(symbol, exchange)

        if scanner.train_initial_model():
            success_msg = "Training completed successfully"
            print(success_msg)
            scanner.log_result(success_msg)
        else:
            fail_msg = "Not enough data collected for training"
            print(fail_msg)
            scanner.log_result(fail_msg)
        sys.exit(0)

    if not hasattr(scanner.model, 'classes_'):
        warn_msg = "Warning: No trained model available. Running with basic scanning only."
        print(warn_msg)
        scanner.log_result(warn_msg)

    timeframes = args.timeframes.split(',')
    scan_msg = f"Scanning {len(symbols)} symbols on timeframes {timeframes}"
    print(scan_msg)
    scanner.log_result(scan_msg)

    for symbol in symbols:
        scanner.scan_symbol(symbol, exchange, timeframes)

    end_msg = f"\nScan completed at {datetime.now()}"
    print(end_msg)
    scanner.log_result(end_msg)
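
# Example invocations (illustrative only; the script name and the exchange,
# filter, and timeframe values below are placeholders):
#   python scanner.py -e binance -f USDT -tf 1h,4h        # scan and log alerts to results.txt
#   python scanner.py -e binance -f USDT -tf 1h --train   # collect samples and train the model
#   python scanner.py -e binance --compare                # compare logged alerts with current prices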