Upload 2024_classification_pipeline.py
Browse files- 2024_classification_pipeline.py +117 -0
2024_classification_pipeline.py
ADDED
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
"""Environment setup and imports for the stock classification pipeline.

NOTE(review): the original file contained a bare ``pip install`` line,
which is a shell command, not Python -- it is a SyntaxError in a plain
.py file (it only works in a notebook cell, and even there needs a
leading ``!``).  It is preserved below as a comment; run it in your
shell / notebook before executing this script:

    pip install boto3 pandas_ta tensorflow==2.16.1 keras==3.3.3 --quiet

(yfinance, matplotlib and seaborn are also imported below but were
missing from that install line -- TODO confirm they are provisioned.)
"""

# Import utility libraries
import numpy as np
import pandas as pd
import os
import yfinance as yf   # market-data download
import pandas_ta as ta  # technical-analysis indicators (Supertrend)
import boto3            # unused in this chunk -- presumably for S3 upload later; verify

# Import Machine Learning Libraries
import tensorflow as tf
import keras

# Import Charting
import matplotlib.pyplot as plt
import seaborn as sns

print(f"TensorFlow version: {tf.__version__}")
print(f"Keras version: {keras.__version__}")
|
22 |
+
|
23 |
+
"""#Data Creation
|
24 |
+
|
25 |
+
"""
|
26 |
+
|
27 |
+
data = pd.read_csv("http://www.nasdaqtrader.com/dynamic/SymDir/nasdaqtraded.txt", sep='|')
|
28 |
+
data_clean = data[data['Test Issue'] == 'N']
|
29 |
+
symbols = data_clean['NASDAQ Symbol'].tolist()
|
30 |
+
print('total number of symbols traded = {}'.format(len(symbols)))
|
31 |
+
|
32 |
+
# Function to create features using pandas
def create_features(orig_data: pd.DataFrame) -> pd.DataFrame:
    """Append technical-indicator feature columns to an OHLCV DataFrame.

    Expects columns 'Open', 'High', 'Low' and 'Volume' plus 'Close'.
    Adds percentage price/volume changes, moving averages, Bollinger
    bands, annualized historical volatility, MACD, RSI, a stochastic
    oscillator and the pandas_ta Supertrend direction, then drops every
    row containing a NaN (the warm-up rows of the longest lookback, the
    50-day moving average).

    Args:
        orig_data: input OHLCV data; not modified (a copy is taken).

    Returns:
        A new DataFrame with the feature columns appended and all
        NaN-containing rows removed.
    """
    data = orig_data.copy()  # Ensure we're working with a copy

    def _pct_change_over(series: pd.Series, periods: int) -> pd.Series:
        # Percent change over `periods` rows, expressed in percent.
        # Same arithmetic as the original (series - shift) * 100 / shift;
        # a zero/NaN in the shifted value yields inf/NaN as before.
        shifted = series.shift(periods)
        return (series - shifted) * 100 / shifted

    data['Price_Diff_1'] = _pct_change_over(data['Close'], 1)
    data['Price_Diff_3'] = _pct_change_over(data['Close'], 3)
    data['Price_Diff_5'] = _pct_change_over(data['Close'], 5)

    data['MA_10'] = data['Close'].rolling(window=10).mean()
    data['MA_50'] = data['Close'].rolling(window=50).mean()

    data['Volume_Diff_1'] = _pct_change_over(data['Volume'], 1)
    data['Volume_Diff_3'] = _pct_change_over(data['Volume'], 3)
    data['Volume_Diff_5'] = _pct_change_over(data['Volume'], 5)

    # Annualized volatility of daily returns: 20-day rolling std scaled
    # by sqrt(252 trading days).
    data['Historical_Volatility'] = data['Close'].pct_change().rolling(window=20).std() * np.sqrt(252)

    # Bollinger bands: 20-day SMA +/- 2 standard deviations.  The rolling
    # std is computed once and reused (the original computed it twice).
    window = 20
    rolling_std = data['Close'].rolling(window=window).std()
    data['MA'] = data['Close'].rolling(window=window).mean()
    data['BB_upper'] = data['MA'] + 2 * rolling_std
    data['BB_lower'] = data['MA'] - 2 * rolling_std

    # MACD: 12-period EMA minus 26-period EMA.
    data['MACD'] = data['Close'].ewm(span=12, adjust=False).mean() - data['Close'].ewm(span=26, adjust=False).mean()

    # RSI over 14 bars.  NOTE(review): uses a simple rolling mean of
    # gains/losses rather than Wilder's smoothing, so values differ
    # slightly from most charting packages -- confirm this is intended.
    delta = data['Close'].diff()
    gain = delta.clip(lower=0).rolling(window=14).mean()
    loss = (-delta.clip(upper=0)).rolling(window=14).mean()
    rs = gain / loss
    data['RSI'] = 100 - (100 / (1 + rs))

    # Stochastic oscillator, expressed as a 0-1 fraction (not 0-100).
    low_min = data['Low'].rolling(window=14).min()
    high_max = data['High'].rolling(window=14).max()
    data['Stochastic_Oscillator'] = (data['Close'] - low_min) / (high_max - low_min)

    # Supertrend direction column from pandas_ta; the key is a plain
    # constant (the original wrapped it in a pointless f-string).
    supertrend = ta.supertrend(data['High'], data['Low'], data['Close'], length=7, multiplier=3.0)
    data['Supertrend'] = supertrend['SUPERTd_7_3.0']

    return data.dropna()
|
66 |
+
|
67 |
+
combined_data = pd.DataFrame()
max_rows = 2000000  # Stop collecting once this many feature rows are gathered

# Download each symbol's history, clean it, engineer features and append
# it to `combined_data` until the row cap is reached.
for idx, symbol in enumerate(symbols):
    print(f"Processing stock {symbol} at {idx} position with current records : {combined_data.shape}")
    try:
        # Full available history; an empty frame (delisted / bad ticker)
        # is simply skipped.
        data = yf.download(symbol, period='max')
        if data.shape[0] == 0:
            continue
        data.reset_index(inplace = True)
        data['Symbol'] = symbol

        # Keep at most the 2000 most recent bars per symbol.
        trimmed_data = data[-2000:].copy()

        # Shift the DataFrame to compare consecutive rows
        shifted_df = trimmed_data.shift(-1)

        # Identify rows where 'Open', 'High', 'Low', and 'Close' are the
        # same as the next row -- stale/duplicated quotes.
        duplicates = (trimmed_data['Open'] == shifted_df['Open']) & (trimmed_data['High'] == shifted_df['High']) & \
                     (trimmed_data['Low'] == shifted_df['Low']) & (trimmed_data['Close'] == shifted_df['Close'])

        # Drop the duplicate rows
        trimmed_data = trimmed_data[~duplicates]

        cleaned_data = create_features(trimmed_data)
        combined_data = pd.concat([combined_data, cleaned_data], ignore_index=True)

        # Check if the combined DataFrame has reached the maximum number of rows
        if combined_data.shape[0] >= max_rows:
            print(f"Reached {max_rows} rows. Stopping.")
            break
    except Exception as exc:
        # Was a bare `except:` which silently swallowed *everything*,
        # including KeyboardInterrupt/SystemExit.  Keep the best-effort
        # per-ticker behavior but report what actually failed.
        print(f"In except for ticker: {symbol} ({exc!r})")
|
100 |
+
|
101 |
+
# Final column layout: identifiers first, then raw OHLCV, then indicators.
# NOTE(review): selecting a missing column raises KeyError -- assumes the
# yfinance download included 'Adj Close'; confirm for the yfinance version
# in use.
new_column_order = [
    'Symbol', 'Date',
    'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume',
    'MACD', 'RSI', 'Stochastic_Oscillator', 'Supertrend',
    'Price_Diff_1', 'Price_Diff_3', 'Price_Diff_5',
    'MA_10', 'MA_50', 'MA',
    'Volume_Diff_1', 'Volume_Diff_3', 'Volume_Diff_5',
    'Historical_Volatility', 'BB_upper', 'BB_lower',
]
combined_data = combined_data[new_column_order]
|
103 |
+
|
104 |
+
|
105 |
+
|
106 |
+
|
107 |
+
|
108 |
+
|
109 |
+
|
110 |
+
|
111 |
+
|
112 |
+
|
113 |
+
|
114 |
+
|
115 |
+
|
116 |
+
|
117 |
+
|