rc9494 committed on
Commit 259f359
1 Parent(s): 263d22a

Upload SP500_Date_Offset.py

Files changed (1):
  SP500_Date_Offset.py (+812 −0)
SP500_Date_Offset.py ADDED
# -*- coding: utf-8 -*-
"""
Created on Wed May 1 13:17:02 2024

@author: RC
"""




# ================================ LIBRARIES ================================ #
import numpy as np
import pandas as pd
import yfinance as yf
import datasets
from typing import List
import csv
import json
import logging

import warnings
from fredapi import Fred
from time import sleep
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as soup




dictArgs = {'key_file_path'    : 'fred_api_key.txt',        # path to FRED API key file
            'fred_source_path' : 'fred.csv',                # location of data dictionary
            'security_sym'     : '^GSPC',                   # security ticker symbol
            'security_name'    : 'SP500',                   # security name
            'export_path'      : 'SP500_Date_Offset.csv'    # export destination
            }

# =========================================================================== #




# ================================== INFO =================================== #
_CITATION = """\
@online{BEA_GDP,
    author = {{U.S. Bureau of Economic Analysis}},
    title = {Real Gross Domestic Product [GDPC1]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/GDPC1},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{Consumer_Sentiment,
    author = {{Surveys of Consumers, University of Michigan}},
    title = {University of Michigan: Consumer Sentiment © [UMCSENT]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/UMCSENT},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{CPI_All_Items,
    author = {{U.S. Bureau of Labor Statistics}},
    title = {Consumer Price Index for All Urban Consumers: All Items in U.S. City Average [CPIAUCSL]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/CPIAUCSL},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{CPI_All_Items_Less_Food_Energy,
    author = {{U.S. Bureau of Labor Statistics}},
    title = {Consumer Price Index for All Urban Consumers: All Items Less Food and Energy in U.S. City Average [CPILFESL]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/CPILFESL},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{Fed_Funds_Rate,
    author = {{Board of Governors of the Federal Reserve System (US)}},
    title = {Federal Funds Effective Rate [DFF]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/DFF},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-20}
}
@online{New_Housing_Units_Started,
    author = {{U.S. Census Bureau and U.S. Department of Housing and Urban Development}},
    title = {New Privately-Owned Housing Units Started: Total Units [HOUST]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/HOUST},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-19}
}
@online{New_One_Family_Houses_Sold,
    author = {{U.S. Census Bureau and U.S. Department of Housing and Urban Development}},
    title = {New One Family Houses Sold: United States [HSN1F]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/HSN1F},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{PCE_Chain_Price_Index,
    author = {{U.S. Bureau of Economic Analysis}},
    title = {Personal Consumption Expenditures: Chain-type Price Index [PCEPI]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/PCEPI},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{PCE_Excluding_Food_Energy,
    author = {{U.S. Bureau of Economic Analysis}},
    title = {Personal Consumption Expenditures Excluding Food and Energy (Chain-Type Price Index) [PCEPILFE]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/PCEPILFE},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{SP500,
    author = {{S\&P Dow Jones Indices LLC}},
    title = {S\&P 500 [SP500]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/SP500},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-20}
}
@online{Total_Construction_Spending,
    author = {{U.S. Census Bureau}},
    title = {Total Construction Spending: Total Construction in the United States [TTLCONS]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/TTLCONS},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{Total_Nonfarm_Employees,
    author = {{U.S. Bureau of Labor Statistics}},
    title = {All Employees, Total Nonfarm [PAYEMS]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/PAYEMS},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
@online{Unemployment_Rate,
    author = {{U.S. Bureau of Labor Statistics}},
    title = {Unemployment Rate [UNRATE]},
    year = {2024},
    url = {https://fred.stlouisfed.org/series/UNRATE},
    organization = {FRED, Federal Reserve Bank of St. Louis},
    urldate = {2024-03-13}
}
"""

# You can copy an official description
_DESCRIPTION = """\
The S&P 500 Date Offset project seeks to offer an alternative way of modeling
financial trends from economic conditions.

Due to the rigorous tabulation process, the gap between when economic data is
reported and the period it is meant to describe can be months. Moreover, when
this data is released, it is usually backdated to the first day of the time
period it reflects. Yet if the data causes a correction in financial markets,
that change appears in market data on the day of the release (and not on the
backdated day!).

That prompts the immediate question: would data offset to reflect investors'
knowledge in the moment provide a better model for the markets than the
traditionally structured data?

In addition to the S&P 500 daily close price--which is used here to represent
the stock market overall--variables were chosen from the list of Leading,
Lagging and Coincident Indicators as maintained by the Conference Board.
Those variables and their transformations are:
(M/M = Month-over-month percent change,
 Q/Q = Quarter-over-quarter percent change,
 Y/Y = Year-over-year percent change
)

- Consumer Sentiment, University of Michigan
    Freq: Monthly
    Tran: M/M, Y/Y

- Consumer Price Index
    - All Items
    - All Items less Food & Energy
    Freq: Monthly
    Tran: M/M, Y/Y

- Federal Funds Rate
    Freq: Daily
    Tran: None

- Gross Domestic Product
    Freq: Quarterly
    Tran: Q/Q, Y/Y

- New Housing Units Started
    Freq: Monthly
    Tran: M/M, Y/Y

- New One Family Houses Sold
    Freq: Monthly
    Tran: M/M, Y/Y

- Personal Consumption Expenditure: Chain-type Price Index
    - All Items
    - All Items excluding Food & Energy
    Freq: Monthly
    Tran: M/M, Y/Y

- Total Construction Spending
    Freq: Monthly
    Tran: M/M, Y/Y

- Total Nonfarm Employment
    Freq: Monthly
    Tran: M/M, Y/Y

- Unemployment Rate
    Freq: Monthly
    Tran: M/M, Y/Y

"""
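
# --------------------------------------------------------------------------- #
# Illustrative sketch (not part of the pipeline): the M/M, Q/Q and Y/Y
# transformations listed above amount to pandas pct_change over the right
# number of periods. `_example_transforms` is a hypothetical helper shown only
# for illustration, assuming a date-indexed Series at a fixed frequency.
def _example_transforms(s, periods_per_year=12):
    # periods_per_year: 12 for monthly data (M/M + Y/Y), 4 for quarterly (Q/Q + Y/Y)
    return pd.DataFrame({
        'level' : s,
        'pop'   : s.pct_change(1) * 100,                    # period-over-period % change
        'yoy'   : s.pct_change(periods_per_year) * 100,     # year-over-year % change
        })
# --------------------------------------------------------------------------- #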

# Homepage
_HOMEPAGE = "https://github.com/RileyTheEcon/SP500_Date_Offset"

# License is a mix of Public Domain and Creative Commons
# Sourcing the data so that it is all Public Domain is a longer-term goal for
# this project
_LICENSE = ""

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# NOTE: raw files on the Hub are served from `resolve/main/`; the original
# `dataset/blob/main/` path pointed at the HTML preview page, not the CSV.
_URL = "https://huggingface.co/datasets/rc9494/SP500_Date_Offset/"
_URLS = {
    "dev": _URL + "resolve/main/SP500_Date_Offset.csv"
}
# =========================================================================== #




# ================================ FUNCTIONS ================================ #
# I originally developed the below function for a personal project and built
# on it for this assignment: it originally took data series names and ID codes
# as a list of tuples; functionality was expanded to take a table instead and
# create the list of tuples internally.
def get_fred_data (fred_key, dfFred,
                   col_names = {'Name':'Name', 'SeriesID':'SeriesID'},
                   try_limit = 5, courtesy_sleep = 0.5
                   ) :
    '''
    Parameters
    ----------
    fred_key : STR
        Valid FRED API key as str
    dfFred : DataFrame-like
        DataFrame-like with an array of desired variable names and FRED
        series ID codes
    col_names : DICT, optional
        Dictionary matching the column names of dfFred to the column names
        assumed by the function.
    try_limit : INT, optional
        The function will attempt to access the data associated with a given
        series ID this many times before issuing a warning and continuing.
        The default is 5.
    courtesy_sleep : FLT, optional
        Wait between making new server requests to avoid flooding the server,
        or if the server is erroring. The default is 0.5 seconds.

    Returns
    -------
    dfData : DATAFRAME
        Dataframe of the data requested from the FRED server. Each data
        series is in its own column, joined on a datetime index, and sorted
        chronologically.
    '''

    dfFred = pd.DataFrame(dfFred)   # normalize input to a DataFrame
    dfData = pd.DataFrame()         # create place in memory
    fred   = Fred(fred_key)         # convert to API key object



    # Standardize df column names
    col_names = {value:key for key, value in col_names.items()}
    dfFred.rename(columns=col_names, inplace=True)



    # Remove gaps & warn duplicates
    dfFred = dfFred.dropna()

    for name in dfFred.columns :
        item_dupe = dfFred[dfFred.duplicated(name)][name].tolist()
        if len(item_dupe)>0 :
            warnings.warn(f"Duplicated entries found in '{name}': {item_dupe}")
        # end if
    # end for
    dfFred = dfFred[~dfFred['Name'].duplicated(keep='first')]



    # Download data -- using item-wise iter to be nice to hosting server
    for indx, row in dfFred.iterrows() :
        bContinue     = 0
        intErrorCount = 0

        while (bContinue==0)&(intErrorCount<try_limit) :
            try :   # Attempt dl through API
                data = pd.DataFrame(fred.get_series(row['SeriesID'])
                                    ).rename(columns={0:row['Name']})
                data.index.name = 'date'
            except Exception :  # Extract data from raw txt page if API fails for any reason
                try :
                    htmlPage = dlURL('https://fred.stlouisfed.org/data/'+
                                     row['SeriesID']+'.txt')

                    listRows = htmlPage.text.split('\n')
                    listRows = listRows[listRows.index([x for x in listRows
                                                        if 'DATE' in x][0])+1:]
                    listRows = [[pd.to_datetime(x[:x.index(' ')]),
                                 float(isolate_better(x,' ','\r',b_end=1))
                                 ]
                                for x in listRows if x!=''
                                ]

                    data = pd.DataFrame(listRows,columns=['index',row['Name']]
                                        ).set_index('index')
                    data.index.name = 'date'
                except Exception :
                    intErrorCount+=1
                    sleep(1)
                else : bContinue = 1
                # end try
            else : bContinue = 1
            # end try
        # end while

        # If both approaches above fail - warn user
        if intErrorCount>=try_limit :
            warnings.warn('\nFailure in accessing data from:\n'+
                          f'Name: {row["Name"]}\n'+
                          f'ID: {row["SeriesID"]}\n'
                          )

        # If the above ran successfully - append along date index
        else :
            if len(dfData)==0 : dfData = data
            else : dfData = dfData.join(data, how='outer')
        # end if

        sleep(courtesy_sleep)   # Let's do our best to be polite to the hosting server
    # end for

    return dfData.sort_index()
####
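# Example usage (sketch; requires a valid FRED API key -- the series IDs here
# are taken from the citations above):
#   dfFred = pd.DataFrame({'Name'     : ['GDP', 'CPI'],
#                          'SeriesID' : ['GDPC1', 'CPIAUCSL']})
#   dfData = get_fred_data(fred_key, dfFred)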
def get_historic_data (SeriesID, api_key,
                       series_name = 'value',
                       stale_data  = 500
                       ) :
    # Re-index a FRED series to its release dates: pull every vintage of the
    # series and keep, for each release date (realtime_start), the most
    # recent observation it contained.

    # Get data
    fred = Fred(api_key)
    df   = fred.get_series_all_releases(SeriesID)

    # Calc gap between reported date and actual date; drop stale data
    df['diff'] = df['realtime_start'] - df['date']
    df = df[df['diff'] <= pd.Timedelta(days=stale_data)].copy()

    # Get most recent data by actual date
    # Some reports contain original data and revisions, so we grab the most
    # current data from each reporting date
    max_order_indices = (df.sort_values('date')
                           .groupby('realtime_start')['date']
                           .idxmax()
                         )
    df = df.loc[max_order_indices].copy()

    # Drop unneeded columns; set index
    for col in ['date', 'diff'] : del df[col]

    dict_rename = {'realtime_start' : 'date'}
    if series_name!='value' : dict_rename['value'] = series_name

    df.rename(columns = dict_rename,
              inplace = True
              )
    df.set_index('date', inplace = True)

    return df
####
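# Illustration of the offset this produces (dates approximate, for exposition
# only): Q1 GDP covers January-March but is first published around late April.
# FRED backdates the observation to the quarter's first day, while
# get_historic_data keys it to its release date (realtime_start) instead.
#   fred.get_series('GDPC1')              -> Q1 value indexed at 2024-01-01
#   get_historic_data('GDPC1', fred_key)  -> same value indexed at the ~April release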
def dlURL (url, parser = "html.parser") :
    # Request a page with a browser User-Agent and return it as parsed soup
    req       = Request(url, headers={'User-Agent':'Mozilla/5.0'})
    urlClient = urlopen(req)
    pageRough = urlClient.read()
    urlClient.close()
    pageSoup  = soup(pageRough, parser)

    return pageSoup
#### / ####
# "isolate_better" and its helper function "reverse" are functions I originally
# wrote for a personal project while still teaching myself Python basics.
# Is it a crude and inefficient way to do something that there are probably
# native functions/methods for? Probably, but it works with the other
# pre-existing code I have.
def reverse (stri) :
    # Equivalent to stri[::-1]
    x = ""
    for i in stri :
        x = i + x
    return x
####
def isolate_better (stri, start, end, b_end = 0) :
    # Return the substring of `stri` between `start` and `end`.
    # b_end=0: scan forward from the first occurrence of `start`.
    # b_end=1: anchor on the first occurrence of `end` and scan backwards for
    #          the nearest `start` before it.
    strShort = ''
    posStart = 0
    posEnd   = 0

    if b_end==1 :
        posEnd   = stri.find(end)
        strShort = stri[:posEnd]
        strShort = reverse(strShort)
        start    = reverse(start)
        posStart = posEnd - strShort.find(start)
    #
    else :
        posStart = stri.find(start)+len(start)
        strShort = stri[posStart:]
        posEnd   = posStart + strShort.find(end)
    #
    return stri[posStart:posEnd]
####
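# Example: pulling the value field out of a FRED raw-text data row --
#   isolate_better('2024-01-01  123.45\r', ' ', '\r', b_end=1)  ->  '123.45'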
def check_data (dfFred, fred_key) :
    # Check to make sure sufficient data is available
    df = pd.DataFrame()     # create space in memory

    for i,r in dfFred[~dfFred['Freq'].isin(['Daily', 'Weekly'])].iterrows() :
        # Download data
        df = get_historic_data(r['SeriesID'],
                               fred_key,
                               r['Name']
                               )

        # Report series statistics
        print(r['Name'], '\n',
              'First Obs.: ', df.first_valid_index(), '\n',
              'Count Obs.: ', len(df), '\n',
              '\n'
              )
    # end for i,r
#### / ####
def main(key_file_path,     # File path for FRED API key, txt
         fred_source_path,  # File path for variable names & FRED series IDs, csv
         security_sym,      # Ticker symbol for security of interest (S&P 500)
         security_name,     # Name of security of interest
         export_path        # File path to save data
         ) :



    # Seek API key; prompt user if not found; access from repo if not given
    bDownload = False   # Bool: dl from repo or generate fresh?
    # true = download pre-generated data from repo ; false = gen new

    try :
        # try to get key from file (strip any trailing newline)
        with open(key_file_path, 'r') as file :
            fred_key = file.read().strip()
        # end with

    except FileNotFoundError :
        print('FRED api key not found!\n'+
              'Please enter api key or hit enter to download static dataset from repo:'
              )
        fred_key = input()

        if len(fred_key)==0 : bDownload = True
        else :
            pass    # TODO: test validity of api key
        # end if len

    except Exception as oops : print(f"Something odd happened: {oops}")
    #




    # Import list of variables if it exists ; else download from repo
    if not bDownload :  # skip chunk if we're dl'ing from repo
        try :
            # import list of variables to pull
            dfFred = pd.read_csv(fred_source_path)

        except FileNotFoundError :
            print('Could not find list of variables to generate: '+
                  fred_source_path+'\n'+
                  'Switching to download static dataset from repo instead!\n'
                  )
            bDownload = True

        # end try/except
    # end if bDownload

    #




    # If above checks fail, then download from existing repo
    if bDownload :
        dfData = pd.read_csv('https://raw.githubusercontent.com/RileyTheEcon/'+
                             'SP500_Date_Offset/main/SP500_Offset.csv',
                             index_col='Date'
                             )

    # If all above checks pass, generate fresh data from FRED api
    else :

        # Download YFinance data
        dfFinance = yf.download(security_sym)['Adj Close']
        dfFinance.rename(security_name, inplace=True)
        #




        # Iter thru data series; handle as specified
        dfEcon = pd.DataFrame()     # make place in memory

        for i,r in dfFred.iterrows() :
            if pd.isnull(r['SeriesID']) :   # skip if info missing
                continue
            # end if

            # Create space in memory
            df = pd.DataFrame()

            # Import data
            if r['Freq'] in ['Daily', 'Weekly'] :
                # Dl data for daily/ weekly freq
                df = get_fred_data(fred_key,
                                   pd.DataFrame(r).T[['Name','SeriesID']]
                                   )

            else :
                # Dl data for monthly/ quarterly freq, re-indexed to release dates
                df = get_historic_data(r['SeriesID'],
                                       fred_key
                                       )
                df.rename(columns = {'value': r['Name']},
                          inplace = True
                          )

                # Indicate report date
                df[r['Name']+'_release'] = 1

            # end if import

            # Attach to full dataframe
            dfEcon = dfEcon.join(df, how='outer')

        # end for iterrows
        #




        # Combine & fill numeric vars & export
        # Ffill numeric vars & fillna(0) the release indicators, then
        # left-join onto the stock data. Columns with more than three unique
        # values are treated as numeric series; columns with three or fewer
        # (the 0/1 _release flags) are treated as indicators.
        dfData = (pd.DataFrame(dfFinance)
                  .join(dfEcon[[x for x in dfEcon.columns
                                if len(dfEcon[x].unique())>3]
                               ].ffill(),
                        how='left'
                        )
                  .join(dfEcon[[x for x in dfEcon.columns
                                if len(dfEcon[x].unique())<=3]
                               ].fillna(0),
                        how='left'
                        )
                  )

        # Export
        if len(export_path)>0 :
            dfData.to_csv(export_path)
        # end if
        #

    # end if bDownload

    return dfData
#

####
class SP500_Date_Offset(datasets.GeneratorBasedBuilder):
    """S&P 500 daily close price joined with release-date-offset economic indicators."""

    _URLS   = _URLS
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # NOTE: "datetime" is not a dtype the datasets library
                    # recognizes; timestamp[s] is the supported equivalent.
                    "Date": datasets.Value("timestamp[s]"),
                    "SP500": datasets.Value("float"),
                    "Fed-Rate": datasets.Value("float"),
                    "Yield-10Y": datasets.Value("float"),
                    "Yield-1M": datasets.Value("float"),
                    "Yield-1Y": datasets.Value("float"),
                    "Yield-20Y": datasets.Value("float"),
                    "Yield-2Y": datasets.Value("float"),
                    "Yield-30Y": datasets.Value("float"),
                    "Yield-3M": datasets.Value("float"),
                    "Yield-3Y": datasets.Value("float"),
                    "Yield-5Y": datasets.Value("float"),
                    "Yield-6M": datasets.Value("float"),
                    "Yield-7Y": datasets.Value("float"),
                    "Bus-Apps": datasets.Value("float"),
                    "Loans-CI": datasets.Value("float"),
                    "Loans-Cons": datasets.Value("float"),
                    "Loans-RE": datasets.Value("float"),
                    "Unemp-Claims": datasets.Value("float"),
                    "Con-Sentim": datasets.Value("float"),
                    "Con-Sentim_release": datasets.Value("bool"),
                    "Con-Spends": datasets.Value("float"),
                    "Con-Spends_release": datasets.Value("bool"),
                    "CPI": datasets.Value("float"),
                    "CPI_release": datasets.Value("bool"),
                    "CPI-Core": datasets.Value("float"),
                    "CPI-Core_release": datasets.Value("bool"),
                    "CPI-Services": datasets.Value("float"),
                    "CPI-Services_release": datasets.Value("bool"),
                    "Home-Sales": datasets.Value("float"),
                    "Home-Sales_release": datasets.Value("bool"),
                    "Home-Starts": datasets.Value("float"),
                    "Home-Starts_release": datasets.Value("bool"),
                    "Income-Trans": datasets.Value("float"),
                    "Income-Trans_release": datasets.Value("bool"),
                    "Indust-Prod": datasets.Value("float"),
                    "Indust-Prod_release": datasets.Value("bool"),
                    "Inventory-Sales": datasets.Value("float"),
                    "Inventory-Sales_release": datasets.Value("bool"),
                    "Manu-Hours": datasets.Value("float"),
                    "Manu-Hours_release": datasets.Value("bool"),
                    "MT-Sales": datasets.Value("float"),
                    "MT-Sales_release": datasets.Value("bool"),
                    "NO-Capital": datasets.Value("float"),
                    "NO-Capital_release": datasets.Value("bool"),
                    "NO-Consumer": datasets.Value("float"),
                    "NO-Consumer_release": datasets.Value("bool"),
                    "NO-Durables": datasets.Value("float"),
                    "NO-Durables_release": datasets.Value("bool"),
                    "NO-Unfilled": datasets.Value("float"),
                    "NO-Unfilled_release": datasets.Value("bool"),
                    "PCE": datasets.Value("float"),
                    "PCE_release": datasets.Value("bool"),
                    "PCE-Core": datasets.Value("float"),
                    "PCE-Core_release": datasets.Value("bool"),
                    "PPI-Architect": datasets.Value("float"),
                    "PPI-Architect_release": datasets.Value("bool"),
                    "Total-Emp": datasets.Value("float"),
                    "Total-Emp_release": datasets.Value("bool"),
                    "Unemploy": datasets.Value("float"),
                    "Unemploy_release": datasets.Value("bool"),
                    "Unemp-Weeks": datasets.Value("float"),
                    "Unemp-Weeks_release": datasets.Value("bool"),
                    "Delinq-CreditC": datasets.Value("float"),
                    "Delinq-CreditC_release": datasets.Value("bool"),
                    "GDP": datasets.Value("float"),
                    "GDP_release": datasets.Value("bool"),
                }
            ),
            # No default supervised_keys.
            supervised_keys=None,
            homepage="https://github.com/RileyTheEcon/SP500_Date_Offset",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]})
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logging.info("generating examples from = %s", filepath)

        # NOTE: the downloaded filepath is not read here; the dataset is
        # regenerated fresh via main() instead.
        dictArgs = {'key_file_path'    : 'fred_api_key.txt',        # path to FRED API key file
                    'fred_source_path' : 'fred.csv',                # location of data dictionary
                    'security_sym'     : '^GSPC',                   # security ticker symbol
                    'security_name'    : 'SP500',                   # security name
                    'export_path'      : 'SP500_Date_Offset.csv'    # export destination
                    }

        dfData = main(**dictArgs)

        # iterrows() yields one (date, row) pair per observation; the original
        # iteritems() call iterated over columns and would not expose the
        # per-date fields accessed below.
        for i,r in dfData.iterrows() :
            yield i, {
                'Date': i,
                "SP500": r["SP500"],
                "Fed-Rate": r["Fed-Rate"],
                "Yield-10Y": r["Yield-10Y"],
                "Yield-1M": r["Yield-1M"],
                "Yield-1Y": r["Yield-1Y"],
                "Yield-20Y": r["Yield-20Y"],
                "Yield-2Y": r["Yield-2Y"],
                "Yield-30Y": r["Yield-30Y"],
                "Yield-3M": r["Yield-3M"],
                "Yield-3Y": r["Yield-3Y"],
                "Yield-5Y": r["Yield-5Y"],
                "Yield-6M": r["Yield-6M"],
                "Yield-7Y": r["Yield-7Y"],
                "Bus-Apps": r["Bus-Apps"],
                "Loans-CI": r["Loans-CI"],
                "Loans-Cons": r["Loans-Cons"],
                "Loans-RE": r["Loans-RE"],
                "Unemp-Claims": r["Unemp-Claims"],
                "Con-Sentim": r["Con-Sentim"],
                "Con-Sentim_release": r["Con-Sentim_release"],
                "Con-Spends": r["Con-Spends"],
                "Con-Spends_release": r["Con-Spends_release"],
                "CPI": r["CPI"],
                "CPI_release": r["CPI_release"],
                "CPI-Core": r["CPI-Core"],
                "CPI-Core_release": r["CPI-Core_release"],
                "CPI-Services": r["CPI-Services"],
                "CPI-Services_release": r["CPI-Services_release"],
                "Home-Sales": r["Home-Sales"],
                "Home-Sales_release": r["Home-Sales_release"],
                "Home-Starts": r["Home-Starts"],
                "Home-Starts_release": r["Home-Starts_release"],
                "Income-Trans": r["Income-Trans"],
                "Income-Trans_release": r["Income-Trans_release"],
                "Indust-Prod": r["Indust-Prod"],
                "Indust-Prod_release": r["Indust-Prod_release"],
                "Inventory-Sales": r["Inventory-Sales"],
                "Inventory-Sales_release": r["Inventory-Sales_release"],
                "Manu-Hours": r["Manu-Hours"],
                "Manu-Hours_release": r["Manu-Hours_release"],
                "MT-Sales": r["MT-Sales"],
                "MT-Sales_release": r["MT-Sales_release"],
                "NO-Capital": r["NO-Capital"],
                "NO-Capital_release": r["NO-Capital_release"],
                "NO-Consumer": r["NO-Consumer"],
                "NO-Consumer_release": r["NO-Consumer_release"],
                "NO-Durables": r["NO-Durables"],
                "NO-Durables_release": r["NO-Durables_release"],
                "NO-Unfilled": r["NO-Unfilled"],
                "NO-Unfilled_release": r["NO-Unfilled_release"],
                "PCE": r["PCE"],
                "PCE_release": r["PCE_release"],
                "PCE-Core": r["PCE-Core"],
                "PCE-Core_release": r["PCE-Core_release"],
                "PPI-Architect": r["PPI-Architect"],
                "PPI-Architect_release": r["PPI-Architect_release"],
                "Total-Emp": r["Total-Emp"],
                "Total-Emp_release": r["Total-Emp_release"],
                "Unemploy": r["Unemploy"],
                "Unemploy_release": r["Unemploy_release"],
                "Unemp-Weeks": r["Unemp-Weeks"],
                "Unemp-Weeks_release": r["Unemp-Weeks_release"],
                "Delinq-CreditC": r["Delinq-CreditC"],
                "Delinq-CreditC_release": r["Delinq-CreditC_release"],
                "GDP": r["GDP"],
                "GDP_release": r["GDP_release"],
            }
        # end for
    # end def
# end class
# =========================================================================== #




# =================================== MAIN ================================== #
if __name__ == "__main__" :
    print(__doc__)
    main(**dictArgs)
# end if
# =========================================================================== #
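# Loading sketch (hypothetical until the loader is published; assumes the
# HuggingFace `datasets` library and this repo id):
#   from datasets import load_dataset
#   ds = load_dataset('rc9494/SP500_Date_Offset')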


''' DEBUG
key_file_path    = dictArgs['key_file_path']
fred_source_path = dictArgs['fred_source_path']
security_sym     = dictArgs['security_sym']
security_name    = dictArgs['security_name']
export_path      = dictArgs['export_path']
'''