cyberosa committed
Commit · cd451ea
Parent(s): 4df107b
adding update scripts for the dataset
- scripts/__init__.py +0 -0
- scripts/active_traders.py +91 -0
- scripts/cleaning_old_info.py +110 -0
- scripts/closed_markets_divergence.py +269 -0
- scripts/cloud_storage.py +93 -0
- scripts/daily_data.py +61 -0
- scripts/get_mech_info.py +335 -0
- scripts/gnosis_timestamps.py +186 -0
- scripts/manage_space_files.py +40 -0
- scripts/market_metrics.py +64 -0
- scripts/markets.py +464 -0
- scripts/mech_request_utils.py +603 -0
- scripts/nr_mech_calls.py +268 -0
- scripts/num_mech_calls.py +94 -0
- scripts/profitability.py +528 -0
- scripts/pull_data.py +171 -0
- scripts/queries.py +161 -0
- scripts/staking.py +304 -0
- scripts/tools.py +320 -0
- scripts/tools_metrics.py +95 -0
- scripts/update_tools_accuracy.py +120 -0
- scripts/utils.py +430 -0
- scripts/web3_utils.py +276 -0
scripts/__init__.py
ADDED
File without changes
scripts/active_traders.py
ADDED
@@ -0,0 +1,91 @@
import pandas as pd
import pickle
from web3_utils import ROOT_DIR, TMP_DIR
from staking import check_list_addresses


def get_trader_type(address: str, service_map: dict) -> str:
    # check if it is part of any service id on the map
    keys = service_map.keys()
    last_key = max(keys)

    for key, value in service_map.items():
        if value["safe_address"].lower() == address.lower():
            # found a service
            return "Olas"

    return "non_Olas"


def compute_active_traders_dataset():
    """Function to prepare the active traders dataset"""
    with open(ROOT_DIR / "service_map.pkl", "rb") as f:
        service_map = pickle.load(f)
    # read tools info
    tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
    # rename the request_month_year_week
    tools_df.rename(
        columns={"request_month_year_week": "month_year_week"}, inplace=True
    )
    tool_traders = tools_df.trader_address.unique()
    mapping = check_list_addresses(tool_traders)
    # add trader type to tools_df
    tools_df["trader_type"] = tools_df.trader_address.apply(lambda x: mapping[x])
    tools_df = tools_df[
        ["month_year_week", "market_creator", "trader_type", "trader_address"]
    ]
    tools_df.drop_duplicates(inplace=True)
    # read trades info
    all_trades = pd.read_parquet(ROOT_DIR / "all_trades_profitability.parquet")

    # read unknown info
    unknown_traders = pd.read_parquet(ROOT_DIR / "unknown_traders.parquet")
    unknown_traders["creation_timestamp"] = pd.to_datetime(
        unknown_traders["creation_timestamp"]
    )
    unknown_traders["creation_timestamp"] = unknown_traders[
        "creation_timestamp"
    ].dt.tz_convert("UTC")
    unknown_traders = unknown_traders.sort_values(
        by="creation_timestamp", ascending=True
    )
    unknown_traders["month_year_week"] = (
        unknown_traders["creation_timestamp"]
        .dt.to_period("W")
        .dt.start_time.dt.strftime("%b-%d-%Y")
    )
    unknown_traders["trader_type"] = "unknown"
    unknown_traders = unknown_traders[
        ["month_year_week", "trader_type", "market_creator", "trader_address"]
    ]
    unknown_traders.drop_duplicates(inplace=True)

    all_trades["creation_timestamp"] = pd.to_datetime(all_trades["creation_timestamp"])
    all_trades["creation_timestamp"] = all_trades["creation_timestamp"].dt.tz_convert(
        "UTC"
    )
    all_trades = all_trades.sort_values(by="creation_timestamp", ascending=True)
    all_trades["month_year_week"] = (
        all_trades["creation_timestamp"]
        .dt.to_period("W")
        .dt.start_time.dt.strftime("%b-%d-%Y")
    )
    all_trades["trader_type"] = all_trades["staking"].apply(
        lambda x: "non_Olas" if x == "non_Olas" else "Olas"
    )
    all_trades = all_trades[
        ["month_year_week", "market_creator", "trader_type", "trader_address"]
    ]
    all_trades.drop_duplicates(inplace=True)
    filtered_traders_data = pd.concat([all_trades, tools_df], axis=0)
    filtered_traders_data.drop_duplicates(inplace=True)
    if len(unknown_traders) > 0:
        # merge
        filtered_traders_data = pd.concat(
            [filtered_traders_data, unknown_traders], axis=0
        )
    filtered_traders_data.to_parquet(ROOT_DIR / "active_traders.parquet")


if __name__ == "__main__":
    compute_active_traders_dataset()
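
For reference, the `month_year_week` bucketing used in this script (and in several of the scripts below) maps every timestamp to the Monday that starts its calendar week before formatting it. A minimal, self-contained sketch with made-up dates, not data from the dataset:

    import pandas as pd

    # Two made-up timestamps that fall in the same Mon-Sun week.
    ts = pd.Series(pd.to_datetime(["2024-10-25 10:00", "2024-10-27 23:00"]))
    week_label = ts.dt.to_period("W").dt.start_time.dt.strftime("%b-%d-%Y")
    print(week_label.tolist())  # ['Oct-21-2024', 'Oct-21-2024'] -- both map to Monday Oct 21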
scripts/cleaning_old_info.py
ADDED
@@ -0,0 +1,110 @@
import pandas as pd
from utils import ROOT_DIR, TMP_DIR, transform_to_datetime


def clean_old_data_from_parquet_files(cutoff_date: str):
    print("Cleaning oldest data")
    # Convert the string to datetime64[ns, UTC]
    min_date_utc = pd.to_datetime(cutoff_date, format="%Y-%m-%d", utc=True)

    # clean tools.parquet
    try:
        tools = pd.read_parquet(TMP_DIR / "tools.parquet")

        # make sure creator_address is in the columns
        assert "trader_address" in tools.columns, "trader_address column not found"

        # lowercase and strip creator_address
        tools["trader_address"] = tools["trader_address"].str.lower().str.strip()

        tools["request_time"] = pd.to_datetime(tools["request_time"], utc=True)

        print(f"length before filtering {len(tools)}")
        tools = tools.loc[tools["request_time"] > min_date_utc]
        print(f"length after filtering {len(tools)}")
        tools.to_parquet(TMP_DIR / "tools.parquet", index=False)

    except Exception as e:
        print(f"Error cleaning tools file {e}")

    # clean all_trades_profitability.parquet
    try:
        all_trades = pd.read_parquet(ROOT_DIR / "all_trades_profitability.parquet")

        all_trades["creation_timestamp"] = pd.to_datetime(
            all_trades["creation_timestamp"], utc=True
        )

        print(f"length before filtering {len(all_trades)}")
        all_trades = all_trades.loc[all_trades["creation_timestamp"] > min_date_utc]
        print(f"length after filtering {len(all_trades)}")
        all_trades.to_parquet(
            ROOT_DIR / "all_trades_profitability.parquet", index=False
        )

    except Exception as e:
        print(f"Error cleaning all trades profitability file {e}")

    # clean unknown_traders.parquet
    try:
        unknown_traders = pd.read_parquet(ROOT_DIR / "unknown_traders.parquet")

        unknown_traders["creation_timestamp"] = pd.to_datetime(
            unknown_traders["creation_timestamp"], utc=True
        )

        print(f"length unknown traders before filtering {len(unknown_traders)}")
        unknown_traders = unknown_traders.loc[
            unknown_traders["creation_timestamp"] > min_date_utc
        ]
        print(f"length unknown traders after filtering {len(unknown_traders)}")
        unknown_traders.to_parquet(ROOT_DIR / "unknown_traders.parquet", index=False)

    except Exception as e:
        print(f"Error cleaning unknown_traders file {e}")

    # clean fpmmTrades.parquet
    try:
        fpmmTrades = pd.read_parquet(TMP_DIR / "fpmmTrades.parquet")
        try:
            fpmmTrades["creationTimestamp"] = fpmmTrades["creationTimestamp"].apply(
                lambda x: transform_to_datetime(x)
            )
        except Exception as e:
            print(f"Transformation not needed")
        fpmmTrades["creation_timestamp"] = pd.to_datetime(
            fpmmTrades["creationTimestamp"]
        )
        fpmmTrades["creation_timestamp"] = pd.to_datetime(
            fpmmTrades["creation_timestamp"], utc=True
        )

        print(f"length before filtering {len(fpmmTrades)}")
        fpmmTrades = fpmmTrades.loc[fpmmTrades["creation_timestamp"] > min_date_utc]
        print(f"length after filtering {len(fpmmTrades)}")
        fpmmTrades.to_parquet(TMP_DIR / "fpmmTrades.parquet", index=False)

    except Exception as e:
        print(f"Error cleaning fpmmTrades file {e}")

    # clean invalid trades parquet
    try:
        invalid_trades = pd.read_parquet(ROOT_DIR / "invalid_trades.parquet")

        invalid_trades["creation_timestamp"] = pd.to_datetime(
            invalid_trades["creation_timestamp"], utc=True
        )

        print(f"length before filtering {len(invalid_trades)}")
        invalid_trades = invalid_trades.loc[
            invalid_trades["creation_timestamp"] > min_date_utc
        ]
        print(f"length after filtering {len(invalid_trades)}")
        invalid_trades.to_parquet(ROOT_DIR / "invalid_trades.parquet", index=False)

    except Exception as e:
        print(f"Error cleaning fpmmTrades file {e}")


if __name__ == "__main__":
    clean_old_data_from_parquet_files("2024-10-25")
scripts/closed_markets_divergence.py
ADDED
@@ -0,0 +1,269 @@
import os
import pandas as pd
import numpy as np
from typing import Any, Union
from string import Template
import requests
import pickle
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import time
from datetime import datetime
from utils import ROOT_DIR, TMP_DIR

NUM_WORKERS = 10
IPFS_POLL_INTERVAL = 0.2
INVALID_ANSWER_HEX = (
    "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
)
INVALID_ANSWER = -1
SUBGRAPH_API_KEY = os.environ.get("SUBGRAPH_API_KEY", None)
OMEN_SUBGRAPH_URL = Template(
    """https://gateway-arbitrum.network.thegraph.com/api/${subgraph_api_key}/subgraphs/id/9fUVQpFwzpdWS9bq5WkAnmKbNNcoBwatMR4yZq81pbbz"""
)
get_token_amounts_query = Template(
    """
    {

      fpmmLiquidities(
        where: {
          fpmm_: {
            creator: "${fpmm_creator}",
            id: "${fpmm_id}",
          },
          id_gt: ""
        }
        orderBy: creationTimestamp
        orderDirection: asc
      )
      {
        id
        outcomeTokenAmounts
        creationTimestamp
        additionalLiquidityParameter
      }
    }
    """
)
CREATOR = "0x89c5cc945dd550BcFfb72Fe42BfF002429F46Fec"
PEARL_CREATOR = "0xFfc8029154ECD55ABED15BD428bA596E7D23f557"
market_creators_map = {"quickstart": CREATOR, "pearl": PEARL_CREATOR}
headers = {
    "Accept": "application/json, multipart/mixed",
    "Content-Type": "application/json",
}


def _to_content(q: str) -> dict[str, Any]:
    """Convert the given query string to payload content, i.e., add it under a `queries` key and convert it to bytes."""
    finalized_query = {
        "query": q,
        "variables": None,
        "extensions": {"headers": None},
    }
    return finalized_query


def collect_liquidity_info(
    index: int, fpmm_id: str, market_creator: str
) -> dict[str, Any]:
    omen_subgraph = OMEN_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
    market_creator_id = market_creators_map[market_creator]
    query = get_token_amounts_query.substitute(
        fpmm_creator=market_creator_id.lower(),
        fpmm_id=fpmm_id,
    )
    content_json = _to_content(query)
    # print(f"Executing liquidity query {query}")
    res = requests.post(omen_subgraph, headers=headers, json=content_json)
    result_json = res.json()
    tokens_info = result_json.get("data", {}).get("fpmmLiquidities", [])
    if not tokens_info:
        return None

    # the last item is the final information of the market
    last_info = tokens_info[-1]
    token_amounts = [int(x) for x in last_info["outcomeTokenAmounts"]]
    time.sleep(IPFS_POLL_INTERVAL)
    return {fpmm_id: token_amounts}


def convert_hex_to_int(x: Union[str, float]) -> Union[int, float]:
    """Convert hex to int"""
    if isinstance(x, float):
        return np.nan
    if isinstance(x, str):
        if x == INVALID_ANSWER_HEX:
            return "invalid"
        return "yes" if int(x, 16) == 0 else "no"


def get_closed_markets():
    print("Reading parquet file with closed markets data from trades")
    try:
        markets = pd.read_parquet(TMP_DIR / "fpmmTrades.parquet")
    except Exception:
        print("Error reading the parquet file")

    columns_of_interest = [
        "fpmm.currentAnswer",
        "fpmm.id",
        "fpmm.openingTimestamp",
        "market_creator",
    ]
    markets = markets[columns_of_interest]
    markets.rename(
        columns={
            "fpmm.currentAnswer": "currentAnswer",
            "fpmm.openingTimestamp": "openingTimestamp",
            "fpmm.id": "id",
        },
        inplace=True,
    )
    markets = markets.drop_duplicates(subset=["id"], keep="last")
    # remove invalid answers
    markets = markets.loc[markets["currentAnswer"] != INVALID_ANSWER_HEX]
    markets["currentAnswer"] = markets["currentAnswer"].apply(
        lambda x: convert_hex_to_int(x)
    )
    markets.dropna(inplace=True)
    markets["opening_datetime"] = markets["openingTimestamp"].apply(
        lambda x: datetime.fromtimestamp(int(x))
    )
    markets = markets.sort_values(by="opening_datetime", ascending=True)
    return markets


def kl_divergence(P, Q):
    """
    Compute KL divergence for a single sample with two prob distributions.

    :param P: True distribution)
    :param Q: Approximating distribution)
    :return: KL divergence value
    """
    # Review edge cases
    if P[0] == Q[0]:
        return 0.0
    # If P is complete opposite of Q, divergence is some max value.
    # Here set to 20--allows for Q [\mu, 1-\mu] or Q[1-\mu, \mu] where \mu = 10^-8
    if P[0] == Q[1]:
        return 20

    nonzero = P > 0.0
    # Compute KL divergence
    kl_div = np.sum(P[nonzero] * np.log(P[nonzero] / Q[nonzero]))

    return kl_div


def market_KL_divergence(market_row: pd.DataFrame) -> float:
    """Function to compute the divergence based on the formula
    Formula in https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence"""
    current_answer = market_row.currentAnswer  # "yes", "no"
    approx_prob = market_row.first_outcome_prob
    true_prob = 1.0  # for yes outcome
    if current_answer == "no":
        true_prob = 0.0  # = 0% for yes outcome and 100% for no

    # we have only one sample, the final probability based on tokens
    # Ensure probabilities sum to 1
    P = np.array([true_prob, 1 - true_prob])
    Q = np.array([approx_prob, 1 - approx_prob])
    return kl_divergence(P, Q)


def off_by_values(market_row: pd.DataFrame) -> float:
    current_answer = market_row.currentAnswer  # "yes", "no"
    approx_prob = market_row.first_outcome_prob
    true_prob = 1.0  # for yes outcome
    if current_answer == "no":
        true_prob = 0.0  # = 0% for yes outcome and 100% for no

    # we have only one sample, the final probability based on tokens
    # Ensure probabilities sum to 1
    P = np.array([true_prob, 1 - true_prob])
    Q = np.array([approx_prob, 1 - approx_prob])
    return abs(P[0] - Q[0]) * 100.0


def compute_tokens_prob(token_amounts: list) -> list:
    first_token_amounts = token_amounts[0]
    second_token_amounts = token_amounts[1]
    total_tokens = first_token_amounts + second_token_amounts
    first_token_prob = 1 - round((first_token_amounts / total_tokens), 4)
    return [first_token_prob, 1 - first_token_prob]


def prepare_closed_markets_data():
    closed_markets = get_closed_markets()
    closed_markets["first_outcome_prob"] = -1.0
    closed_markets["second_outcome_prob"] = -1.0
    total_markets = len(closed_markets)
    markets_no_info = []
    no_info = 0
    with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        futures = []
        for i in range(total_markets):
            futures.append(
                executor.submit(
                    collect_liquidity_info,
                    i,
                    closed_markets.iloc[i].id,
                    closed_markets.iloc[i].market_creator,
                )
            )
        markets_with_info = 0
        for future in tqdm(
            as_completed(futures),
            total=len(futures),
            desc=f"Fetching Market liquidity info",
        ):
            token_amounts_dict = future.result()
            if token_amounts_dict:
                fpmm_id, token_amounts = token_amounts_dict.popitem()
                if token_amounts:
                    tokens_prob = compute_tokens_prob(token_amounts)
                    closed_markets.loc[
                        closed_markets["id"] == fpmm_id, "first_outcome_prob"
                    ] = tokens_prob[0]
                    closed_markets.loc[
                        closed_markets["id"] == fpmm_id, "second_outcome_prob"
                    ] = tokens_prob[1]
                    markets_with_info += 1
                else:
                    tqdm.write(f"Skipping market with no liquidity info")
                    markets_no_info.append(i)
            else:
                tqdm.write(f"Skipping market with no liquidity info")
                no_info += 1

    print(f"Markets with info = {markets_with_info}")
    # Removing markets with no liq info
    closed_markets = closed_markets.loc[closed_markets["first_outcome_prob"] != -1.0]
    print(
        f"Finished computing all markets liquidity info. Final length = {len(closed_markets)}"
    )
    if len(markets_no_info) > 0:
        print(
            f"There were {len(markets_no_info)} markets with no liquidity info. Printing some index of the dataframe"
        )
        with open("no_liq_info.pickle", "wb") as file:
            pickle.dump(markets_no_info, file)
        print(markets_no_info[:1])
    print(closed_markets.head())
    # Add the Kullback–Leibler divergence values
    print("Computing Kullback–Leibler (KL) divergence")
    closed_markets["kl_divergence"] = closed_markets.apply(
        lambda x: market_KL_divergence(x), axis=1
    )
    closed_markets["off_by_perc"] = closed_markets.apply(
        lambda x: off_by_values(x), axis=1
    )
    closed_markets.to_parquet(ROOT_DIR / "closed_markets_div.parquet", index=False)
    print("Finished preparing final dataset for visualization")
    print(closed_markets.head())


if __name__ == "__main__":
    prepare_closed_markets_data()
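
To make the divergence computation above concrete, a small worked example with made-up token amounts (not taken from any real market), assuming the functions above are in scope (e.g., run inside closed_markets_divergence.py):

    import numpy as np

    # Made-up final liquidity: 25 outstanding "yes" tokens vs 75 "no" tokens.
    # Fewer outstanding tokens for an outcome means a higher implied probability.
    Q = compute_tokens_prob([25, 75])          # [0.75, 0.25]
    P = np.array([1.0, 0.0])                   # market resolved "yes"
    print(kl_divergence(P, np.array(Q)))       # log(1 / 0.75) ≈ 0.2877
    print(abs(P[0] - Q[0]) * 100.0)            # off_by_perc = 25.0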
scripts/cloud_storage.py
ADDED
@@ -0,0 +1,93 @@
from minio import Minio
from minio.error import S3Error
import os
import argparse

from utils import HIST_DIR

MINIO_ENDPOINT = "minio.autonolas.tech"
ACCESS_KEY = os.environ.get("CLOUD_ACCESS_KEY", None)
SECRET_KEY = os.environ.get("CLOUD_SECRET_KEY", None)
BUCKET_NAME = "weekly-stats"
FOLDER_NAME = "historical_data"


def initialize_client():
    # Initialize the MinIO client
    client = Minio(
        MINIO_ENDPOINT,
        access_key=ACCESS_KEY,
        secret_key=SECRET_KEY,
        secure=True,  # Set to False if not using HTTPS
    )
    return client


def upload_file(client, filename: str, file_path: str) -> bool:
    """Upload a file to the bucket"""
    try:
        OBJECT_NAME = FOLDER_NAME + "/" + filename
        print(
            f"filename={filename}, object_name={OBJECT_NAME} and file_path={file_path}"
        )
        client.fput_object(
            BUCKET_NAME, OBJECT_NAME, file_path, part_size=10 * 1024 * 1024
        )  # 10MB parts
        print(f"File '{file_path}' uploaded as '{OBJECT_NAME}'.")
        return True
    except S3Error as err:
        print(f"Error uploading file: {err}")
        return False


def download_file(client, filename: str, file_path: str):
    """Download the file back"""
    try:
        OBJECT_NAME = FOLDER_NAME + "/" + filename
        client.fget_object(BUCKET_NAME, OBJECT_NAME, "downloaded_" + file_path)
        print(f"File '{OBJECT_NAME}' downloaded as 'downloaded_{file_path}'.")
    except S3Error as err:
        print(f"Error downloading file: {err}")


def load_historical_file(client, filename: str) -> bool:
    """Function to load one file into the cloud storage"""
    file_path = filename
    file_path = HIST_DIR / filename
    return upload_file(client, filename, file_path)


def upload_historical_file(filename: str):
    client = initialize_client()
    load_historical_file(client=client, filename=filename)


def process_historical_files(client):
    """Process all parquet files in historical_data folder"""

    # Walk through all files in the folder
    for filename in os.listdir(HIST_DIR):
        # Check if file is a parquet file
        if filename.endswith(".parquet"):
            try:
                if load_historical_file(client, filename):
                    print(f"Successfully processed {filename}")
                else:
                    print("Error loading the files")
            except Exception as e:
                print(f"Error processing {filename}: {str(e)}")


if __name__ == "__main__":
    # parser = argparse.ArgumentParser(
    #     description="Load files to the cloud storate for historical data"
    # )
    # parser.add_argument("param_1", type=str, help="Name of the file to upload")

    # # Parse the arguments
    # args = parser.parse_args()
    # filename = args.param_1

    client = initialize_client()
    # load_historical_file(client, filename)
    process_historical_files(client)
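
A minimal usage sketch for the uploader above, assuming CLOUD_ACCESS_KEY and CLOUD_SECRET_KEY are exported and that the file name below is only a placeholder sitting under HIST_DIR:

    from cloud_storage import upload_historical_file

    # Uploads HIST_DIR/<filename> to the weekly-stats bucket under historical_data/.
    upload_historical_file("weekly_metrics.parquet")  # placeholder file name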
scripts/daily_data.py
ADDED
@@ -0,0 +1,61 @@
import logging
from utils import measure_execution_time, ROOT_DIR, TMP_DIR
from profitability import (
    analyse_all_traders,
    label_trades_by_staking,
)
import pandas as pd
from nr_mech_calls import (
    create_unknown_traders_df,
    compute_daily_mech_calls,
    transform_to_datetime,
)
from markets import check_current_week_data
from staking import generate_retention_activity_file

logging.basicConfig(level=logging.INFO)


@measure_execution_time
def prepare_live_metrics(
    tools_filename="new_tools.parquet", trades_filename="new_fpmmTrades.parquet"
):
    fpmmTrades = pd.read_parquet(TMP_DIR / trades_filename)
    tools = pd.read_parquet(TMP_DIR / tools_filename)

    # TODO if monday data of the week is missing in new_fpmmTrades then take it from the general file
    try:
        fpmmTrades["creationTimestamp"] = fpmmTrades["creationTimestamp"].apply(
            lambda x: transform_to_datetime(x)
        )
    except Exception as e:
        print(f"Transformation not needed")
    # check missing data from Monday
    fpmmTrades = check_current_week_data(fpmmTrades)

    print("Computing the estimated mech calls dataset")
    trader_mech_calls = compute_daily_mech_calls(fpmmTrades=fpmmTrades, tools=tools)
    print("Analysing trades...")
    all_trades_df = analyse_all_traders(fpmmTrades, trader_mech_calls, daily_info=True)

    # staking label
    all_trades_df = label_trades_by_staking(all_trades_df)

    # create the unknown traders dataset
    unknown_traders_df, all_trades_df = create_unknown_traders_df(
        trades_df=all_trades_df
    )
    unknown_traders_df.to_parquet(
        TMP_DIR / "unknown_daily_traders.parquet", index=False
    )

    # save into a separate file
    all_trades_df.to_parquet(ROOT_DIR / "daily_info.parquet", index=False)

    # prepare the retention info file
    generate_retention_activity_file()


if __name__ == "__main__":
    prepare_live_metrics()
    # generate_retention_activity_file()
scripts/get_mech_info.py
ADDED
@@ -0,0 +1,335 @@
from string import Template
import shutil
import gzip
from typing import Any
from datetime import datetime, timedelta, UTC
from utils import (
    SUBGRAPH_API_KEY,
    measure_execution_time,
    ROOT_DIR,
    TMP_DIR,
    NETWORK_SUBGRAPH_URL,
    transform_to_datetime,
)
import requests
import pandas as pd
import numpy as np
from mech_request_utils import (
    collect_all_mech_delivers,
    collect_all_mech_requests,
    clean_mech_delivers,
    fix_duplicate_requestIds,
    merge_requests_delivers,
    get_ipfs_data,
    merge_json_files,
)

SUBGRAPH_HEADERS = {
    "Accept": "application/json, multipart/mixed",
    "Content-Type": "application/json",
}

QUERY_BATCH_SIZE = 1000
DATETIME_60_DAYS_AGO = datetime.now(UTC) - timedelta(days=60)
DATETIME_10_DAYS_AGO = datetime.now(UTC) - timedelta(days=10)
DATETIME_10_HOURS_AGO = datetime.now(UTC) - timedelta(hours=10)
BLOCK_NUMBER = Template(
    """
    {
      blocks(
        first: 1,
        orderBy: timestamp,
        orderDirection: asc,
        where: {
          timestamp_gte: "${timestamp_from}",
          timestamp_lte: "${timestamp_to}"
        }
      ){
        id,
        number,
      }
    }
    """
)

LATEST_BLOCK_QUERY = """
    {
      blocks(
        first: 1,
        orderBy: timestamp,
        orderDirection: desc,
      ){
        id,
        number,
      }
    }
    """


def read_all_trades_profitability() -> pd.DataFrame:
    try:
        all_trades = pd.read_parquet(ROOT_DIR / "all_trades_profitability.parquet")
    except Exception as e:
        try:
            with gzip.open("all_trades_profitability.parquet.gz", "rb") as f_in:
                with open("all_trades_profitability.parquet", "wb") as f_out:
                    shutil.copyfileobj(f_in, f_out)
            all_trades = pd.read_parquet(ROOT_DIR / "all_trades_profitability.parquet")
        except Exception as e:
            print(f"Error reading old trades parquet file {e}")
            return None
    return all_trades


def fetch_last_block_number() -> dict:
    # print(f"Sending query for the subgraph = {query}")
    network_subgraph_url = NETWORK_SUBGRAPH_URL.substitute(
        subgraph_api_key=SUBGRAPH_API_KEY
    )
    query = LATEST_BLOCK_QUERY
    response = requests.post(
        network_subgraph_url,
        headers=SUBGRAPH_HEADERS,
        json={"query": query},
        timeout=300,
    )

    result_json = response.json()
    print(f"Response of the query={result_json}")
    blocks = result_json.get("data", {}).get("blocks", "")
    if len(blocks) == 0:
        raise ValueError(f"The query {query} did not return any results")
    return blocks[0]


def fetch_block_number(timestamp_from: int, timestamp_to: int) -> dict:
    """Get a block number by its timestamp margins."""

    query = BLOCK_NUMBER.substitute(
        timestamp_from=timestamp_from, timestamp_to=timestamp_to
    )
    # print(f"Sending query for the subgraph = {query}")
    network_subgraph_url = NETWORK_SUBGRAPH_URL.substitute(
        subgraph_api_key=SUBGRAPH_API_KEY
    )
    response = requests.post(
        network_subgraph_url,
        headers=SUBGRAPH_HEADERS,
        json={"query": query},
        timeout=300,
    )
    # print(f"block query: {query}")
    result_json = response.json()
    print(f"Response of the query={result_json}")
    blocks = result_json.get("data", {}).get("blocks", "")
    if len(blocks) == 0:
        raise ValueError(f"The query {query} did not return any results")
    return blocks[0]


def update_json_files():
    merge_json_files("mech_requests.json", "new_mech_requests.json")
    merge_json_files("mech_delivers.json", "new_mech_delivers.json")
    merge_json_files("merged_requests.json", "new_merged_requests.json")
    merge_json_files("tools_info.json", "new_tools_info.json")


def update_all_trades_parquet(new_trades_df: pd.DataFrame) -> pd.DataFrame:
    # Read old all_trades parquet file
    old_trades_df = read_all_trades_profitability()
    # merge two dataframes
    merge_df = pd.concat([old_trades_df, new_trades_df], ignore_index=True)

    # Check for duplicates
    print(f"Initial length before removing duplicates in all_trades= {len(merge_df)}")

    # Remove duplicates
    merge_df.drop_duplicates("trade_id", inplace=True)
    print(f"Final length after removing duplicates in all_trades = {len(merge_df)}")
    return merge_df


def update_tools_parquet(new_tools_filename: pd.DataFrame):
    try:
        old_tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
    except Exception as e:
        print(f"Error reading old tools parquet file {e}")
        return None
    try:
        new_tools_df = pd.read_parquet(ROOT_DIR / new_tools_filename)

    except Exception as e:
        print(f"Error reading new trades parquet file {e}")
        return None

    # merge two dataframes
    merge_df = pd.concat([old_tools_df, new_tools_df], ignore_index=True)

    # Check for duplicates
    print(f"Initial length before removing duplicates in tools= {len(merge_df)}")

    # Remove duplicates
    merge_df.drop_duplicates(
        subset=["request_id", "request_time"], keep="last", inplace=True
    )
    print(f"Final length after removing duplicates in tools= {len(merge_df)}")

    # save the parquet file
    merge_df.to_parquet(TMP_DIR / "tools.parquet", index=False)


def get_mech_info_2024() -> dict[str, Any]:
    """Query the subgraph to get the 2024 information from mech."""

    date = "2024-01-01"
    datetime_jan_2024 = datetime.strptime(date, "%Y-%m-%d")
    timestamp_jan_2024 = int(datetime_jan_2024.timestamp())
    margin = timedelta(seconds=5)
    timestamp_jan_2024_plus_margin = int((datetime_jan_2024 + margin).timestamp())

    jan_block_number = fetch_block_number(
        timestamp_jan_2024, timestamp_jan_2024_plus_margin
    )
    # expecting only one block
    jan_block_number = jan_block_number.get("number", "")
    if jan_block_number.isdigit():
        jan_block_number = int(jan_block_number)

    if jan_block_number == "":
        raise ValueError(
            "Could not find a valid block number for the first of January 2024"
        )
    MECH_TO_INFO = {
        # this block number is when the creator had its first tx ever, and after this mech's creation
        "0xff82123dfb52ab75c417195c5fdb87630145ae81": (
            "old_mech_abi.json",
            jan_block_number,
        ),
        # this block number is when this mech was created
        "0x77af31de935740567cf4ff1986d04b2c964a786a": (
            "new_mech_abi.json",
            jan_block_number,
        ),
    }
    return MECH_TO_INFO


def get_last_block_number() -> int:
    last_block_number = fetch_last_block_number()
    # expecting only one block
    last_block_number = last_block_number.get("number", "")
    if last_block_number.isdigit():
        last_block_number = int(last_block_number)

    if last_block_number == "":
        raise ValueError("Could not find a valid block number for last month data")
    return last_block_number


def get_last_60_days_block_number() -> int:
    timestamp_60_days_ago = int((DATETIME_60_DAYS_AGO).timestamp())
    margin = timedelta(seconds=5)
    timestamp_60_days_ago_plus_margin = int((DATETIME_60_DAYS_AGO + margin).timestamp())

    last_month_block_number = fetch_block_number(
        timestamp_60_days_ago, timestamp_60_days_ago_plus_margin
    )
    # expecting only one block
    last_month_block_number = last_month_block_number.get("number", "")
    if last_month_block_number.isdigit():
        last_month_block_number = int(last_month_block_number)

    if last_month_block_number == "":
        raise ValueError("Could not find a valid block number for last month data")
    return last_month_block_number


def get_mech_info_last_60_days() -> dict[str, Any]:
    """Query the subgraph to get the last 60 days of information from mech."""
    last_month_block_number = get_last_60_days_block_number()

    MECH_TO_INFO = {
        # this block number is when the creator had its first tx ever, and after this mech's creation
        "0xff82123dfb52ab75c417195c5fdb87630145ae81": (
            "old_mech_abi.json",
            last_month_block_number,
        ),
        # this block number is when this mech was created
        "0x77af31de935740567cf4ff1986d04b2c964a786a": (
            "new_mech_abi.json",
            last_month_block_number,
        ),
    }
    print(f"last 60 days block number {last_month_block_number}")
    return MECH_TO_INFO


@measure_execution_time
def get_mech_events_since_last_run(logger):
    """Function to download only the new events since the last execution."""

    # Read the latest date from stored data
    try:
        all_trades = read_all_trades_profitability()
        latest_timestamp = max(all_trades.creation_timestamp)
        # cutoff_date = "2025-01-13"
        # latest_timestamp = pd.Timestamp(
        #     datetime.strptime(cutoff_date, "%Y-%m-%d")
        # ).tz_localize("UTC")
        print(f"Updating data since {latest_timestamp}")
    except Exception:
        print("Error while reading the profitability parquet file")
        return None

    # Get the block number of lastest date
    five_seconds = np.timedelta64(5, "s")
    last_run_block_number = fetch_block_number(
        int(latest_timestamp.timestamp()),
        int((latest_timestamp + five_seconds).timestamp()),
    )
    # expecting only one block
    last_run_block_number = last_run_block_number.get("number", "")
    if last_run_block_number.isdigit():
        last_run_block_number = int(last_run_block_number)

    if last_run_block_number == "":
        raise ValueError("Could not find a valid block number for last collected data")
    last_block_number = get_last_block_number()

    # mech requests
    requests_dict, duplicatedReqId, nr_errors = collect_all_mech_requests(
        from_block=last_run_block_number,
        to_block=last_block_number,
        filename="new_mech_requests.json",
    )
    print(f"NUMBER OF MECH REQUEST ERRORS={nr_errors}")
    # mech delivers
    delivers_dict, duplicatedIds, nr_errors = collect_all_mech_delivers(
        from_block=last_run_block_number,
        to_block=last_block_number,
        filename="new_mech_delivers.json",
    )
    print(f"NUMBER OF MECH DELIVER ERRORS={nr_errors}")
    if delivers_dict is None:
        return None
    # clean delivers
    clean_mech_delivers("new_mech_requests.json", "new_mech_delivers.json")

    # solve duplicated requestIds
    block_map = fix_duplicate_requestIds(
        "new_mech_requests.json", "new_mech_delivers.json"
    )
    # merge the two files into one source
    not_found = merge_requests_delivers(
        "new_mech_requests.json", "new_mech_delivers.json", "new_merged_requests.json"
    )

    # Add ipfs contents
    get_ipfs_data("new_merged_requests.json", "new_tools_info.json", logger)
    return latest_timestamp


if __name__ == "__main__":
    get_mech_events_since_last_run()
    # result = get_mech_info_last_60_days()
    # print(result)
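
The script above resolves timestamps to block numbers through the blocks subgraph with a small time margin. A minimal sketch of that lookup, assuming get_mech_info.py is importable, SUBGRAPH_API_KEY is set, and the date below is a placeholder:

    from datetime import datetime, UTC
    from get_mech_info import fetch_block_number

    ts = int(datetime(2024, 10, 25, tzinfo=UTC).timestamp())  # placeholder date
    block = fetch_block_number(ts, ts + 5)  # 5-second margin, mirroring the script
    print(block["number"])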
scripts/gnosis_timestamps.py
ADDED
@@ -0,0 +1,186 @@
from web3 import Web3
import os
import requests
import time
import pickle
from datetime import datetime, timezone
from functools import partial
import pandas as pd
import pytz
from tqdm import tqdm
from utils import ROOT_DIR, TMP_DIR, measure_execution_time
from concurrent.futures import ThreadPoolExecutor

GNOSIS_API_INTERVAL = 0.2  # 5 calls in 1 second
GNOSIS_URL = "https://api.gnosisscan.io/api"
GNOSIS_API_KEY = os.environ.get("GNOSIS_API_KEY", None)
# https://api.gnosisscan.io/api?module=account&action=txlist&address=0x1fe2b09de07475b1027b0c73a5bf52693b31a52e&startblock=36626348&endblock=36626348&page=1&offset=10&sort=asc&apikey=${gnosis_api_key}""

# Connect to Gnosis Chain RPC
w3 = Web3(Web3.HTTPProvider("https://rpc.gnosischain.com"))


def parallelize_timestamp_computation(df: pd.DataFrame, function: callable) -> list:
    """Parallelize the timestamp conversion."""
    tx_hashes = df["tx_hash"].tolist()
    with ThreadPoolExecutor(max_workers=10) as executor:
        results = list(tqdm(executor.map(function, tx_hashes), total=len(tx_hashes)))
    return results


def transform_timestamp_to_datetime(timestamp):
    dt = datetime.fromtimestamp(timestamp, timezone.utc)
    return dt


def get_tx_hash(trader_address, request_block):
    """Function to get the transaction hash from the address and block number"""
    params = {
        "module": "account",
        "action": "txlist",
        "address": trader_address,
        "page": 1,
        "offset": 100,
        "startblock": request_block,
        "endblock": request_block,
        "sort": "asc",
        "apikey": GNOSIS_API_KEY,
    }

    try:
        response = requests.get(GNOSIS_URL, params=params)
        tx_list = response.json()["result"]
        time.sleep(GNOSIS_API_INTERVAL)
        if len(tx_list) > 1:
            raise ValueError("More than one transaction found")
        return tx_list[0]["hash"]
    except Exception as e:
        return None


def add_tx_hash_info(filename: str = "tools.parquet"):
    """Function to add the hash info to the saved tools parquet file"""
    tools = pd.read_parquet(ROOT_DIR / filename)
    tools["tx_hash"] = None
    total_errors = 0
    for i, mech_request in tqdm(
        tools.iterrows(), total=len(tools), desc="Adding tx hash"
    ):
        try:
            trader_address = mech_request["trader_address"]
            block_number = mech_request["request_block"]
            tools.at[i, "tx_hash"] = get_tx_hash(
                trader_address=trader_address, request_block=block_number
            )
        except Exception as e:
            print(f"Error with mech request {mech_request}")
            total_errors += 1
            continue

    print(f"Total number of errors = {total_errors}")
    tools.to_parquet(ROOT_DIR / filename)


def get_transaction_timestamp(tx_hash: str, web3: Web3):

    try:
        # Get transaction data
        tx = web3.eth.get_transaction(tx_hash)
        # Get block data
        block = web3.eth.get_block(tx["blockNumber"])
        # Get timestamp
        timestamp = block["timestamp"]

        # Convert to datetime
        dt = datetime.fromtimestamp(timestamp, tz=pytz.UTC)

        # return {
        #     "timestamp": timestamp,
        #     "datetime": dt,
        #     "from_address": tx["from"],
        #     "to_address": tx["to"],
        #     "success": True,
        # }
        return dt.strftime("%Y-%m-%d %H:%M:%S")
    except Exception as e:
        print(f"Error getting the timestamp from {tx_hash}")
        return None


@measure_execution_time
def compute_request_time(tools_df: pd.DataFrame) -> pd.DataFrame:
    """Function to compute the request timestamp from the tx hash"""
    # read the local info
    try:
        gnosis_info = pickle.load(open(TMP_DIR / "gnosis_info.pkl", "rb"))
    except Exception:
        print("File not found or not created. Creating a new one")
        gnosis_info = {}

    # any previous information?
    tools_df["request_time"] = tools_df["tx_hash"].map(gnosis_info)

    # Identify tools with missing request_time and fill them
    missing_time_indices = tools_df[tools_df["request_time"].isna()].index
    print(f"length of missing_time_indices = {len(missing_time_indices)}")
    # traverse all tx hashes and get the timestamp of each tx
    partial_mech_request_timestamp = partial(get_transaction_timestamp, web3=w3)
    missing_timestamps = parallelize_timestamp_computation(
        tools_df.loc[missing_time_indices], partial_mech_request_timestamp
    )

    # Update the original DataFrame with the missing timestamps
    for i, timestamp in zip(missing_time_indices, missing_timestamps):
        tools_df.at[i, "request_time"] = timestamp
    # creating other time fields
    tools_df["request_month_year"] = pd.to_datetime(
        tools_df["request_time"]
    ).dt.strftime("%Y-%m")
    tools_df["request_month_year_week"] = (
        pd.to_datetime(tools_df["request_time"])
        .dt.to_period("W")
        .dt.start_time.dt.strftime("%b-%d-%Y")
    )
    # Update t_map with new timestamps
    new_timestamps = (
        tools_df[["tx_hash", "request_time"]]
        .dropna()
        .set_index("tx_hash")
        .to_dict()["request_time"]
    )
    gnosis_info.update(new_timestamps)
    # saving gnosis info
    with open(TMP_DIR / "gnosis_info.pkl", "wb") as f:
        pickle.dump(gnosis_info, f)
    return tools_df


def get_account_details(address):
    # gnosis_url = GNOSIS_URL.substitute(gnosis_api_key=GNOSIS_API_KEY, tx_hash=tx_hash)

    params = {
        "module": "account",
        "action": "txlistinternal",
        "address": address,
        #'page': 1,
        #'offset': 100,
        #'startblock': 0,
        #'endblock': 9999999999,
        #'sort': 'asc',
        "apikey": GNOSIS_API_KEY,
    }

    try:
        response = requests.get(GNOSIS_URL, params=params)
        return response.json()
    except Exception as e:
        return {"error": str(e)}


if __name__ == "__main__":
    # tx_data = "0x783BFA045BDE2D0BCD65280D97A29E7BD9E4FDC10985848690C9797E767140F4"
    new_tools = pd.read_parquet(ROOT_DIR / "new_tools.parquet")
    new_tools = compute_request_time(new_tools)
    new_tools.to_parquet(ROOT_DIR / "new_tools.parquet")
    # result = get_tx_hash("0x1fe2b09de07475b1027b0c73a5bf52693b31a52e", 36626348)
    # print(result)
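
A minimal sketch of the tx-hash to timestamp lookup above, assuming gnosis_timestamps.py is importable, the public Gnosis RPC is reachable, and that the hash below is only a placeholder, not a real transaction:

    from gnosis_timestamps import get_transaction_timestamp, w3

    tx_hash = "0x" + "0" * 64  # placeholder hash
    print(get_transaction_timestamp(tx_hash, w3))  # "YYYY-MM-DD HH:MM:SS" on success, None on error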
scripts/manage_space_files.py
ADDED
@@ -0,0 +1,40 @@
import os
import shutil

# Define the file names to move
files_to_move = [
    "new_tools.parquet",
    "new_fpmmTrades.parquet",
    "fpmms.parquet",
    "fpmmTrades.parquet",
]

# Get the current working directory
current_dir = os.getcwd()

# Define source and destination paths
source_dir = os.path.join(current_dir, "data")
dest_dir = os.path.join(current_dir, "tmp")


def move_files():
    # Create tmp directory if it doesn't exist
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    # Move each file
    for file_name in files_to_move:
        source_file = os.path.join(current_dir, file_name)
        dest_file = os.path.join(dest_dir, file_name)

        try:
            if os.path.exists(source_file):
                shutil.move(source_file, dest_file)
                print(f"Moved {file_name} successfully")
            else:
                print(f"File not found: {file_name}")
        except Exception as e:
            print(f"Error moving {file_name}: {str(e)}")


if __name__ == "__main__":
    move_files()
scripts/market_metrics.py
ADDED
@@ -0,0 +1,64 @@
import numpy as np
import pandas as pd
import time
from utils import convert_hex_to_int, ROOT_DIR, TMP_DIR


def determine_market_status(row):
    current_answer = row["currentAnswer"]
    """Determine the market status of a trade."""
    if (current_answer is np.nan or current_answer is None) and time.time() >= int(
        row["openingTimestamp"]
    ):
        return "pending"
    if current_answer is np.nan or current_answer is None:
        return "open"
    if row["fpmm.isPendingArbitration"]:
        return "arbitrating"
    if row["fpmm.answerFinalizedTimestamp"] and time.time() < int(
        row["fpmm.answerFinalizedTimestamp"]
    ):
        return "finalizing"
    return "closed"


def compute_market_metrics(all_trades: pd.DataFrame):
    print("Preparing dataset")
    all_trades.rename(
        columns={
            "fpmm.currentAnswer": "currentAnswer",
            "fpmm.openingTimestamp": "openingTimestamp",
            "fpmm.id": "market_id",
        },
        inplace=True,
    )
    all_trades["currentAnswer"] = all_trades["currentAnswer"].apply(
        lambda x: convert_hex_to_int(x)
    )
    all_trades["market_status"] = all_trades.apply(
        lambda x: determine_market_status(x), axis=1
    )
    closed_trades = all_trades.loc[all_trades["market_status"] == "closed"]
    print("Computing metrics")
    nr_trades = (
        closed_trades.groupby("market_id")["id"].count().reset_index(name="nr_trades")
    )
    total_traders = (
        closed_trades.groupby("market_id")["trader_address"]
        .nunique()
        .reset_index(name="total_traders")
    )
    final_dataset = nr_trades.merge(total_traders, on="market_id")
    markets = closed_trades[
        ["market_id", "title", "market_creator", "openingTimestamp"]
    ]
    markets.drop_duplicates("market_id", inplace=True)
    market_metrics = markets.merge(final_dataset, on="market_id")
    print("Saving dataset")
    market_metrics.to_parquet(ROOT_DIR / "closed_market_metrics.parquet", index=False)
    print(market_metrics.head())


if __name__ == "__main__":
    all_trades = pd.read_parquet(TMP_DIR / "fpmmTrades.parquet")
    compute_market_metrics(all_trades)
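
For illustration, determine_market_status above can be exercised on a plain dict shaped like a trade row; a sketch with made-up values, assuming the function above is in scope:

    import time

    # Made-up row: answered market whose finalization window has already passed.
    row = {
        "currentAnswer": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "openingTimestamp": str(int(time.time()) - 10_000),
        "fpmm.isPendingArbitration": False,
        "fpmm.answerFinalizedTimestamp": str(int(time.time()) - 5_000),
    }
    print(determine_market_status(row))  # "closed"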
scripts/markets.py
ADDED
@@ -0,0 +1,464 @@
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
# ------------------------------------------------------------------------------
|
3 |
+
#
|
4 |
+
# Copyright 2023 Valory AG
|
5 |
+
#
|
6 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
7 |
+
# you may not use this file except in compliance with the License.
|
8 |
+
# You may obtain a copy of the License at
|
9 |
+
#
|
10 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
11 |
+
#
|
12 |
+
# Unless required by applicable law or agreed to in writing, software
|
13 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
14 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
15 |
+
# See the License for the specific language governing permissions and
|
16 |
+
# limitations under the License.
|
17 |
+
#
|
18 |
+
# ------------------------------------------------------------------------------
|
19 |
+
|
20 |
+
import functools
|
21 |
+
import warnings
|
22 |
+
from datetime import datetime, timedelta
|
23 |
+
from typing import Optional, Generator, Callable
|
24 |
+
import pandas as pd
|
25 |
+
import requests
|
26 |
+
from tqdm import tqdm
|
27 |
+
from typing import List, Dict
|
28 |
+
from utils import SUBGRAPH_API_KEY, ROOT_DIR, TMP_DIR, transform_to_datetime
|
29 |
+
from web3_utils import (
|
30 |
+
FPMM_QS_CREATOR,
|
31 |
+
FPMM_PEARL_CREATOR,
|
32 |
+
query_omen_xdai_subgraph,
|
33 |
+
OMEN_SUBGRAPH_URL,
|
34 |
+
)
|
35 |
+
from queries import (
|
36 |
+
FPMMS_QUERY,
|
37 |
+
ID_FIELD,
|
38 |
+
DATA_FIELD,
|
39 |
+
ANSWER_FIELD,
|
40 |
+
QUERY_FIELD,
|
41 |
+
TITLE_FIELD,
|
42 |
+
OUTCOMES_FIELD,
|
43 |
+
ERROR_FIELD,
|
44 |
+
QUESTION_FIELD,
|
45 |
+
FPMMS_FIELD,
|
46 |
+
)
|
47 |
+
|
48 |
+
ResponseItemType = List[Dict[str, str]]
|
49 |
+
SubgraphResponseType = Dict[str, ResponseItemType]
|
50 |
+
BATCH_SIZE = 1000
|
51 |
+
DEFAULT_TO_TIMESTAMP = 2147483647 # around year 2038
|
52 |
+
DEFAULT_FROM_TIMESTAMP = 0
|
53 |
+
|
54 |
+
MAX_UINT_HEX = "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
|
55 |
+
DEFAULT_FILENAME = "fpmms.parquet"
|
56 |
+
market_creators_map = {"quickstart": FPMM_QS_CREATOR, "pearl": FPMM_PEARL_CREATOR}
|
57 |
+
|
58 |
+
|
59 |
+
class RetriesExceeded(Exception):
|
60 |
+
"""Exception to raise when retries are exceeded during data-fetching."""
|
61 |
+
|
62 |
+
def __init__(
|
63 |
+
self, msg="Maximum retries were exceeded while trying to fetch the data!"
|
64 |
+
):
|
65 |
+
super().__init__(msg)
|
66 |
+
|
67 |
+
|
68 |
+
def hacky_retry(func: Callable, n_retries: int = 3) -> Callable:
|
69 |
+
"""Create a hacky retry strategy.
|
70 |
+
Unfortunately, we cannot use `requests.packages.urllib3.util.retry.Retry`,
|
71 |
+
because the subgraph does not return the appropriate status codes in case of failure.
|
72 |
+
Instead, it always returns code 200. Thus, we raise exceptions manually inside `make_request`,
|
73 |
+
catch those exceptions in the hacky retry decorator and try again.
|
74 |
+
Finally, if the allowed number of retries is exceeded, we raise a custom `RetriesExceeded` exception.
|
75 |
+
|
76 |
+
:param func: the input request function.
|
77 |
+
:param n_retries: the maximum allowed number of retries.
|
78 |
+
:return: The request method with the hacky retry strategy applied.
|
79 |
+
"""
|
80 |
+
|
81 |
+
@functools.wraps(func)
|
82 |
+
def wrapper_hacky_retry(*args, **kwargs) -> SubgraphResponseType:
|
83 |
+
"""The wrapper for the hacky retry.
|
84 |
+
|
85 |
+
:return: a response dictionary.
|
86 |
+
"""
|
87 |
+
retried = 0
|
88 |
+
|
89 |
+
while retried <= n_retries:
|
90 |
+
try:
|
91 |
+
if retried > 0:
|
92 |
+
warnings.warn(f"Retrying {retried}/{n_retries}...")
|
93 |
+
|
94 |
+
return func(*args, **kwargs)
|
95 |
+
except (ValueError, ConnectionError) as e:
|
96 |
+
warnings.warn(e.args[0])
|
97 |
+
finally:
|
98 |
+
retried += 1
|
99 |
+
|
100 |
+
raise RetriesExceeded()
|
101 |
+
|
102 |
+
return wrapper_hacky_retry
|
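# A minimal usage sketch of `hacky_retry`, assuming the definitions above.
# `flaky_call` and its `_calls` counter are hypothetical and only illustrate
# that ValueError/ConnectionError trigger a retry while other errors do not.
_calls = {"n": 0}


@hacky_retry
def flaky_call() -> SubgraphResponseType:
    """Fail twice with a transient error, then succeed."""
    _calls["n"] += 1
    if _calls["n"] < 3:
        raise ValueError("transient subgraph error")
    return {"fpmms": [{"id": "0x01"}]}


# flaky_call() warns "Retrying 1/3..." and "Retrying 2/3..." before returning the
# payload; exhausting all attempts would raise RetriesExceeded instead.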
103 |
+
|
104 |
+
|
105 |
+
@hacky_retry
|
106 |
+
def query_subgraph(url: str, query: str, key: str) -> SubgraphResponseType:
|
107 |
+
"""Query a subgraph.
|
108 |
+
|
109 |
+
Args:
|
110 |
+
url: the subgraph's URL.
|
111 |
+
query: the query to be used.
|
112 |
+
key: the key to use in order to access the required data.
|
113 |
+
|
114 |
+
Returns:
|
115 |
+
a response dictionary.
|
116 |
+
"""
|
117 |
+
content = {QUERY_FIELD: query}
|
118 |
+
headers = {
|
119 |
+
"Accept": "application/json",
|
120 |
+
"Content-Type": "application/json",
|
121 |
+
}
|
122 |
+
res = requests.post(url, json=content, headers=headers)
|
123 |
+
|
124 |
+
if res.status_code != 200:
|
125 |
+
raise ConnectionError(
|
126 |
+
"Something went wrong while trying to communicate with the subgraph "
|
127 |
+
f"(Error: {res.status_code})!\n{res.text}"
|
128 |
+
)
|
129 |
+
|
130 |
+
body = res.json()
|
131 |
+
if ERROR_FIELD in body.keys():
|
132 |
+
raise ValueError(f"The given query is not correct: {body[ERROR_FIELD]}")
|
133 |
+
|
134 |
+
data = body.get(DATA_FIELD, {}).get(key, None)
|
135 |
+
if data is None:
|
136 |
+
raise ValueError(f"Unknown error encountered!\nRaw response: \n{body}")
|
137 |
+
|
138 |
+
return data
|
139 |
+
|
140 |
+
|
141 |
+
def transform_fpmmTrades(df: pd.DataFrame) -> pd.DataFrame:
|
142 |
+
print("Transforming trades dataframe")
|
143 |
+
# convert creator to address
|
144 |
+
df["creator"] = df["creator"].apply(lambda x: x["id"])
|
145 |
+
|
146 |
+
# normalize fpmm column
|
147 |
+
fpmm = pd.json_normalize(df["fpmm"])
|
148 |
+
fpmm.columns = [f"fpmm.{col}" for col in fpmm.columns]
|
149 |
+
df = pd.concat([df, fpmm], axis=1)
|
150 |
+
|
151 |
+
# drop fpmm column
|
152 |
+
df.drop(["fpmm"], axis=1, inplace=True)
|
153 |
+
|
154 |
+
# change creator to creator_address
|
155 |
+
df.rename(columns={"creator": "trader_address"}, inplace=True)
|
156 |
+
return df
|
157 |
+
|
158 |
+
|
159 |
+
def create_fpmmTrades(
|
160 |
+
from_timestamp: int = DEFAULT_FROM_TIMESTAMP,
|
161 |
+
to_timestamp: int = DEFAULT_TO_TIMESTAMP,
|
162 |
+
):
|
163 |
+
"""Create fpmmTrades for all trades."""
|
164 |
+
print("Getting trades from Quickstart markets")
|
165 |
+
# Quickstart trades
|
166 |
+
qs_trades_json = query_omen_xdai_subgraph(
|
167 |
+
trader_category="quickstart",
|
168 |
+
from_timestamp=from_timestamp,
|
169 |
+
to_timestamp=to_timestamp,
|
170 |
+
fpmm_from_timestamp=from_timestamp,
|
171 |
+
fpmm_to_timestamp=to_timestamp,
|
172 |
+
)
|
173 |
+
|
174 |
+
print(f"length of the qs_trades_json dataset {len(qs_trades_json)}")
|
175 |
+
|
176 |
+
# convert to dataframe
|
177 |
+
qs_df = pd.DataFrame(qs_trades_json["data"]["fpmmTrades"])
|
178 |
+
qs_df["market_creator"] = "quickstart"
|
179 |
+
qs_df = transform_fpmmTrades(qs_df)
|
180 |
+
|
181 |
+
# Pearl trades
|
182 |
+
print("Getting trades from Pearl markets")
|
183 |
+
pearl_trades_json = query_omen_xdai_subgraph(
|
184 |
+
trader_category="pearl",
|
185 |
+
from_timestamp=from_timestamp,
|
186 |
+
to_timestamp=to_timestamp,
|
187 |
+
fpmm_from_timestamp=from_timestamp,
|
188 |
+
fpmm_to_timestamp=to_timestamp,
|
189 |
+
)
|
190 |
+
|
191 |
+
print(f"length of the pearl_trades_json dataset {len(pearl_trades_json)}")
|
192 |
+
|
193 |
+
# convert to dataframe
|
194 |
+
pearl_df = pd.DataFrame(pearl_trades_json["data"]["fpmmTrades"])
|
195 |
+
pearl_df["market_creator"] = "pearl"
|
196 |
+
pearl_df = transform_fpmmTrades(pearl_df)
|
197 |
+
|
198 |
+
return pd.concat([qs_df, pearl_df], ignore_index=True)
|
199 |
+
|
200 |
+
|
201 |
+
def fpmms_fetcher(trader_category: str) -> Generator[ResponseItemType, int, None]:
|
202 |
+
"""An indefinite fetcher for the FPMMs."""
|
203 |
+
omen_subgraph = OMEN_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
|
204 |
+
print(f"omen_subgraph = {omen_subgraph}")
|
205 |
+
|
206 |
+
if trader_category == "pearl":
|
207 |
+
creator_id = FPMM_PEARL_CREATOR
|
208 |
+
else: # quickstart
|
209 |
+
creator_id = FPMM_QS_CREATOR
|
210 |
+
while True:
|
211 |
+
fpmm_id = yield
|
212 |
+
fpmms_query = FPMMS_QUERY.substitute(
|
213 |
+
creator=creator_id,
|
214 |
+
fpmm_id=fpmm_id,
|
215 |
+
fpmms_field=FPMMS_FIELD,
|
216 |
+
first=BATCH_SIZE,
|
217 |
+
id_field=ID_FIELD,
|
218 |
+
answer_field=ANSWER_FIELD,
|
219 |
+
question_field=QUESTION_FIELD,
|
220 |
+
outcomes_field=OUTCOMES_FIELD,
|
221 |
+
title_field=TITLE_FIELD,
|
222 |
+
)
|
223 |
+
print(f"markets query = {fpmms_query}")
|
224 |
+
yield query_subgraph(omen_subgraph, fpmms_query, FPMMS_FIELD)
|
225 |
+
|
226 |
+
|
227 |
+
def fetch_qs_fpmms() -> pd.DataFrame:
|
228 |
+
"""Fetch all the fpmms of the creator."""
|
229 |
+
latest_id = ""
|
230 |
+
fpmms = []
|
231 |
+
trader_category = "quickstart"
|
232 |
+
print(f"Getting markets for {trader_category}")
|
233 |
+
fetcher = fpmms_fetcher(trader_category)
|
234 |
+
for _ in tqdm(fetcher, unit="fpmms", unit_scale=BATCH_SIZE):
|
235 |
+
batch = fetcher.send(latest_id)
|
236 |
+
if len(batch) == 0:
|
237 |
+
break
|
238 |
+
|
239 |
+
latest_id = batch[-1].get(ID_FIELD, "")
|
240 |
+
if latest_id == "":
|
241 |
+
raise ValueError(f"Unexpected data format retrieved: {batch}")
|
242 |
+
|
243 |
+
fpmms.extend(batch)
|
244 |
+
|
245 |
+
return pd.DataFrame(fpmms)
|
246 |
+
|
247 |
+
|
248 |
+
def fetch_pearl_fpmms() -> pd.DataFrame:
|
249 |
+
"""Fetch all the fpmms of the creator."""
|
250 |
+
latest_id = ""
|
251 |
+
fpmms = []
|
252 |
+
trader_category = "pearl"
|
253 |
+
print(f"Getting markets for {trader_category}")
|
254 |
+
fetcher = fpmms_fetcher(trader_category)
|
255 |
+
for _ in tqdm(fetcher, unit="fpmms", unit_scale=BATCH_SIZE):
|
256 |
+
batch = fetcher.send(latest_id)
|
257 |
+
if len(batch) == 0:
|
258 |
+
break
|
259 |
+
|
260 |
+
latest_id = batch[-1].get(ID_FIELD, "")
|
261 |
+
if latest_id == "":
|
262 |
+
raise ValueError(f"Unexpected data format retrieved: {batch}")
|
263 |
+
|
264 |
+
fpmms.extend(batch)
|
265 |
+
|
266 |
+
return pd.DataFrame(fpmms)
|
267 |
+
|
268 |
+
|
269 |
+
def get_answer(fpmm: pd.Series) -> str:
|
270 |
+
"""Get an answer from its index, using Series of an FPMM."""
|
271 |
+
return fpmm[QUESTION_FIELD][OUTCOMES_FIELD][fpmm[ANSWER_FIELD]]
|
272 |
+
|
273 |
+
|
274 |
+
def transform_fpmms(fpmms: pd.DataFrame) -> pd.DataFrame:
|
275 |
+
"""Transform an FPMMS dataframe."""
|
276 |
+
transformed = fpmms.dropna()
|
277 |
+
transformed = transformed.drop_duplicates([ID_FIELD])
|
278 |
+
transformed = transformed.loc[transformed[ANSWER_FIELD] != MAX_UINT_HEX]
|
279 |
+
transformed.loc[:, ANSWER_FIELD] = (
|
280 |
+
transformed[ANSWER_FIELD].str.slice(-1).astype(int)
|
281 |
+
)
|
282 |
+
transformed.loc[:, ANSWER_FIELD] = transformed.apply(get_answer, axis=1)
|
283 |
+
transformed = transformed.drop(columns=[QUESTION_FIELD])
|
284 |
+
|
285 |
+
return transformed
|
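# A minimal illustrative check of `transform_fpmms`, assuming the field constants
# imported from queries.py; the toy row below is hypothetical, not real subgraph data.
_toy_fpmms = pd.DataFrame(
    [
        {
            ID_FIELD: "0xabc",
            ANSWER_FIELD: "0x" + "0" * 63 + "1",
            QUESTION_FIELD: {OUTCOMES_FIELD: ["Yes", "No"]},
            TITLE_FIELD: "Will it rain tomorrow?",
        }
    ]
)
# transform_fpmms(_toy_fpmms) keeps the row (the answer is not MAX_UINT_HEX), turns
# the last hex digit into outcome index 1, maps it to "No" via get_answer, and drops
# the question column.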
286 |
+
|
287 |
+
|
288 |
+
def etl(filename: Optional[str] = None) -> pd.DataFrame:
|
289 |
+
"""Fetch, process, store and return the markets as a Dataframe."""
|
290 |
+
qs_fpmms = fetch_qs_fpmms()
|
291 |
+
qs_fpmms = transform_fpmms(qs_fpmms)
|
292 |
+
qs_fpmms["market_creator"] = "quickstart"
|
293 |
+
print(f"Results for the market creator quickstart. Len = {len(qs_fpmms)}")
|
294 |
+
|
295 |
+
pearl_fpmms = fetch_pearl_fpmms()
|
296 |
+
pearl_fpmms = transform_fpmms(pearl_fpmms)
|
297 |
+
pearl_fpmms["market_creator"] = "pearl"
|
298 |
+
print(f"Results for the market creator pearl. Len = {len(pearl_fpmms)}")
|
299 |
+
fpmms = pd.concat([qs_fpmms, pearl_fpmms], ignore_index=True)
|
300 |
+
|
301 |
+
if filename:
|
302 |
+
fpmms.to_parquet(ROOT_DIR / filename, index=False)
|
303 |
+
|
304 |
+
return fpmms
|
305 |
+
|
306 |
+
|
307 |
+
def read_global_trades_file() -> pd.DataFrame:
|
308 |
+
try:
|
309 |
+
trades_filename = "fpmmTrades.parquet"
|
310 |
+
fpmms_trades = pd.read_parquet(TMP_DIR / trades_filename)
|
311 |
+
except FileNotFoundError:
|
312 |
+
print("Error: fpmmTrades.parquet not found. No market creator added")
|
313 |
+
return
|
314 |
+
return fpmms_trades
|
315 |
+
|
316 |
+
|
317 |
+
def add_market_creator(tools: pd.DataFrame) -> pd.DataFrame:
|
318 |
+
# Check if fpmmTrades.parquet is in the same directory
|
319 |
+
fpmms_trades = read_global_trades_file()
|
320 |
+
tools["market_creator"] = ""
|
321 |
+
# traverse the list of traders
|
322 |
+
tools_no_market_creator = 0
|
323 |
+
traders_list = list(tools.trader_address.unique())
|
324 |
+
for trader_address in traders_list:
|
325 |
+
market_creator = ""
|
326 |
+
try:
|
327 |
+
trades = fpmms_trades[fpmms_trades["trader_address"] == trader_address]
|
328 |
+
market_creator = trades.iloc[0]["market_creator"] # first value is enough
|
329 |
+
except Exception:
|
330 |
+
print(f"ERROR getting the market creator of {trader_address}")
|
331 |
+
tools_no_market_creator += 1
|
332 |
+
continue
|
333 |
+
# update
|
334 |
+
tools.loc[tools["trader_address"] == trader_address, "market_creator"] = (
|
335 |
+
market_creator
|
336 |
+
)
|
337 |
+
# filter those tools where we don't have market creator info
|
338 |
+
tools = tools.loc[tools["market_creator"] != ""]
|
339 |
+
print(f"Number of tools with no market creator info = {tools_no_market_creator}")
|
340 |
+
return tools
|
341 |
+
|
342 |
+
|
343 |
+
def fpmmTrades_etl(
|
344 |
+
trades_filename: str, from_timestamp: int, to_timestamp: int = DEFAULT_TO_TIMESTAMP
|
345 |
+
) -> pd.DataFrame:
|
346 |
+
print("Generating the trades file")
|
347 |
+
try:
|
348 |
+
fpmmTrades = create_fpmmTrades(
|
349 |
+
from_timestamp=from_timestamp, to_timestamp=to_timestamp
|
350 |
+
)
|
351 |
+
except FileNotFoundError:
|
352 |
+
print(f"Error creating {trades_filename} file .")
|
353 |
+
|
354 |
+
# make sure trader_address is in the columns
|
355 |
+
assert "trader_address" in fpmmTrades.columns, "trader_address column not found"
|
356 |
+
|
357 |
+
# lowercase and strip creator_address
|
358 |
+
fpmmTrades["trader_address"] = fpmmTrades["trader_address"].str.lower().str.strip()
|
359 |
+
fpmmTrades.to_parquet(ROOT_DIR / trades_filename, index=False)
|
360 |
+
return fpmmTrades
|
361 |
+
|
362 |
+
|
363 |
+
def check_current_week_data(trades_df: pd.DataFrame) -> pd.DataFrame:
|
364 |
+
"""Function to check if all current weeks data is present, if not, then add the missing data from previous file"""
|
365 |
+
# Get current date
|
366 |
+
now = datetime.now()
|
367 |
+
|
368 |
+
# Get start of the current week (Monday)
|
369 |
+
start_of_week = now - timedelta(days=now.weekday())
|
370 |
+
start_of_week = start_of_week.replace(hour=0, minute=0, second=0, microsecond=0)
|
371 |
+
print(f"start of the week = {start_of_week}")
|
372 |
+
|
373 |
+
trades_df["creation_timestamp"] = pd.to_datetime(trades_df["creationTimestamp"])
|
374 |
+
trades_df["creation_date"] = trades_df["creation_timestamp"].dt.date
|
375 |
+
trades_df["creation_date"] = pd.to_datetime(trades_df["creation_date"])
|
376 |
+
# Check dataframe
|
377 |
+
min_date = min(trades_df.creation_date)
|
378 |
+
if min_date > start_of_week:
|
379 |
+
# missing data of current week in the trades file
|
380 |
+
fpmms_trades = read_global_trades_file()
|
381 |
+
# get missing data
|
382 |
+
missing_data = fpmms_trades[
|
383 |
+
(fpmms_trades["creation_date"] >= start_of_week)
|
384 |
+
& (fpmms_trades["creation_date"] < min_date)
|
385 |
+
]
|
386 |
+
merge_df = pd.concat([trades_df, missing_data], ignore_index=True)
|
387 |
+
merge_df.drop_duplicates("id", keep="last", inplace=True)
|
388 |
+
return merge_df
|
389 |
+
# no update needed
|
390 |
+
return trades_df
|
391 |
+
|
392 |
+
|
393 |
+
def update_fpmmTrades_parquet(trades_filename: str) -> pd.DataFrame:
|
394 |
+
# Read old trades parquet file
|
395 |
+
try:
|
396 |
+
old_trades_df = pd.read_parquet(TMP_DIR / "fpmmTrades.parquet")
|
397 |
+
except Exception as e:
|
398 |
+
print(f"Error reading old trades parquet file {e}")
|
399 |
+
return None
|
400 |
+
|
401 |
+
try:
|
402 |
+
new_trades_df = pd.read_parquet(ROOT_DIR / trades_filename)
|
403 |
+
except Exception as e:
|
404 |
+
print(f"Error reading new trades parquet file {e}")
|
405 |
+
return None
|
406 |
+
|
407 |
+
# lowercase and strip creator_address
|
408 |
+
new_trades_df["trader_address"] = (
|
409 |
+
new_trades_df["trader_address"].str.lower().str.strip()
|
410 |
+
)
|
411 |
+
# ensure creationTimestamp compatibility
|
412 |
+
try:
|
413 |
+
new_trades_df["creationTimestamp"] = new_trades_df["creationTimestamp"].apply(
|
414 |
+
lambda x: transform_to_datetime(x)
|
415 |
+
)
|
416 |
+
|
417 |
+
except Exception as e:
|
418 |
+
print(f"Transformation not needed")
|
419 |
+
try:
|
420 |
+
old_trades_df["creationTimestamp"] = old_trades_df["creationTimestamp"].apply(
|
421 |
+
lambda x: transform_to_datetime(x)
|
422 |
+
)
|
423 |
+
except Exception as e:
|
424 |
+
print(f"Transformation not needed")
|
425 |
+
|
426 |
+
# merge two dataframes
|
427 |
+
merge_df = pd.concat([old_trades_df, new_trades_df], ignore_index=True)
|
428 |
+
# avoid numpy objects
|
429 |
+
merge_df["fpmm.arbitrationOccurred"] = merge_df["fpmm.arbitrationOccurred"].astype(
|
430 |
+
bool
|
431 |
+
)
|
432 |
+
merge_df["fpmm.isPendingArbitration"] = merge_df[
|
433 |
+
"fpmm.isPendingArbitration"
|
434 |
+
].astype(bool)
|
435 |
+
|
436 |
+
# Check for duplicates
|
437 |
+
print(f"Initial length before removing duplicates in fpmmTrades= {len(merge_df)}")
|
438 |
+
|
439 |
+
# Remove duplicates
|
440 |
+
# fpmm.outcomes is a numpy array
|
441 |
+
merge_df.drop_duplicates("id", keep="last", inplace=True)
|
442 |
+
print(f"Final length after removing duplicates in fpmmTrades= {len(merge_df)}")
|
443 |
+
|
444 |
+
# save the parquet file
|
445 |
+
merge_df.to_parquet(TMP_DIR / "fpmmTrades.parquet", index=False)
|
446 |
+
|
447 |
+
return
|
448 |
+
|
449 |
+
|
450 |
+
def update_fpmmTrades(from_date: str):
|
451 |
+
|
452 |
+
from_timestamp = pd.Timestamp(datetime.strptime(from_date, "%Y-%m-%d")).tz_localize(
|
453 |
+
"UTC"
|
454 |
+
)
|
455 |
+
fpmmTrades_etl(
|
456 |
+
trades_filename="new_fpmmTrades.parquet",
|
457 |
+
from_timestamp=int(from_timestamp.timestamp()),
|
458 |
+
)
|
459 |
+
update_fpmmTrades_parquet("new_fpmmTrades.parquet")
|
460 |
+
|
461 |
+
|
462 |
+
if __name__ == "__main__":
|
463 |
+
cutoff_date = "2025-01-20"
|
464 |
+
update_fpmmTrades(cutoff_date)
|
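# A standalone sketch of the cursor-style pagination used by the fetchers above
# (the last fetched id becomes the next `id_gt`); `_fake_batch` is a hypothetical
# stand-in for a subgraph page, not a real query.
def _fake_batch(all_ids: set, id_gt: str, batch_size: int = 2) -> list:
    """Return the next page of ids strictly greater than the cursor."""
    return [{"id": i} for i in sorted(all_ids) if i > id_gt][:batch_size]


def paginate(all_ids: set) -> list:
    collected, cursor = [], ""
    while True:
        batch = _fake_batch(all_ids, cursor)
        if not batch:
            break
        cursor = batch[-1]["id"]  # advance the cursor to the last fetched id
        collected.extend(batch)
    return collected


# paginate({"0x01", "0x02", "0x03"}) fetches the three ids in two pages.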
scripts/mech_request_utils.py
ADDED
@@ -0,0 +1,603 @@
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
# ------------------------------------------------------------------------------
|
3 |
+
#
|
4 |
+
# Copyright 2024 Valory AG
|
5 |
+
#
|
6 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
7 |
+
# you may not use this file except in compliance with the License.
|
8 |
+
# You may obtain a copy of the License at
|
9 |
+
#
|
10 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
11 |
+
#
|
12 |
+
# Unless required by applicable law or agreed to in writing, software
|
13 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
14 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
15 |
+
# See the License for the specific language governing permissions and
|
16 |
+
# limitations under the License.
|
17 |
+
#
|
18 |
+
# ------------------------------------------------------------------------------
|
19 |
+
|
20 |
+
"""Script for retrieving mech requests and their delivers."""
|
21 |
+
import json
|
22 |
+
import time
|
23 |
+
import pickle
|
24 |
+
from random import uniform
|
25 |
+
from typing import Any, Dict, Tuple
|
26 |
+
import requests
|
27 |
+
from gql import Client, gql
|
28 |
+
from gql.transport.requests import RequestsHTTPTransport
|
29 |
+
from tools import (
|
30 |
+
GET_CONTENTS_BATCH_SIZE,
|
31 |
+
IRRELEVANT_TOOLS,
|
32 |
+
create_session,
|
33 |
+
request,
|
34 |
+
)
|
35 |
+
from tqdm import tqdm
|
36 |
+
from web3_utils import (
|
37 |
+
FPMM_QS_CREATOR,
|
38 |
+
FPMM_PEARL_CREATOR,
|
39 |
+
IPFS_POLL_INTERVAL,
|
40 |
+
SUBGRAPH_POLL_INTERVAL,
|
41 |
+
)
|
42 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
43 |
+
from utils import (
|
44 |
+
ROOT_DIR,
|
45 |
+
JSON_DATA_DIR,
|
46 |
+
MECH_SUBGRAPH_URL,
|
47 |
+
SUBGRAPH_API_KEY,
|
48 |
+
IPFS_ADDRESS,
|
49 |
+
)
|
50 |
+
|
51 |
+
NUM_WORKERS = 10
|
52 |
+
BLOCKS_CHUNK_SIZE = 10000
|
53 |
+
TEXT_ALIGNMENT = 30
|
54 |
+
MINIMUM_WRITE_FILE_DELAY_SECONDS = 20
|
55 |
+
MECH_FROM_BLOCK_RANGE = 50000
|
56 |
+
|
57 |
+
last_write_time = 0.0
|
58 |
+
|
59 |
+
REQUESTS_QUERY_FILTER = """
|
60 |
+
query requests_query($sender_not_in: [Bytes!], $id_gt: Bytes, $blockNumber_gte: BigInt, $blockNumber_lte: BigInt) {
|
61 |
+
requests(where: {sender_not_in: $sender_not_in, id_gt: $id_gt, blockNumber_gte: $blockNumber_gte, blockNumber_lte: $blockNumber_lte}, orderBy: id, first: 1000) {
|
62 |
+
blockNumber
|
63 |
+
blockTimestamp
|
64 |
+
id
|
65 |
+
ipfsHash
|
66 |
+
requestId
|
67 |
+
sender
|
68 |
+
transactionHash
|
69 |
+
}
|
70 |
+
}
|
71 |
+
"""
|
72 |
+
|
73 |
+
DELIVERS_QUERY_NO_FILTER = """
|
74 |
+
query delivers_query($id_gt: Bytes, $blockNumber_gte: BigInt, $blockNumber_lte: BigInt) {
|
75 |
+
delivers(where: {id_gt: $id_gt, blockNumber_gte: $blockNumber_gte, blockNumber_lte: $blockNumber_lte}, orderBy: id, first: 1000) {
|
76 |
+
blockNumber
|
77 |
+
blockTimestamp
|
78 |
+
id
|
79 |
+
ipfsHash
|
80 |
+
requestId
|
81 |
+
sender
|
82 |
+
transactionHash
|
83 |
+
}
|
84 |
+
}
|
85 |
+
|
86 |
+
"""
|
87 |
+
DELIVERS_QUERY = """
|
88 |
+
query delivers_query($requestId: BigInt, $blockNumber_gte: BigInt, $blockNumber_lte: BigInt) {
|
89 |
+
delivers(where: {requestId: $requestId, blockNumber_gte: $blockNumber_gte, blockNumber_lte: $blockNumber_lte}, orderBy: blockNumber, first: 1000) {
|
90 |
+
blockNumber
|
91 |
+
blockTimestamp
|
92 |
+
id
|
93 |
+
ipfsHash
|
94 |
+
requestId
|
95 |
+
sender
|
96 |
+
transactionHash
|
97 |
+
}
|
98 |
+
}
|
99 |
+
"""
|
100 |
+
|
101 |
+
MISSING_DELIVERS_QUERY = """
|
102 |
+
query delivers_query($requestId: BigInt, $blockNumber_gte: BigInt, $blockNumber_lte: BigInt) {
|
103 |
+
delivers(where: {requestId: $requestId, blockNumber_gte: $blockNumber_gte, blockNumber_lte: $blockNumber_lte}, orderBy: blockNumber, first: 1000) {
|
104 |
+
blockNumber
|
105 |
+
blockTimestamp
|
106 |
+
id
|
107 |
+
ipfsHash
|
108 |
+
requestId
|
109 |
+
sender
|
110 |
+
transactionHash
|
111 |
+
}
|
112 |
+
}
|
113 |
+
"""
|
114 |
+
|
115 |
+
|
116 |
+
def collect_all_mech_requests(from_block: int, to_block: int, filename: str) -> Tuple:
|
117 |
+
|
118 |
+
print(f"Fetching all mech requests from {from_block} to {to_block}")
|
119 |
+
mech_requests = {}
|
120 |
+
duplicated_reqIds = []
|
121 |
+
mech_subgraph_url = MECH_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
|
122 |
+
transport = RequestsHTTPTransport(url=mech_subgraph_url)
|
123 |
+
client = Client(transport=transport, fetch_schema_from_transport=True)
|
124 |
+
|
125 |
+
id_gt = "0x00"
|
126 |
+
nr_errors = 0
|
127 |
+
while True:
|
128 |
+
variables = {
|
129 |
+
"sender_not_in": [FPMM_QS_CREATOR, FPMM_PEARL_CREATOR],
|
130 |
+
"id_gt": id_gt,
|
131 |
+
"blockNumber_gte": str(from_block), # str
|
132 |
+
"blockNumber_lte": str(to_block), # str
|
133 |
+
}
|
134 |
+
try:
|
135 |
+
response = fetch_with_retry(client, REQUESTS_QUERY_FILTER, variables)
|
136 |
+
|
137 |
+
items = response.get("requests", [])
|
138 |
+
|
139 |
+
if not items:
|
140 |
+
break
|
141 |
+
|
142 |
+
for mech_request in items:
|
143 |
+
if mech_request["id"] not in mech_requests:
|
144 |
+
mech_requests[mech_request["id"]] = mech_request
|
145 |
+
else:
|
146 |
+
duplicated_reqIds.append(mech_request["id"])
|
147 |
+
except Exception as e:
|
148 |
+
# counter for errors
|
149 |
+
nr_errors += 1
|
150 |
+
print(f"Error while getting the response: {e}")
|
151 |
+
|
152 |
+
id_gt = items[-1]["id"]
|
153 |
+
time.sleep(SUBGRAPH_POLL_INTERVAL)
|
154 |
+
print(f"New execution for id_gt = {id_gt}")
|
155 |
+
if len(duplicated_reqIds) > 0:
|
156 |
+
print(f"Number of duplicated req Ids = {len(duplicated_reqIds)}")
|
157 |
+
save_json_file(mech_requests, filename)
|
158 |
+
|
159 |
+
print(f"Number of requests = {len(mech_requests)}")
|
160 |
+
print(f"Number of duplicated req Ids = {len(duplicated_reqIds)}")
|
161 |
+
save_json_file(mech_requests, filename)
|
162 |
+
return mech_requests, duplicated_reqIds, nr_errors
|
163 |
+
|
164 |
+
|
165 |
+
def fetch_with_retry(client, query, variables, max_retries=5):
|
166 |
+
for attempt in range(max_retries):
|
167 |
+
try:
|
168 |
+
return client.execute(gql(query), variable_values=variables)
|
169 |
+
except Exception as e:
|
170 |
+
if attempt == max_retries - 1:
|
171 |
+
raise e
|
172 |
+
wait_time = (2**attempt) + uniform(0, 1) # exponential backoff with jitter
|
173 |
+
time.sleep(wait_time)
|
174 |
+
|
175 |
+
|
176 |
+
def collect_all_mech_delivers(from_block: int, to_block: int, filename: str) -> Tuple:
|
177 |
+
|
178 |
+
print(f"Fetching all mech delivers from {from_block} to {to_block}")
|
179 |
+
|
180 |
+
mech_delivers = {}
|
181 |
+
duplicated_requestIds = []
|
182 |
+
mech_subgraph_url = MECH_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
|
183 |
+
transport = RequestsHTTPTransport(url=mech_subgraph_url)
|
184 |
+
client = Client(transport=transport, fetch_schema_from_transport=True)
|
185 |
+
to_block = (
|
186 |
+
to_block + MECH_FROM_BLOCK_RANGE
|
187 |
+
) # there is a delay between deliver and request
|
188 |
+
id_gt = ""
|
189 |
+
nr_errors = 0
|
190 |
+
while True:
|
191 |
+
variables = {
|
192 |
+
"id_gt": id_gt,
|
193 |
+
"blockNumber_gte": str(from_block), # str
|
194 |
+
"blockNumber_lte": str(to_block), # str
|
195 |
+
}
|
196 |
+
try:
|
197 |
+
response = fetch_with_retry(client, DELIVERS_QUERY_NO_FILTER, variables)
|
198 |
+
items = response.get("delivers", [])
|
199 |
+
|
200 |
+
if not items:
|
201 |
+
break
|
202 |
+
|
203 |
+
for mech_deliver in items:
|
204 |
+
if mech_deliver["requestId"] not in mech_delivers:
|
205 |
+
mech_delivers[mech_deliver["requestId"]] = [mech_deliver]
|
206 |
+
else:
|
207 |
+
duplicated_requestIds.append(mech_deliver["requestId"])
|
208 |
+
# we will handle the duplicated later
|
209 |
+
except Exception as e:
|
210 |
+
# counter for errors
|
211 |
+
nr_errors += 1
|
212 |
+
print(f"Error while getting the response: {e}")
|
213 |
+
# return None, None
|
214 |
+
|
215 |
+
id_gt = items[-1]["id"]
|
216 |
+
time.sleep(SUBGRAPH_POLL_INTERVAL)
|
217 |
+
print(f"New execution for id_gt = {id_gt}")
|
218 |
+
if len(duplicated_requestIds) > 0:
|
219 |
+
print(f"Number of duplicated request id = {len(duplicated_requestIds)}")
|
220 |
+
save_json_file(mech_delivers, filename)
|
221 |
+
print(f"Number of delivers = {len(mech_delivers)}")
|
222 |
+
print(f"Number of duplicated request id = {len(duplicated_requestIds)}")
|
223 |
+
save_json_file(mech_delivers, filename)
|
224 |
+
return mech_delivers, duplicated_requestIds, nr_errors
|
225 |
+
|
226 |
+
|
227 |
+
def collect_missing_delivers(request_id: int, block_number: int) -> Dict[str, Any]:
|
228 |
+
to_block = (
|
229 |
+
block_number + MECH_FROM_BLOCK_RANGE
|
230 |
+
) # there is a delay between deliver and request
|
231 |
+
print(f"Fetching all missing delivers from {block_number} to {to_block}")
|
232 |
+
mech_delivers = {}
|
233 |
+
mech_subgraph_url = MECH_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
|
234 |
+
transport = RequestsHTTPTransport(url=mech_subgraph_url)
|
235 |
+
client = Client(transport=transport, fetch_schema_from_transport=True)
|
236 |
+
|
237 |
+
variables = {
|
238 |
+
"requestId": request_id,
|
239 |
+
"blockNumber_gte": str(block_number), # str
|
240 |
+
"blockNumber_lte": str(to_block), # str
|
241 |
+
}
|
242 |
+
try:
|
243 |
+
response = fetch_with_retry(client, MISSING_DELIVERS_QUERY, variables)
|
244 |
+
items = response.get("delivers", [])
|
245 |
+
# If the user sends requests with the same values (tool, prompt, nonce) it
|
246 |
+
# will generate the same requestId. Therefore, multiple items can be retrieved
|
247 |
+
# at this point. We assume the most likely deliver to this request is the
|
248 |
+
# one with the closest blockNumber among all delivers with the same requestId.
|
249 |
+
if items:
|
250 |
+
return items[0]
|
251 |
+
except Exception as e:
|
252 |
+
print(f"Error while getting the response: {e}")
|
253 |
+
# TODO count how many mech requests without a deliver do we have
|
254 |
+
|
255 |
+
return mech_delivers
|
256 |
+
|
257 |
+
|
258 |
+
def populate_requests_ipfs_contents(
|
259 |
+
session: requests.Session, mech_requests: Dict[str, Any], keys_to_traverse: list
|
260 |
+
) -> Tuple[dict, int]:
|
261 |
+
updated_dict = {}
|
262 |
+
wrong_response_count = 0
|
263 |
+
for k in tqdm(
|
264 |
+
keys_to_traverse,
|
265 |
+
desc="Fetching IPFS contents for requests",
|
266 |
+
position=1,
|
267 |
+
unit="results",
|
268 |
+
):
|
269 |
+
mech_request = mech_requests[k]
|
270 |
+
|
271 |
+
if "ipfsContents" not in mech_request:
|
272 |
+
ipfs_hash = mech_request["ipfsHash"]
|
273 |
+
url = f"{IPFS_ADDRESS}{ipfs_hash}/metadata.json"
|
274 |
+
response = request(session, url)
|
275 |
+
if response is None:
|
276 |
+
tqdm.write(f"Skipping {mech_request=}. because response was None")
|
277 |
+
wrong_response_count += 1
|
278 |
+
continue
|
279 |
+
try:
|
280 |
+
contents = response.json()
|
281 |
+
if contents["tool"] in IRRELEVANT_TOOLS:
|
282 |
+
continue
|
283 |
+
mech_request["ipfsContents"] = contents
|
284 |
+
except requests.exceptions.JSONDecodeError:
|
285 |
+
tqdm.write(
|
286 |
+
f"Skipping {mech_request} because of JSONDecodeError when parsing response"
|
287 |
+
)
|
288 |
+
wrong_response_count += 1
|
289 |
+
continue
|
290 |
+
updated_dict[k] = mech_request
|
291 |
+
time.sleep(IPFS_POLL_INTERVAL)
|
292 |
+
|
293 |
+
return updated_dict, wrong_response_count
|
294 |
+
|
295 |
+
|
296 |
+
def populate_delivers_ipfs_contents(
|
297 |
+
session: requests.Session, mech_requests: Dict[str, Any], keys_to_traverse: list
|
298 |
+
) -> Tuple[dict, int]:
|
299 |
+
"""Function to complete the delivers content info from ipfs"""
|
300 |
+
updated_dict = {}
|
301 |
+
errors = 0
|
302 |
+
for k in tqdm(
|
303 |
+
keys_to_traverse,
|
304 |
+
desc="Fetching IPFS contents for delivers",
|
305 |
+
position=1,
|
306 |
+
unit="results",
|
307 |
+
):
|
308 |
+
mech_request = mech_requests[k]
|
309 |
+
if "deliver" not in mech_request or len(mech_request["deliver"]) == 0:
|
310 |
+
print(f"Skipping mech request {mech_request} because of no delivers info")
|
311 |
+
continue
|
312 |
+
|
313 |
+
deliver = mech_request["deliver"]
|
314 |
+
if "ipfsContents" not in deliver:
|
315 |
+
ipfs_hash = deliver["ipfsHash"]
|
316 |
+
request_id = deliver["requestId"]
|
317 |
+
url = f"{IPFS_ADDRESS}{ipfs_hash}/{request_id}"
|
318 |
+
response = request(session, url)
|
319 |
+
if response is None:
|
320 |
+
tqdm.write(f"Skipping {mech_request=}.")
|
321 |
+
continue
|
322 |
+
try:
|
323 |
+
contents = response.json()
|
324 |
+
metadata = contents.get("metadata", None)
|
325 |
+
if metadata and contents["metadata"]["tool"] in IRRELEVANT_TOOLS:
|
326 |
+
continue
|
327 |
+
contents.pop("cost_dict", None)
|
328 |
+
deliver["ipfsContents"] = contents
|
329 |
+
except requests.exceptions.JSONDecodeError:
|
330 |
+
tqdm.write(f"Skipping {mech_request} because of JSONDecodeError")
|
331 |
+
continue
|
332 |
+
except Exception:
|
333 |
+
errors += 1
|
334 |
+
tqdm.write(
|
335 |
+
f"Skipping {mech_request} because of error parsing the response"
|
336 |
+
)
|
337 |
+
continue
|
338 |
+
updated_dict[k] = mech_request
|
339 |
+
time.sleep(IPFS_POLL_INTERVAL)
|
340 |
+
|
341 |
+
return updated_dict, errors
|
342 |
+
|
343 |
+
|
344 |
+
def write_mech_events_to_file(
|
345 |
+
mech_requests: Dict[str, Any],
|
346 |
+
filename: str,
|
347 |
+
force_write: bool = False,
|
348 |
+
) -> None:
|
349 |
+
global last_write_time # pylint: disable=global-statement
|
350 |
+
now = time.time()
|
351 |
+
|
352 |
+
if len(mech_requests) == 0:
|
353 |
+
return
|
354 |
+
|
355 |
+
filename_path = ROOT_DIR / filename
|
356 |
+
if force_write or (now - last_write_time) >= MINIMUM_WRITE_FILE_DELAY_SECONDS:
|
357 |
+
with open(filename_path, "w", encoding="utf-8") as file:
|
358 |
+
json.dump(mech_requests, file, indent=2)
|
359 |
+
last_write_time = now
|
360 |
+
|
361 |
+
|
362 |
+
def save_json_file(data: Dict[str, Any], filename: str):
|
363 |
+
"""Function to save the content into a json file"""
|
364 |
+
filename_path = JSON_DATA_DIR / filename
|
365 |
+
with open(filename_path, "w", encoding="utf-8") as file:
|
366 |
+
json.dump(data, file, indent=2)
|
367 |
+
|
368 |
+
|
369 |
+
def merge_json_files(old_file: str, new_file: str):
|
370 |
+
# read old file
|
371 |
+
with open(JSON_DATA_DIR / old_file, "r") as f:
|
372 |
+
old_data = json.load(f)
|
373 |
+
|
374 |
+
# read the new file
|
375 |
+
with open(JSON_DATA_DIR / new_file, "r") as f:
|
376 |
+
new_data = json.load(f)
|
377 |
+
|
378 |
+
# Merge the two JSON files and remove duplicates
|
379 |
+
old_data.update(new_data)
|
380 |
+
|
381 |
+
# Save the merged JSON file
|
382 |
+
print(f"{old_file} updated")
|
383 |
+
save_json_file(old_data, old_file)
|
384 |
+
|
385 |
+
|
386 |
+
def clean_mech_delivers(requests_filename: str, delivers_filename: str) -> None:
|
387 |
+
"""Function to remove from the delivers json file the request Ids that are not in the mech requests"""
|
388 |
+
# read mech requests
|
389 |
+
with open(JSON_DATA_DIR / requests_filename, "r") as file:
|
390 |
+
mech_requests = json.load(file)
|
391 |
+
|
392 |
+
req_ids = {mech_requests[k].get("requestId") for k in mech_requests}
|
393 |
+
|
394 |
+
# remove requestIds from delivers that are not in this list
|
395 |
+
with open(JSON_DATA_DIR / delivers_filename, "r") as file:
|
396 |
+
mech_delivers = json.load(file)
|
397 |
+
|
398 |
+
print(f"original size of the file {len(mech_delivers)}")
|
399 |
+
mech_delivers = {
|
400 |
+
k: v
|
401 |
+
for k, v in tqdm(
|
402 |
+
mech_delivers.items(),
|
403 |
+
total=len(mech_delivers),
|
404 |
+
desc="Filtering delivers dictionary",
|
405 |
+
)
|
406 |
+
if k in req_ids
|
407 |
+
}
|
408 |
+
|
409 |
+
print(f"final size of the file {len(mech_delivers)}")
|
410 |
+
save_json_file(mech_delivers, delivers_filename)
|
411 |
+
|
412 |
+
|
413 |
+
def get_request_block_numbers(
|
414 |
+
mech_requests: Dict[str, Any], target_req_id: int
|
415 |
+
) -> list:
|
416 |
+
block_numbers = []
|
417 |
+
|
418 |
+
for entry in mech_requests.values():
|
419 |
+
if entry["requestId"] == target_req_id:
|
420 |
+
block_numbers.append(entry["blockNumber"])
|
421 |
+
|
422 |
+
return block_numbers
|
423 |
+
|
424 |
+
|
425 |
+
def update_block_request_map(block_request_id_map: dict) -> None:
|
426 |
+
print("Saving block request id map info")
|
427 |
+
with open(JSON_DATA_DIR / "block_request_id_map.pickle", "wb") as handle:
|
428 |
+
pickle.dump(block_request_id_map, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
429 |
+
|
430 |
+
|
431 |
+
def fix_duplicate_requestIds(requests_filename: str, delivers_filename: str) -> dict:
|
432 |
+
print("Fix duplicated request Ids")
|
433 |
+
with open(JSON_DATA_DIR / delivers_filename, "r") as file:
|
434 |
+
data_delivers = json.load(file)
|
435 |
+
|
436 |
+
with open(JSON_DATA_DIR / requests_filename, "r") as file:
|
437 |
+
mech_requests = json.load(file)
|
438 |
+
list_request_Ids = list(data_delivers.keys())
|
439 |
+
|
440 |
+
list_duplicated_reqIds = []
|
441 |
+
for req_Id in list_request_Ids:
|
442 |
+
if len(data_delivers.get(req_Id)) > 1:
|
443 |
+
list_duplicated_reqIds.append(req_Id)
|
444 |
+
|
445 |
+
print(f"Number of duplicated request Ids = {len(list_duplicated_reqIds)}")
|
446 |
+
block_request_id_map = {}
|
447 |
+
|
448 |
+
for req_Id in list_duplicated_reqIds:
|
449 |
+
# get the list of mech request block numbers for that requestId
|
450 |
+
block_nrs = get_request_block_numbers(mech_requests, req_Id)
|
451 |
+
# get the list of mech delivers
|
452 |
+
mech_delivers_list = data_delivers.get(req_Id) # list of dictionaries
|
453 |
+
if len(block_nrs) > 1:
|
454 |
+
print("More than one block number was found")
|
455 |
+
for block_nr in block_nrs:
|
456 |
+
key = (block_nr, req_Id)
|
457 |
+
min_difference_request = min(
|
458 |
+
mech_delivers_list,
|
459 |
+
key=lambda x: abs(int(x["blockNumber"]) - int(block_nr)),
|
460 |
+
)
|
461 |
+
block_request_id_map[key] = min_difference_request
|
462 |
+
|
463 |
+
update_block_request_map(block_request_id_map)
|
464 |
+
|
465 |
+
return block_request_id_map
|
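# A minimal illustration of the disambiguation above: for a duplicated requestId,
# the deliver whose blockNumber is closest to the request's block wins. The data
# below is hypothetical.
_delivers = [{"blockNumber": "100"}, {"blockNumber": "180"}, {"blockNumber": "400"}]
_request_block = 190
_closest = min(_delivers, key=lambda x: abs(int(x["blockNumber"]) - _request_block))
# _closest == {"blockNumber": "180"}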
466 |
+
|
467 |
+
|
468 |
+
def merge_requests_delivers(
|
469 |
+
requests_filename: str, delivers_filename: str, filename: str
|
470 |
+
) -> None:
|
471 |
+
print("Merge request delivers")
|
472 |
+
"""Function to map requests and delivers"""
|
473 |
+
with open(JSON_DATA_DIR / delivers_filename, "r") as file:
|
474 |
+
mech_delivers = json.load(file)
|
475 |
+
|
476 |
+
with open(JSON_DATA_DIR / requests_filename, "r") as file:
|
477 |
+
mech_requests = json.load(file)
|
478 |
+
|
479 |
+
# read the block map for duplicated requestIds
|
480 |
+
with open(JSON_DATA_DIR / "block_request_id_map.pickle", "rb") as handle:
|
481 |
+
# key = (block_nr, req_Id) value = delivers dictionary
|
482 |
+
block_request_id_map = pickle.load(handle)
|
483 |
+
for _, mech_req in tqdm(
|
484 |
+
mech_requests.items(),
|
485 |
+
desc=f"Merging delivers data into the mech requests",
|
486 |
+
):
|
487 |
+
if "deliver" in mech_req:
|
488 |
+
continue
|
489 |
+
|
490 |
+
block_number_req = mech_req["blockNumber"]
|
491 |
+
req_Id = mech_req["requestId"]
|
492 |
+
# check if it is in the duplicated map
|
493 |
+
key = (block_number_req, req_Id)
|
494 |
+
if key in block_request_id_map.keys():
|
495 |
+
deliver_dict = block_request_id_map[key]
|
496 |
+
elif req_Id in mech_delivers.keys():
|
497 |
+
deliver_dict = mech_delivers.get(req_Id)[0] # the value is a list
|
498 |
+
else:
|
499 |
+
print("No deliver entry found for this request Id")
|
500 |
+
deliver_dict = collect_missing_delivers(
|
501 |
+
request_id=req_Id, block_number=int(block_number_req)
|
502 |
+
)
|
503 |
+
|
504 |
+
# extract the info and append it to the original mech request dictionary
|
505 |
+
mech_req["deliver"] = deliver_dict
|
506 |
+
save_json_file(mech_requests, filename)
|
507 |
+
return
|
508 |
+
|
509 |
+
|
510 |
+
def get_ipfs_data(input_filename: str, output_filename: str, logger):
|
511 |
+
with open(JSON_DATA_DIR / input_filename, "r") as file:
|
512 |
+
mech_requests = json.load(file)
|
513 |
+
|
514 |
+
total_keys_to_traverse = list(mech_requests.keys())
|
515 |
+
updated_mech_requests = dict()
|
516 |
+
session = create_session()
|
517 |
+
logger.info("UPDATING IPFS CONTENTS OF REQUESTS")
|
518 |
+
# requests
|
519 |
+
nr_errors = 0
|
520 |
+
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
|
521 |
+
futures = []
|
522 |
+
for i in range(0, len(mech_requests), GET_CONTENTS_BATCH_SIZE):
|
523 |
+
futures.append(
|
524 |
+
executor.submit(
|
525 |
+
populate_requests_ipfs_contents,
|
526 |
+
session,
|
527 |
+
mech_requests,
|
528 |
+
total_keys_to_traverse[i : i + GET_CONTENTS_BATCH_SIZE],
|
529 |
+
)
|
530 |
+
)
|
531 |
+
|
532 |
+
for future in tqdm(
|
533 |
+
as_completed(futures),
|
534 |
+
total=len(futures),
|
535 |
+
desc=f"Fetching all ipfs contents from requests ",
|
536 |
+
):
|
537 |
+
partial_dict, error_counter = future.result()
|
538 |
+
nr_errors += error_counter
|
539 |
+
updated_mech_requests.update(partial_dict)
|
540 |
+
|
541 |
+
save_json_file(updated_mech_requests, output_filename)
|
542 |
+
logger.info(f"NUMBER OF MECH REQUEST IPFS ERRORS={nr_errors}")
|
543 |
+
|
544 |
+
# delivers
|
545 |
+
nr_deliver_errors = 0
|
546 |
+
logger.info("UPDATING IPFS CONTENTS OF DELIVERS")
|
547 |
+
total_keys_to_traverse = list(updated_mech_requests.keys())
|
548 |
+
final_tools_content = {}
|
549 |
+
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
|
550 |
+
futures = []
|
551 |
+
for i in range(0, len(updated_mech_requests), GET_CONTENTS_BATCH_SIZE):
|
552 |
+
futures.append(
|
553 |
+
executor.submit(
|
554 |
+
populate_delivers_ipfs_contents,
|
555 |
+
session,
|
556 |
+
updated_mech_requests,
|
557 |
+
total_keys_to_traverse[i : i + GET_CONTENTS_BATCH_SIZE],
|
558 |
+
)
|
559 |
+
)
|
560 |
+
|
561 |
+
for future in tqdm(
|
562 |
+
as_completed(futures),
|
563 |
+
total=len(futures),
|
564 |
+
desc=f"Fetching all ipfs contents from delivers ",
|
565 |
+
):
|
566 |
+
partial_dict, error_counter = future.result()
|
567 |
+
nr_deliver_errors += error_counter
|
568 |
+
final_tools_content.update(partial_dict)
|
569 |
+
|
570 |
+
save_json_file(final_tools_content, output_filename)
|
571 |
+
logger.info(f"NUMBER OF MECH DELIVERS IPFS ERRORS={nr_deliver_errors}")
|
572 |
+
|
573 |
+
|
574 |
+
def only_delivers_loop():
|
575 |
+
with open(ROOT_DIR / "tools_info.json", "r") as file:
|
576 |
+
updated_mech_requests = json.load(file)
|
577 |
+
|
578 |
+
# delivers
|
579 |
+
session = create_session()
|
580 |
+
print("UPDATING IPFS CONTENTS OF DELIVERS")
|
581 |
+
total_keys_to_traverse = list(updated_mech_requests.keys())
|
582 |
+
final_tools_content = {}
|
583 |
+
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
|
584 |
+
futures = []
|
585 |
+
for i in range(0, len(updated_mech_requests), GET_CONTENTS_BATCH_SIZE):
|
586 |
+
futures.append(
|
587 |
+
executor.submit(
|
588 |
+
populate_delivers_ipfs_contents,
|
589 |
+
session,
|
590 |
+
updated_mech_requests,
|
591 |
+
total_keys_to_traverse[i : i + GET_CONTENTS_BATCH_SIZE],
|
592 |
+
)
|
593 |
+
)
|
594 |
+
|
595 |
+
for future in tqdm(
|
596 |
+
as_completed(futures),
|
597 |
+
total=len(futures),
|
598 |
+
desc=f"Fetching all ipfs contents from delivers ",
|
599 |
+
):
|
600 |
+
partial_dict, _ = future.result()
|
601 |
+
final_tools_content.update(partial_dict)
|
602 |
+
|
603 |
+
save_json_file(final_tools_content, "tools_info.json")
|
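# A small sketch of the back-off used by `fetch_with_retry` above: between attempts
# it sleeps (2 ** attempt) seconds plus up to one second of jitter, i.e. roughly
# 1, 2, 4, 8 seconds for max_retries=5. `backoff_schedule` is a hypothetical helper.
def backoff_schedule(max_retries: int = 5) -> list:
    """Return the sleep durations used between retry attempts."""
    return [(2**attempt) + uniform(0, 1) for attempt in range(max_retries - 1)]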
scripts/nr_mech_calls.py
ADDED
@@ -0,0 +1,268 @@
1 |
+
import pandas as pd
|
2 |
+
from utils import ROOT_DIR, DEFAULT_MECH_FEE, TMP_DIR, transform_to_datetime
|
3 |
+
from tqdm import tqdm
|
4 |
+
|
5 |
+
from typing import Dict, Any
|
6 |
+
from collections import defaultdict
|
7 |
+
from tools import IRRELEVANT_TOOLS
|
8 |
+
import re
|
9 |
+
|
10 |
+
|
11 |
+
def update_roi(row: pd.Series) -> float:
|
12 |
+
new_value = row.net_earnings / (
|
13 |
+
row.collateral_amount
|
14 |
+
+ row.trade_fee_amount
|
15 |
+
+ row.num_mech_calls * DEFAULT_MECH_FEE
|
16 |
+
)
|
17 |
+
return new_value
|
18 |
+
|
19 |
+
|
20 |
+
def get_mech_statistics(mech_requests: Dict[str, Any]) -> Dict[str, Dict[str, int]]:
|
21 |
+
"""Outputs a table with Mech statistics"""
|
22 |
+
|
23 |
+
mech_statistics: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
|
24 |
+
|
25 |
+
for mech_request in mech_requests.values():
|
26 |
+
if (
|
27 |
+
"ipfs_contents" not in mech_request
|
28 |
+
or "tool" not in mech_request["ipfs_contents"]
|
29 |
+
or "prompt" not in mech_request["ipfs_contents"]
|
30 |
+
):
|
31 |
+
continue
|
32 |
+
|
33 |
+
if mech_request["ipfs_contents"]["tool"] in IRRELEVANT_TOOLS:
|
34 |
+
continue
|
35 |
+
|
36 |
+
prompt = mech_request["ipfs_contents"]["prompt"]
|
37 |
+
prompt = prompt.replace("\n", " ")
|
38 |
+
prompt = prompt.strip()
|
39 |
+
prompt = re.sub(r"\s+", " ", prompt)
|
40 |
+
prompt_match = re.search(r"\"(.*)\"", prompt)
|
41 |
+
if prompt_match:
|
42 |
+
question = prompt_match.group(1)
|
43 |
+
else:
|
44 |
+
question = prompt
|
45 |
+
|
46 |
+
mech_statistics[question]["count"] += 1
|
47 |
+
mech_statistics[question]["fees"] += mech_request["fee"]
|
48 |
+
|
49 |
+
return mech_statistics
|
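# A hypothetical prompt showing how `get_mech_statistics` normalises whitespace and
# keeps only the quoted market question when one is present.
_prompt = 'Answer the question\n "Will ETH close above $4k?"  with a probability.'
_clean = re.sub(r"\s+", " ", _prompt.replace("\n", " ").strip())
_match = re.search(r"\"(.*)\"", _clean)
_question = _match.group(1) if _match else _clean
# _question == "Will ETH close above $4k?"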
50 |
+
|
51 |
+
|
52 |
+
def create_unknown_traders_df(trades_df: pd.DataFrame) -> tuple:
|
53 |
+
"""filter trades coming from non-Olas traders that are placing no mech calls"""
|
54 |
+
no_mech_calls_mask = (trades_df["staking"] == "non_Olas") & (
|
55 |
+
trades_df["num_mech_calls"] == 0
|
56 |
+
)
|
57 |
+
no_mech_calls_df = trades_df.loc[no_mech_calls_mask]
|
58 |
+
trades_df = trades_df.loc[~no_mech_calls_mask]
|
59 |
+
return no_mech_calls_df, trades_df
|
60 |
+
|
61 |
+
|
62 |
+
def update_trade_nr_mech_calls(non_agents: bool = False):
|
63 |
+
try:
|
64 |
+
all_trades_df = pd.read_parquet(ROOT_DIR / "all_trades_profitability.parquet")
|
65 |
+
tools = pd.read_parquet(ROOT_DIR / "tools.parquet")
|
66 |
+
except Exception as e:
|
67 |
+
print(f"Error reading the profitability and tools parquet files")
|
68 |
+
|
69 |
+
traders = list(all_trades_df.trader_address.unique())
|
70 |
+
if non_agents:
|
71 |
+
traders = list(
|
72 |
+
all_trades_df.loc[
|
73 |
+
all_trades_df["staking"] == "non_agent"
|
74 |
+
].trader_address.unique()
|
75 |
+
)
|
76 |
+
|
77 |
+
print("before updating")
|
78 |
+
print(
|
79 |
+
all_trades_df.loc[
|
80 |
+
all_trades_df["staking"] == "non_agent"
|
81 |
+
].num_mech_calls.describe()
|
82 |
+
)
|
83 |
+
for trader in tqdm(traders, desc=f"Updating Traders mech calls", unit="traders"):
|
84 |
+
tools_usage = tools[tools["trader_address"] == trader]
|
85 |
+
if len(tools_usage) == 0:
|
86 |
+
tqdm.write(f"trader with no tools usage found {trader}")
|
87 |
+
all_trades_df.loc[
|
88 |
+
all_trades_df["trader_address"] == trader, "nr_mech_calls"
|
89 |
+
] = 0
|
90 |
+
# update roi
|
91 |
+
all_trades_df["roi"] = all_trades_df.apply(lambda x: update_roi(x), axis=1)
|
92 |
+
print("after updating")
|
93 |
+
print(
|
94 |
+
all_trades_df.loc[
|
95 |
+
all_trades_df["staking"] == "non_agent"
|
96 |
+
].num_mech_calls.describe()
|
97 |
+
)
|
98 |
+
|
99 |
+
# saving
|
100 |
+
all_trades_df.to_parquet(ROOT_DIR / "all_trades_profitability.parquet", index=False)
|
101 |
+
|
102 |
+
|
103 |
+
def get_daily_mech_calls_estimation(
|
104 |
+
daily_trades: pd.DataFrame, daily_tools: pd.DataFrame
|
105 |
+
) -> list:
|
106 |
+
# for each market
|
107 |
+
daily_markets = daily_trades.title.unique()
|
108 |
+
trader = daily_trades.iloc[0].trader_address
|
109 |
+
day = daily_trades.iloc[0].creation_date
|
110 |
+
estimations = []
|
111 |
+
for market in daily_markets:
|
112 |
+
estimation_dict = {}
|
113 |
+
estimation_dict["trader_address"] = trader
|
114 |
+
estimation_dict["trading_day"] = day
|
115 |
+
# tools usage of this market
|
116 |
+
market_requests = daily_tools.loc[daily_tools["title"] == market]
|
117 |
+
# trades done on this market
|
118 |
+
market_trades = daily_trades[daily_trades["title"] == market]
|
119 |
+
mech_calls_estimation = 0
|
120 |
+
total_trades = len(market_trades)
|
121 |
+
total_requests = 0
|
122 |
+
if len(market_requests) > 0:
|
123 |
+
total_requests = len(market_requests)
|
124 |
+
mech_calls_estimation = total_requests / total_trades
|
125 |
+
estimation_dict["total_trades"] = total_trades
|
126 |
+
estimation_dict["total_mech_requests"] = total_requests
|
127 |
+
estimation_dict["market"] = market
|
128 |
+
estimation_dict["mech_calls_per_trade"] = mech_calls_estimation
|
129 |
+
estimations.append(estimation_dict)
|
130 |
+
return estimations
|
131 |
+
|
132 |
+
|
133 |
+
def compute_daily_mech_calls(
|
134 |
+
fpmmTrades: pd.DataFrame, tools: pd.DataFrame
|
135 |
+
) -> pd.DataFrame:
|
136 |
+
"""Function to compute the daily mech calls at the trader and market level"""
|
137 |
+
nr_traders = len(fpmmTrades["trader_address"].unique())
|
138 |
+
fpmmTrades["creation_timestamp"] = pd.to_datetime(fpmmTrades["creationTimestamp"])
|
139 |
+
fpmmTrades["creation_date"] = fpmmTrades["creation_timestamp"].dt.date
|
140 |
+
fpmmTrades = fpmmTrades.sort_values(by="creation_timestamp", ascending=True)
|
141 |
+
tools["request_time"] = pd.to_datetime(tools["request_time"])
|
142 |
+
tools["request_date"] = tools["request_time"].dt.date
|
143 |
+
tools = tools.sort_values(by="request_time", ascending=True)
|
144 |
+
all_mech_calls = []
|
145 |
+
for trader in tqdm(
|
146 |
+
fpmmTrades["trader_address"].unique(),
|
147 |
+
total=nr_traders,
|
148 |
+
desc="creating daily mech calls computation",
|
149 |
+
):
|
150 |
+
# compute the mech calls estimations for each trader
|
151 |
+
all_trades = fpmmTrades[fpmmTrades["trader_address"] == trader]
|
152 |
+
all_tools = tools[tools["trader_address"] == trader]
|
153 |
+
trading_days = all_trades.creation_date.unique()
|
154 |
+
for trading_day in trading_days:
|
155 |
+
daily_trades = all_trades.loc[all_trades["creation_date"] == trading_day]
|
156 |
+
daily_tools = all_tools.loc[all_tools["request_date"] == trading_day]
|
157 |
+
trader_entry = {}
|
158 |
+
trader_entry["trader_address"] = trader
|
159 |
+
trader_entry["total_trades"] = len(daily_trades)
|
160 |
+
trader_entry["trading_day"] = trading_day
|
161 |
+
trader_entry["total_mech_calls"] = len(daily_tools)
|
162 |
+
all_mech_calls.append(trader_entry)
|
163 |
+
return pd.DataFrame.from_dict(all_mech_calls, orient="columns")
|
164 |
+
|
165 |
+
|
166 |
+
def compute_mech_call_estimations(
|
167 |
+
fpmmTrades: pd.DataFrame, tools: pd.DataFrame
|
168 |
+
) -> pd.DataFrame:
|
169 |
+
"""Function to compute the estimated mech calls needed per trade at the trader and market level"""
|
170 |
+
nr_traders = len(fpmmTrades["trader_address"].unique())
|
171 |
+
fpmmTrades["creation_timestamp"] = pd.to_datetime(fpmmTrades["creationTimestamp"])
|
172 |
+
fpmmTrades["creation_date"] = fpmmTrades["creation_timestamp"].dt.date
|
173 |
+
tools["request_time"] = pd.to_datetime(tools["request_time"])
|
174 |
+
tools["request_date"] = tools["request_time"].dt.date
|
175 |
+
all_estimations = []
|
176 |
+
for trader in tqdm(
|
177 |
+
fpmmTrades["trader_address"].unique(),
|
178 |
+
total=nr_traders,
|
179 |
+
desc="creating mech calls estimation dataframe",
|
180 |
+
):
|
181 |
+
# compute the mech calls estimations for each trader
|
182 |
+
all_trades = fpmmTrades[fpmmTrades["trader_address"] == trader]
|
183 |
+
all_tools = tools[tools["trader_address"] == trader]
|
184 |
+
trading_days = all_trades.creation_date.unique()
|
185 |
+
for trading_day in trading_days:
|
186 |
+
daily_trades = all_trades.loc[all_trades["creation_date"] == trading_day]
|
187 |
+
daily_tools = all_tools.loc[all_tools["request_date"] == trading_day]
|
188 |
+
daily_estimations = get_daily_mech_calls_estimation(
|
189 |
+
daily_trades=daily_trades, daily_tools=daily_tools
|
190 |
+
)
|
191 |
+
all_estimations.extend(daily_estimations)
|
192 |
+
return pd.DataFrame.from_dict(all_estimations, orient="columns")
|
193 |
+
|
194 |
+
|
195 |
+
def compute_timestamp_mech_calls(
|
196 |
+
all_trades: pd.DataFrame, all_tools: pd.DataFrame
|
197 |
+
) -> list:
|
198 |
+
"""Function to compute the mech calls based on timestamps but without repeating mech calls"""
|
199 |
+
mech_calls_contents = []
|
200 |
+
request_timestamps_used = {}
|
201 |
+
# initialize the dict with all markets
|
202 |
+
all_markets = all_trades.title.unique()
|
203 |
+
for market in all_markets:
|
204 |
+
request_timestamps_used[market] = []
|
205 |
+
|
206 |
+
for i, trade in all_trades.iterrows():
|
207 |
+
trader = trade["trader_address"]
|
208 |
+
trade_id = trade["id"]
|
209 |
+
market = trade["title"]
|
210 |
+
trade_ts = trade["creation_timestamp"]
|
211 |
+
market_requests = all_tools.loc[
|
212 |
+
(all_tools["trader_address"] == trader) & (all_tools["title"] == market)
|
213 |
+
]
|
214 |
+
# traverse market requests
|
215 |
+
total_mech_calls = 0
|
216 |
+
for i, mech_request in market_requests.iterrows():
|
217 |
+
# check timestamp (before the trade)
|
218 |
+
request_ts = mech_request["request_time"]
|
219 |
+
if request_ts < trade_ts:
|
220 |
+
# check the timestamp has not been used in a previous trade
|
221 |
+
used_timestamps = request_timestamps_used[market]
|
222 |
+
if request_ts not in used_timestamps:
|
223 |
+
request_timestamps_used[market].append(request_ts)
|
224 |
+
total_mech_calls += 1
|
225 |
+
# create entry for the dataframe
|
226 |
+
mech_call_entry = {}
|
227 |
+
mech_call_entry["trader_address"] = trader
|
228 |
+
mech_call_entry["market"] = market
|
229 |
+
mech_call_entry["trade_id"] = trade_id
|
230 |
+
mech_call_entry["total_mech_calls"] = total_mech_calls
|
231 |
+
mech_calls_contents.append(mech_call_entry)
|
232 |
+
return mech_calls_contents
|
233 |
+
|
234 |
+
|
235 |
+
def compute_mech_calls_based_on_timestamps(
|
236 |
+
fpmmTrades: pd.DataFrame, tools: pd.DataFrame
|
237 |
+
) -> pd.DataFrame:
|
238 |
+
"""Function to compute the mech calls needed per trade at the trader and market level using timestamps"""
|
239 |
+
nr_traders = len(fpmmTrades["trader_address"].unique())
|
240 |
+
fpmmTrades["creation_timestamp"] = pd.to_datetime(fpmmTrades["creationTimestamp"])
|
241 |
+
fpmmTrades["creation_date"] = fpmmTrades["creation_timestamp"].dt.date
|
242 |
+
fpmmTrades = fpmmTrades.sort_values(by="creation_timestamp", ascending=True)
|
243 |
+
tools["request_time"] = pd.to_datetime(tools["request_time"])
|
244 |
+
tools["request_date"] = tools["request_time"].dt.date
|
245 |
+
tools = tools.sort_values(by="request_time", ascending=True)
|
246 |
+
all_mech_calls = []
|
247 |
+
for trader in tqdm(
|
248 |
+
fpmmTrades["trader_address"].unique(),
|
249 |
+
total=nr_traders,
|
250 |
+
desc="creating mech calls count based on timestamps",
|
251 |
+
):
|
252 |
+
# compute the mech calls for each trader
|
253 |
+
all_trades = fpmmTrades[fpmmTrades["trader_address"] == trader]
|
254 |
+
all_tools = tools[tools["trader_address"] == trader]
|
255 |
+
trader_mech_calls = compute_timestamp_mech_calls(all_trades, all_tools)
|
256 |
+
all_mech_calls.extend(trader_mech_calls)
|
257 |
+
return pd.DataFrame.from_dict(all_mech_calls, orient="columns")
|
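# A tiny synthetic check of the timestamp-based counting above, using hypothetical
# data: a mech request is only attributed to the first trade on the same market
# placed after it, so the second trade gets a count of zero.
_trades = pd.DataFrame(
    {
        "trader_address": ["0xabc", "0xabc"],
        "id": ["t1", "t2"],
        "title": ["market A", "market A"],
        "creation_timestamp": pd.to_datetime(["2024-01-01 10:00", "2024-01-01 12:00"]),
    }
)
_requests = pd.DataFrame(
    {
        "trader_address": ["0xabc"],
        "title": ["market A"],
        "request_time": pd.to_datetime(["2024-01-01 09:00"]),
    }
)
# compute_timestamp_mech_calls(_trades, _requests) -> total_mech_calls 1 for t1, 0 for t2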
258 |
+
|
259 |
+
|
260 |
+
if __name__ == "__main__":
|
261 |
+
# update_trade_nr_mech_calls(non_agents=True)
|
262 |
+
tools = pd.read_parquet(TMP_DIR / "tools.parquet")
|
263 |
+
fpmmTrades = pd.read_parquet(TMP_DIR / "fpmmTrades.parquet")
|
264 |
+
fpmmTrades["creationTimestamp"] = fpmmTrades["creationTimestamp"].apply(
|
265 |
+
lambda x: transform_to_datetime(x)
|
266 |
+
)
|
267 |
+
result = compute_mech_calls_based_on_timestamps(fpmmTrades=fpmmTrades, tools=tools)
|
268 |
+
result.to_parquet(TMP_DIR / "result_df.parquet", index=False)
|
scripts/num_mech_calls.py
ADDED
@@ -0,0 +1,94 @@
1 |
+
import pandas as pd
|
2 |
+
|
3 |
+
try:
|
4 |
+
from utils import ROOT_DIR, TMP_DIR
|
5 |
+
except ImportError:
|
6 |
+
from scripts.utils import ROOT_DIR, TMP_DIR
|
7 |
+
|
8 |
+
from datetime import datetime, timezone
|
9 |
+
from tqdm import tqdm
|
10 |
+
|
11 |
+
|
12 |
+
def transform_to_datetime(x):
|
13 |
+
return datetime.fromtimestamp(int(x), tz=timezone.utc)
|
14 |
+
|
15 |
+
|
16 |
+
def compute_weekly_total_mech_calls(
|
17 |
+
trader: str, week: str, weekly_trades: pd.DataFrame, weekly_tools: pd.DataFrame
|
18 |
+
) -> dict:
|
19 |
+
weekly_total_mech_calls_dict = {}
|
20 |
+
weekly_total_mech_calls_dict["trader_address"] = trader
|
21 |
+
weekly_total_mech_calls_dict["month_year_week"] = week
|
22 |
+
weekly_total_mech_calls_dict["total_trades"] = len(weekly_trades)
|
23 |
+
weekly_total_mech_calls_dict["total_mech_calls"] = len(weekly_tools)
|
24 |
+
return weekly_total_mech_calls_dict
|
25 |
+
|
26 |
+
|
27 |
+
def compute_total_mech_calls():
|
28 |
+
"""Function to compute the total number of mech calls for all traders and all markets
|
29 |
+
at a weekly level"""
|
30 |
+
try:
|
31 |
+
print("Reading tools file")
|
32 |
+
tools = pd.read_parquet(TMP_DIR / "tools.parquet")
|
33 |
+
tools["request_time"] = pd.to_datetime(tools["request_time"])
|
34 |
+
tools["request_date"] = tools["request_time"].dt.date
|
35 |
+
tools = tools.sort_values(by="request_time", ascending=True)
|
36 |
+
tools["month_year_week"] = (
|
37 |
+
tools["request_time"]
|
38 |
+
.dt.to_period("W")
|
39 |
+
.dt.start_time.dt.strftime("%b-%d-%Y")
|
40 |
+
)
|
41 |
+
|
42 |
+
except Exception as e:
|
43 |
+
print(f"Error updating the invalid trades parquet {e}")
|
44 |
+
|
45 |
+
print("Reading trades weekly info file")
|
46 |
+
fpmmTrades = pd.read_parquet(TMP_DIR / "fpmmTrades.parquet")
|
47 |
+
try:
|
48 |
+
fpmmTrades["creationTimestamp"] = fpmmTrades["creationTimestamp"].apply(
|
49 |
+
lambda x: transform_to_datetime(x)
|
50 |
+
)
|
51 |
+
except Exception as e:
|
52 |
+
print(f"Transformation not needed")
|
53 |
+
|
54 |
+
fpmmTrades["creation_timestamp"] = pd.to_datetime(fpmmTrades["creationTimestamp"])
|
55 |
+
fpmmTrades["creation_date"] = fpmmTrades["creation_timestamp"].dt.date
|
56 |
+
fpmmTrades = fpmmTrades.sort_values(by="creation_timestamp", ascending=True)
|
57 |
+
fpmmTrades["month_year_week"] = (
|
58 |
+
fpmmTrades["creation_timestamp"]
|
59 |
+
.dt.to_period("W")
|
60 |
+
.dt.start_time.dt.strftime("%b-%d-%Y")
|
61 |
+
)
|
62 |
+
|
63 |
+
nr_traders = len(fpmmTrades["trader_address"].unique())
|
64 |
+
all_mech_calls = []
|
65 |
+
for trader in tqdm(
|
66 |
+
fpmmTrades["trader_address"].unique(),
|
67 |
+
total=nr_traders,
|
68 |
+
desc="creating weekly mech calls dataframe",
|
69 |
+
):
|
70 |
+
# compute the mech calls estimations for each trader
|
71 |
+
all_trades = fpmmTrades[fpmmTrades["trader_address"] == trader]
|
72 |
+
all_tools = tools[tools["trader_address"] == trader]
|
73 |
+
weeks = fpmmTrades.month_year_week.unique()
|
74 |
+
|
75 |
+
for week in weeks:
|
76 |
+
weekly_trades = all_trades.loc[all_trades["month_year_week"] == week]
|
77 |
+
weekly_tools = all_tools.loc[all_tools["month_year_week"] == week]
|
78 |
+
|
79 |
+
weekly_mech_calls_dict = compute_weekly_total_mech_calls(
|
80 |
+
trader, week, weekly_trades, weekly_tools
|
81 |
+
)
|
82 |
+
all_mech_calls.append(weekly_mech_calls_dict)
|
83 |
+
|
84 |
+
all_mech_calls_df: pd.DataFrame = pd.DataFrame.from_dict(
|
85 |
+
all_mech_calls, orient="columns"
|
86 |
+
)
|
87 |
+
print("Saving weekly_mech_calls.parquet file")
|
88 |
+
print(all_mech_calls_df.total_mech_calls.describe())
|
89 |
+
|
90 |
+
all_mech_calls_df.to_parquet(ROOT_DIR / "weekly_mech_calls.parquet", index=False)
|
91 |
+
|
92 |
+
|
93 |
+
if __name__ == "__main__":
|
94 |
+
compute_total_mech_calls()
|
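The `month_year_week` bucketing used above (and in several other scripts here) maps every timestamp to the start of its pandas weekly period; a small self-contained illustration with made-up timestamps:

import pandas as pd

timestamps = pd.Series(
    pd.to_datetime(["2024-01-03 09:00", "2024-01-07 23:00", "2024-01-08 00:30"])
)
month_year_week = timestamps.dt.to_period("W").dt.start_time.dt.strftime("%b-%d-%Y")
print(month_year_week.tolist())  # ['Jan-01-2024', 'Jan-01-2024', 'Jan-08-2024']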
scripts/profitability.py
ADDED
@@ -0,0 +1,528 @@
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
# ------------------------------------------------------------------------------
|
3 |
+
#
|
4 |
+
# Copyright 2023 Valory AG
|
5 |
+
#
|
6 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
7 |
+
# you may not use this file except in compliance with the License.
|
8 |
+
# You may obtain a copy of the License at
|
9 |
+
#
|
10 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
11 |
+
#
|
12 |
+
# Unless required by applicable law or agreed to in writing, software
|
13 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
14 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
15 |
+
# See the License for the specific language governing permissions and
|
16 |
+
# limitations under the License.
|
17 |
+
#
|
18 |
+
# ------------------------------------------------------------------------------
|
19 |
+
|
20 |
+
import time
|
21 |
+
import pandas as pd
|
22 |
+
from typing import Any
|
23 |
+
from enum import Enum
|
24 |
+
from tqdm import tqdm
|
25 |
+
import numpy as np
|
26 |
+
from web3_utils import query_conditional_tokens_gc_subgraph
|
27 |
+
from get_mech_info import (
|
28 |
+
DATETIME_60_DAYS_AGO,
|
29 |
+
update_tools_parquet,
|
30 |
+
update_all_trades_parquet,
|
31 |
+
)
|
32 |
+
from utils import (
|
33 |
+
wei_to_unit,
|
34 |
+
convert_hex_to_int,
|
35 |
+
JSON_DATA_DIR,
|
36 |
+
ROOT_DIR,
|
37 |
+
DEFAULT_MECH_FEE,
|
38 |
+
TMP_DIR,
|
39 |
+
measure_execution_time,
|
40 |
+
)
|
41 |
+
from staking import label_trades_by_staking
|
42 |
+
from nr_mech_calls import (
|
43 |
+
create_unknown_traders_df,
|
44 |
+
transform_to_datetime,
|
45 |
+
compute_mech_calls_based_on_timestamps,
|
46 |
+
)
|
47 |
+
|
48 |
+
DUST_THRESHOLD = 10000000000000
|
49 |
+
INVALID_ANSWER = -1
|
50 |
+
DEFAULT_60_DAYS_AGO_TIMESTAMP = (DATETIME_60_DAYS_AGO).timestamp()
|
51 |
+
WXDAI_CONTRACT_ADDRESS = "0xe91D153E0b41518A2Ce8Dd3D7944Fa863463a97d"
|
52 |
+
DUST_THRESHOLD = 10000000000000
|
53 |
+
|
54 |
+
|
55 |
+
class MarketState(Enum):
|
56 |
+
"""Market state"""
|
57 |
+
|
58 |
+
OPEN = 1
|
59 |
+
PENDING = 2
|
60 |
+
FINALIZING = 3
|
61 |
+
ARBITRATING = 4
|
62 |
+
CLOSED = 5
|
63 |
+
|
64 |
+
def __str__(self) -> str:
|
65 |
+
"""Prints the market status."""
|
66 |
+
return self.name.capitalize()
|
67 |
+
|
68 |
+
|
69 |
+
class MarketAttribute(Enum):
|
70 |
+
"""Attribute"""
|
71 |
+
|
72 |
+
NUM_TRADES = "Num_trades"
|
73 |
+
WINNER_TRADES = "Winner_trades"
|
74 |
+
NUM_REDEEMED = "Num_redeemed"
|
75 |
+
INVESTMENT = "Investment"
|
76 |
+
FEES = "Fees"
|
77 |
+
MECH_CALLS = "Mech_calls"
|
78 |
+
MECH_FEES = "Mech_fees"
|
79 |
+
EARNINGS = "Earnings"
|
80 |
+
NET_EARNINGS = "Net_earnings"
|
81 |
+
REDEMPTIONS = "Redemptions"
|
82 |
+
ROI = "ROI"
|
83 |
+
|
84 |
+
def __str__(self) -> str:
|
85 |
+
"""Prints the attribute."""
|
86 |
+
return self.value
|
87 |
+
|
88 |
+
def __repr__(self) -> str:
|
89 |
+
"""Prints the attribute representation."""
|
90 |
+
return self.name
|
91 |
+
|
92 |
+
@staticmethod
|
93 |
+
def argparse(s: str) -> "MarketAttribute":
|
94 |
+
"""Performs string conversion to MarketAttribute."""
|
95 |
+
try:
|
96 |
+
return MarketAttribute[s.upper()]
|
97 |
+
except KeyError as e:
|
98 |
+
raise ValueError(f"Invalid MarketAttribute: {s}") from e
|
99 |
+
|
100 |
+
|
101 |
+
ALL_TRADES_STATS_DF_COLS = [
|
102 |
+
"trader_address",
|
103 |
+
"market_creator",
|
104 |
+
"trade_id",
|
105 |
+
"creation_timestamp",
|
106 |
+
"title",
|
107 |
+
"market_status",
|
108 |
+
"collateral_amount",
|
109 |
+
"outcome_index",
|
110 |
+
"trade_fee_amount",
|
111 |
+
"outcomes_tokens_traded",
|
112 |
+
"current_answer",
|
113 |
+
"is_invalid",
|
114 |
+
"winning_trade",
|
115 |
+
"earnings",
|
116 |
+
"redeemed",
|
117 |
+
"redeemed_amount",
|
118 |
+
"num_mech_calls",
|
119 |
+
"mech_fee_amount",
|
120 |
+
"net_earnings",
|
121 |
+
"roi",
|
122 |
+
]
|
123 |
+
|
124 |
+
|
125 |
+
def _is_redeemed(user_json: dict[str, Any], fpmmTrade: dict[str, Any]) -> bool:
|
126 |
+
"""Returns whether the user has redeemed the position."""
|
127 |
+
user_positions = user_json["data"]["user"]["userPositions"]
|
128 |
+
condition_id = fpmmTrade["fpmm.condition.id"]
|
129 |
+
for position in user_positions:
|
130 |
+
position_condition_ids = position["position"]["conditionIds"]
|
131 |
+
balance = int(position["balance"])
|
132 |
+
|
133 |
+
if condition_id in position_condition_ids:
|
134 |
+
if balance == 0:
|
135 |
+
return True
|
136 |
+
# return early
|
137 |
+
return False
|
138 |
+
return False
|
139 |
+
|
140 |
+
|
141 |
+
def prepare_profitalibity_data(
|
142 |
+
tools_filename: str,
|
143 |
+
trades_filename: str,
|
144 |
+
tmp_dir: bool = False,
|
145 |
+
) -> pd.DataFrame:
|
146 |
+
"""Prepare data for profitalibity analysis."""
|
147 |
+
|
148 |
+
# Check if tools.parquet is in the same directory
|
149 |
+
try:
|
150 |
+
if tmp_dir:
|
151 |
+
tools = pd.read_parquet(TMP_DIR / tools_filename)
|
152 |
+
else:
|
153 |
+
tools = pd.read_parquet(ROOT_DIR / tools_filename)
|
154 |
+
|
155 |
+
# make sure creator_address is in the columns
|
156 |
+
assert "trader_address" in tools.columns, "trader_address column not found"
|
157 |
+
|
158 |
+
# lowercase and strip creator_address
|
159 |
+
tools["trader_address"] = tools["trader_address"].str.lower().str.strip()
|
160 |
+
|
161 |
+
tools.drop_duplicates(
|
162 |
+
subset=["request_id", "request_block"], keep="last", inplace=True
|
163 |
+
)
|
164 |
+
tools.to_parquet(ROOT_DIR / tools_filename)
|
165 |
+
print(f"{tools_filename} loaded")
|
166 |
+
except FileNotFoundError:
|
167 |
+
print(f"{tools_filename} not found.")
|
168 |
+
return
|
169 |
+
|
170 |
+
# Check if fpmmTrades.parquet is in the same directory
|
171 |
+
print("Reading the new trades file")
|
172 |
+
try:
|
173 |
+
if tmp_dir:
|
174 |
+
fpmmTrades = pd.read_parquet(TMP_DIR / trades_filename)
|
175 |
+
else:
|
176 |
+
fpmmTrades = pd.read_parquet(ROOT_DIR / trades_filename)
|
177 |
+
except FileNotFoundError:
|
178 |
+
print(f"Error reading {trades_filename} file.")
|
179 |
+
|
180 |
+
# make sure trader_address is in the columns
|
181 |
+
assert "trader_address" in fpmmTrades.columns, "trader_address column not found"
|
182 |
+
|
183 |
+
# lowercase and strip creator_address
|
184 |
+
fpmmTrades["trader_address"] = fpmmTrades["trader_address"].str.lower().str.strip()
|
185 |
+
|
186 |
+
return fpmmTrades
|
187 |
+
|
188 |
+
|
189 |
+
def determine_market_status(trade, current_answer):
|
190 |
+
"""Determine the market status of a trade."""
|
191 |
+
if (current_answer is np.nan or current_answer is None) and time.time() >= int(
|
192 |
+
trade["fpmm.openingTimestamp"]
|
193 |
+
):
|
194 |
+
return MarketState.PENDING
|
195 |
+
elif current_answer is np.nan or current_answer is None:
|
196 |
+
return MarketState.OPEN
|
197 |
+
elif trade["fpmm.isPendingArbitration"]:
|
198 |
+
return MarketState.ARBITRATING
|
199 |
+
elif time.time() < int(trade["fpmm.answerFinalizedTimestamp"]):
|
200 |
+
return MarketState.FINALIZING
|
201 |
+
return MarketState.CLOSED
|
202 |
+
|
203 |
+
|
204 |
+
def analyse_trader(
|
205 |
+
trader_address: str,
|
206 |
+
fpmmTrades: pd.DataFrame,
|
207 |
+
trader_estimated_mech_calls: pd.DataFrame,
|
208 |
+
daily_info: bool = False,
|
209 |
+
) -> pd.DataFrame:
|
210 |
+
"""Analyse a trader's trades"""
|
211 |
+
fpmmTrades["creation_timestamp"] = pd.to_datetime(fpmmTrades["creationTimestamp"])
|
212 |
+
fpmmTrades["creation_date"] = fpmmTrades["creation_timestamp"].dt.date
|
213 |
+
# Filter trades and tools for the given trader
|
214 |
+
trades = fpmmTrades[fpmmTrades["trader_address"] == trader_address]
|
215 |
+
|
216 |
+
# Prepare the DataFrame
|
217 |
+
trades_df = pd.DataFrame(columns=ALL_TRADES_STATS_DF_COLS)
|
218 |
+
if trades.empty:
|
219 |
+
return trades_df
|
220 |
+
|
221 |
+
# Fetch user's conditional tokens gc graph
|
222 |
+
try:
|
223 |
+
user_json = query_conditional_tokens_gc_subgraph(trader_address)
|
224 |
+
except Exception as e:
|
225 |
+
print(f"Error fetching user data: {e}")
|
226 |
+
return trades_df
|
227 |
+
|
228 |
+
# Iterate over the trades
|
229 |
+
trades_answer_nan = 0
|
230 |
+
trades_no_closed_market = 0
|
231 |
+
for i, trade in tqdm(trades.iterrows(), total=len(trades), desc="Analysing trades"):
|
232 |
+
try:
|
233 |
+
market_answer = trade["fpmm.currentAnswer"]
|
234 |
+
trading_day = trade["creation_date"]
|
235 |
+
trade_id = trade["id"]
|
236 |
+
if not daily_info and not market_answer:
|
237 |
+
# print(f"Skipping trade {i} because currentAnswer is NaN")
|
238 |
+
trades_answer_nan += 1
|
239 |
+
continue
|
240 |
+
# Parsing and computing shared values
|
241 |
+
collateral_amount = wei_to_unit(float(trade["collateralAmount"]))
|
242 |
+
fee_amount = wei_to_unit(float(trade["feeAmount"]))
|
243 |
+
outcome_tokens_traded = wei_to_unit(float(trade["outcomeTokensTraded"]))
|
244 |
+
earnings, winner_trade = (0, False)
|
245 |
+
redemption = _is_redeemed(user_json, trade)
|
246 |
+
current_answer = market_answer if market_answer else None
|
247 |
+
market_creator = trade["market_creator"]
|
248 |
+
|
249 |
+
# Determine market status
|
250 |
+
market_status = determine_market_status(trade, current_answer)
|
251 |
+
|
252 |
+
# Skip non-closed markets
|
253 |
+
if not daily_info and market_status != MarketState.CLOSED:
|
254 |
+
# print(
|
255 |
+
# f"Skipping trade {i} because market is not closed. Market Status: {market_status}"
|
256 |
+
# )
|
257 |
+
trades_no_closed_market += 1
|
258 |
+
continue
|
259 |
+
if current_answer is not None:
|
260 |
+
current_answer = convert_hex_to_int(current_answer)
|
261 |
+
|
262 |
+
# Compute invalidity
|
263 |
+
is_invalid = current_answer == INVALID_ANSWER
|
264 |
+
|
265 |
+
# Compute earnings and winner trade status
|
266 |
+
if current_answer is None:
|
267 |
+
earnings = 0.0
|
268 |
+
winner_trade = None
|
269 |
+
elif is_invalid:
|
270 |
+
earnings = collateral_amount
|
271 |
+
winner_trade = False
|
272 |
+
elif int(trade["outcomeIndex"]) == current_answer:
|
273 |
+
earnings = outcome_tokens_traded
|
274 |
+
winner_trade = True
|
275 |
+
|
276 |
+
# Compute mech calls using the title, and trade id
|
277 |
+
if daily_info:
|
278 |
+
total_mech_calls = trader_estimated_mech_calls.loc[
|
279 |
+
(trader_estimated_mech_calls["trading_day"] == trading_day),
|
280 |
+
"total_mech_calls",
|
281 |
+
].iloc[0]
|
282 |
+
else:
|
283 |
+
total_mech_calls = trader_estimated_mech_calls.loc[
|
284 |
+
(trader_estimated_mech_calls["market"] == trade["title"])
|
285 |
+
& (trader_estimated_mech_calls["trade_id"] == trade_id),
|
286 |
+
"total_mech_calls",
|
287 |
+
].iloc[0]
|
288 |
+
|
289 |
+
net_earnings = (
|
290 |
+
earnings
|
291 |
+
- fee_amount
|
292 |
+
- (total_mech_calls * DEFAULT_MECH_FEE)
|
293 |
+
- collateral_amount
|
294 |
+
)
|
295 |
+
|
296 |
+
# Assign values to DataFrame
|
297 |
+
trades_df.loc[i] = {
|
298 |
+
"trader_address": trader_address,
|
299 |
+
"market_creator": market_creator,
|
300 |
+
"trade_id": trade["id"],
|
301 |
+
"market_status": market_status.name,
|
302 |
+
"creation_timestamp": trade["creationTimestamp"],
|
303 |
+
"title": trade["title"],
|
304 |
+
"collateral_amount": collateral_amount,
|
305 |
+
"outcome_index": trade["outcomeIndex"],
|
306 |
+
"trade_fee_amount": fee_amount,
|
307 |
+
"outcomes_tokens_traded": outcome_tokens_traded,
|
308 |
+
"current_answer": current_answer,
|
309 |
+
"is_invalid": is_invalid,
|
310 |
+
"winning_trade": winner_trade,
|
311 |
+
"earnings": earnings,
|
312 |
+
"redeemed": redemption,
|
313 |
+
"redeemed_amount": earnings if redemption else 0,
|
314 |
+
"num_mech_calls": total_mech_calls,
|
315 |
+
"mech_fee_amount": total_mech_calls * DEFAULT_MECH_FEE,
|
316 |
+
"net_earnings": net_earnings,
|
317 |
+
"roi": net_earnings
|
318 |
+
/ (
|
319 |
+
collateral_amount + fee_amount + total_mech_calls * DEFAULT_MECH_FEE
|
320 |
+
),
|
321 |
+
}
|
322 |
+
|
323 |
+
except Exception as e:
|
324 |
+
print(f"Error processing trade {i}: {e}")
|
325 |
+
print(trade)
|
326 |
+
continue
|
327 |
+
|
328 |
+
print(f"Number of trades where currentAnswer is NaN = {trades_answer_nan}")
|
329 |
+
print(
|
330 |
+
f"Number of trades where the market is not closed = {trades_no_closed_market}"
|
331 |
+
)
|
332 |
+
return trades_df
|
333 |
+
|
334 |
+
|
335 |
+
def analyse_all_traders(
|
336 |
+
trades: pd.DataFrame,
|
337 |
+
estimated_mech_calls: pd.DataFrame,
|
338 |
+
daily_info: bool = False,
|
339 |
+
) -> pd.DataFrame:
|
340 |
+
"""Analyse all creators."""
|
341 |
+
|
342 |
+
all_traders = []
|
343 |
+
for trader in tqdm(
|
344 |
+
trades["trader_address"].unique(),
|
345 |
+
total=len(trades["trader_address"].unique()),
|
346 |
+
desc="Analysing creators",
|
347 |
+
):
|
348 |
+
trader_estimated_mech_calls = estimated_mech_calls.loc[
|
349 |
+
estimated_mech_calls["trader_address"] == trader
|
350 |
+
]
|
351 |
+
all_traders.append(
|
352 |
+
analyse_trader(trader, trades, trader_estimated_mech_calls, daily_info)
|
353 |
+
)
|
354 |
+
|
355 |
+
# concat all creators
|
356 |
+
all_creators_df = pd.concat(all_traders)
|
357 |
+
|
358 |
+
return all_creators_df
|
359 |
+
|
360 |
+
|
361 |
+
@measure_execution_time
|
362 |
+
def run_profitability_analysis(
|
363 |
+
tools_filename: str,
|
364 |
+
trades_filename: str,
|
365 |
+
merge: bool = False,
|
366 |
+
tmp_dir: bool = False,
|
367 |
+
):
|
368 |
+
"""Create all trades analysis."""
|
369 |
+
print(f"Preparing data with {tools_filename} and {trades_filename}")
|
370 |
+
fpmmTrades = prepare_profitalibity_data(
|
371 |
+
tools_filename, trades_filename, tmp_dir=tmp_dir
|
372 |
+
)
|
373 |
+
|
374 |
+
if merge:
|
375 |
+
update_tools_parquet(tools_filename)
|
376 |
+
|
377 |
+
tools = pd.read_parquet(TMP_DIR / "tools.parquet")
|
378 |
+
|
379 |
+
try:
|
380 |
+
fpmmTrades["creationTimestamp"] = fpmmTrades["creationTimestamp"].apply(
|
381 |
+
lambda x: transform_to_datetime(x)
|
382 |
+
)
|
383 |
+
except Exception as e:
|
384 |
+
print(f"Transformation not needed")
|
385 |
+
|
386 |
+
print("Computing the estimated mech calls dataset")
|
387 |
+
trade_mech_calls = compute_mech_calls_based_on_timestamps(
|
388 |
+
fpmmTrades=fpmmTrades, tools=tools
|
389 |
+
)
|
390 |
+
trade_mech_calls.to_parquet(TMP_DIR / "trade_mech_calls.parquet")
|
391 |
+
|
392 |
+
print(trade_mech_calls.total_mech_calls.describe())
|
393 |
+
print("Analysing trades...")
|
394 |
+
all_trades_df = analyse_all_traders(fpmmTrades, trade_mech_calls)
|
395 |
+
|
396 |
+
# # merge previous files if requested
|
397 |
+
if merge:
|
398 |
+
all_trades_df = update_all_trades_parquet(all_trades_df)
|
399 |
+
|
400 |
+
# debugging purposes
|
401 |
+
all_trades_df.to_parquet(JSON_DATA_DIR / "all_trades_df.parquet", index=False)
|
402 |
+
# all_trades_df = pd.read_parquet(JSON_DATA_DIR / "all_trades_df.parquet")
|
403 |
+
|
404 |
+
# filter invalid markets. Condition: "is_invalid" is True
|
405 |
+
invalid_trades = all_trades_df.loc[all_trades_df["is_invalid"] == True]
|
406 |
+
if len(invalid_trades) == 0:
|
407 |
+
print("No new invalid trades")
|
408 |
+
else:
|
409 |
+
if merge:
|
410 |
+
try:
|
411 |
+
print("Merging invalid trades parquet file")
|
412 |
+
old_invalid_trades = pd.read_parquet(
|
413 |
+
ROOT_DIR / "invalid_trades.parquet"
|
414 |
+
)
|
415 |
+
merge_df = pd.concat(
|
416 |
+
[old_invalid_trades, invalid_trades], ignore_index=True
|
417 |
+
)
|
418 |
+
invalid_trades = merge_df.drop_duplicates()
|
419 |
+
except Exception as e:
|
420 |
+
print(f"Error updating the invalid trades parquet {e}")
|
421 |
+
invalid_trades.to_parquet(ROOT_DIR / "invalid_trades.parquet", index=False)
|
422 |
+
|
423 |
+
all_trades_df = all_trades_df.loc[all_trades_df["is_invalid"] == False]
|
424 |
+
|
425 |
+
all_trades_df = label_trades_by_staking(trades_df=all_trades_df)
|
426 |
+
|
427 |
+
print("Creating unknown traders dataset")
|
428 |
+
unknown_traders_df, all_trades_df = create_unknown_traders_df(
|
429 |
+
trades_df=all_trades_df
|
430 |
+
)
|
431 |
+
# merge with previous unknown traders dataset
|
432 |
+
previous_unknown_traders = pd.read_parquet(ROOT_DIR / "unknown_traders.parquet")
|
433 |
+
|
434 |
+
unknown_traders_df: pd.DataFrame = pd.concat(
|
435 |
+
[unknown_traders_df, previous_unknown_traders], ignore_index=True
|
436 |
+
)
|
437 |
+
unknown_traders_df.drop_duplicates("trade_id", keep="last", inplace=True)
|
438 |
+
unknown_traders_df.to_parquet(ROOT_DIR / "unknown_traders.parquet", index=False)
|
439 |
+
|
440 |
+
# save to parquet
|
441 |
+
all_trades_df.to_parquet(ROOT_DIR / "all_trades_profitability.parquet", index=False)
|
442 |
+
print("Profitability analysis Done!")
|
443 |
+
|
444 |
+
return all_trades_df
|
445 |
+
|
446 |
+
|
447 |
+
def add_trades_profitability(trades_filename: str):
|
448 |
+
print("Reading the trades file")
|
449 |
+
try:
|
450 |
+
fpmmTrades = pd.read_parquet(ROOT_DIR / trades_filename)
|
451 |
+
except FileNotFoundError:
|
452 |
+
print(f"Error reading {trades_filename} file .")
|
453 |
+
|
454 |
+
# make sure trader_address is in the columns
|
455 |
+
assert "trader_address" in fpmmTrades.columns, "trader_address column not found"
|
456 |
+
|
457 |
+
# lowercase and strip creator_address
|
458 |
+
fpmmTrades["trader_address"] = fpmmTrades["trader_address"].str.lower().str.strip()
|
459 |
+
|
460 |
+
print("Reading tools parquet file")
|
461 |
+
tools = pd.read_parquet(TMP_DIR / "tools.parquet")
|
462 |
+
|
463 |
+
try:
|
464 |
+
fpmmTrades["creationTimestamp"] = fpmmTrades["creationTimestamp"].apply(
|
465 |
+
lambda x: transform_to_datetime(x)
|
466 |
+
)
|
467 |
+
except Exception as e:
|
468 |
+
print(f"Transformation not needed")
|
469 |
+
|
470 |
+
print("Computing the estimated mech calls dataset")
|
471 |
+
trade_mech_calls = compute_mech_calls_based_on_timestamps(
|
472 |
+
fpmmTrades=fpmmTrades, tools=tools
|
473 |
+
)
|
474 |
+
print(trade_mech_calls.total_mech_calls.describe())
|
475 |
+
print("Analysing trades...")
|
476 |
+
all_trades_df = analyse_all_traders(fpmmTrades, trade_mech_calls)
|
477 |
+
|
478 |
+
# debugging purposes
|
479 |
+
all_trades_df.to_parquet(JSON_DATA_DIR / "missing_trades_df.parquet", index=False)
|
480 |
+
# filter invalid markets. Condition: "is_invalid" is True
|
481 |
+
print("Checking invalid trades")
|
482 |
+
invalid_trades = all_trades_df.loc[all_trades_df["is_invalid"] == True]
|
483 |
+
if len(invalid_trades) > 0:
|
484 |
+
try:
|
485 |
+
print("Merging invalid trades parquet file")
|
486 |
+
old_invalid_trades = pd.read_parquet(ROOT_DIR / "invalid_trades.parquet")
|
487 |
+
merge_df = pd.concat(
|
488 |
+
[old_invalid_trades, invalid_trades], ignore_index=True
|
489 |
+
)
|
490 |
+
invalid_trades = merge_df.drop_duplicates("trade_id")
|
491 |
+
except Exception as e:
|
492 |
+
print(f"Error updating the invalid trades parquet {e}")
|
493 |
+
invalid_trades.to_parquet(ROOT_DIR / "invalid_trades.parquet", index=False)
|
494 |
+
all_trades_df = all_trades_df.loc[all_trades_df["is_invalid"] == False]
|
495 |
+
|
496 |
+
print("Adding staking labels")
|
497 |
+
all_trades_df = label_trades_by_staking(trades_df=all_trades_df)
|
498 |
+
print("Creating unknown traders dataset")
|
499 |
+
unknown_traders_df, all_trades_df = create_unknown_traders_df(
|
500 |
+
trades_df=all_trades_df
|
501 |
+
)
|
502 |
+
if len(unknown_traders_df) > 0:
|
503 |
+
print("Merging unknown traders info")
|
504 |
+
# merge with previous unknown traders dataset
|
505 |
+
previous_unknown_traders = pd.read_parquet(ROOT_DIR / "unknown_traders.parquet")
|
506 |
+
|
507 |
+
unknown_traders_df: pd.DataFrame = pd.concat(
|
508 |
+
[unknown_traders_df, previous_unknown_traders], ignore_index=True
|
509 |
+
)
|
510 |
+
unknown_traders_df.drop_duplicates("trade_id", keep="last", inplace=True)
|
511 |
+
unknown_traders_df.to_parquet(ROOT_DIR / "unknown_traders.parquet", index=False)
|
512 |
+
|
513 |
+
print("merge with previous all_trades_profitability")
|
514 |
+
old_trades = pd.read_parquet(ROOT_DIR / "all_trades_profitability.parquet")
|
515 |
+
all_trades_df: pd.DataFrame = pd.concat(
|
516 |
+
[all_trades_df, old_trades], ignore_index=True
|
517 |
+
)
|
518 |
+
all_trades_df.drop_duplicates("trade_id", keep="last", inplace=True)
|
519 |
+
all_trades_df.to_parquet(ROOT_DIR / "all_trades_profitability.parquet", index=False)
|
520 |
+
|
521 |
+
|
522 |
+
if __name__ == "__main__":
|
523 |
+
run_profitability_analysis(
|
524 |
+
tools_filename="tools.parquet",
|
525 |
+
trades_filename="fpmmTrades.parquet",
|
526 |
+
merge=False,
|
527 |
+
tmp_dir=True,
|
528 |
+
)
|
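The per-trade accounting in `analyse_trader` above reduces to simple arithmetic; a standalone sketch (the real `DEFAULT_MECH_FEE` comes from `utils.py`, the value below is only illustrative):

DEFAULT_MECH_FEE = 0.01  # illustrative only; the real constant is imported from utils

def trade_net_earnings_and_roi(
    earnings: float, collateral: float, trade_fee: float, num_mech_calls: int
) -> tuple:
    """Mirror of the net_earnings / roi formulas used in analyse_trader."""
    mech_fees = num_mech_calls * DEFAULT_MECH_FEE
    net_earnings = earnings - trade_fee - mech_fees - collateral
    roi = net_earnings / (collateral + trade_fee + mech_fees)
    return net_earnings, roi

# winning trade: 1.8 outcome tokens earned on 1.0 collateral, 0.02 market fee, 2 mech calls
print(trade_net_earnings_and_roi(1.8, 1.0, 0.02, 2))  # approximately (0.76, 0.73)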
scripts/pull_data.py
ADDED
@@ -0,0 +1,171 @@
1 |
+
import logging
|
2 |
+
from datetime import datetime
|
3 |
+
import pandas as pd
|
4 |
+
from markets import (
|
5 |
+
etl as mkt_etl,
|
6 |
+
DEFAULT_FILENAME as MARKETS_FILENAME,
|
7 |
+
fpmmTrades_etl,
|
8 |
+
update_fpmmTrades_parquet,
|
9 |
+
)
|
10 |
+
from tools import generate_tools_file
|
11 |
+
from profitability import run_profitability_analysis, add_trades_profitability
|
12 |
+
from utils import (
|
13 |
+
get_question,
|
14 |
+
current_answer,
|
15 |
+
measure_execution_time,
|
16 |
+
ROOT_DIR,
|
17 |
+
HIST_DIR,
|
18 |
+
TMP_DIR,
|
19 |
+
)
|
20 |
+
from get_mech_info import (
|
21 |
+
get_mech_events_since_last_run,
|
22 |
+
update_json_files,
|
23 |
+
)
|
24 |
+
from update_tools_accuracy import compute_tools_accuracy
|
25 |
+
from cleaning_old_info import clean_old_data_from_parquet_files
|
26 |
+
from web3_utils import updating_timestamps
|
27 |
+
from manage_space_files import move_files
|
28 |
+
from cloud_storage import upload_historical_file
|
29 |
+
from tools_metrics import compute_tools_based_datasets
|
30 |
+
|
31 |
+
|
32 |
+
logging.basicConfig(
|
33 |
+
level=logging.INFO,
|
34 |
+
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
35 |
+
datefmt="%Y-%m-%d %H:%M:%S",
|
36 |
+
)
|
37 |
+
logger = logging.getLogger(__name__)
|
38 |
+
|
39 |
+
|
40 |
+
def add_current_answer(tools_filename: str):
|
41 |
+
# Get currentAnswer from FPMMS
|
42 |
+
fpmms = pd.read_parquet(ROOT_DIR / MARKETS_FILENAME)
|
43 |
+
tools = pd.read_parquet(ROOT_DIR / tools_filename)
|
44 |
+
|
45 |
+
# Get the question from the tools
|
46 |
+
logging.info("Getting the question and current answer for the tools")
|
47 |
+
tools["title"] = tools["prompt_request"].apply(lambda x: get_question(x))
|
48 |
+
tools["currentAnswer"] = tools["title"].apply(lambda x: current_answer(x, fpmms))
|
49 |
+
|
50 |
+
tools["currentAnswer"] = tools["currentAnswer"].str.replace("yes", "Yes")
|
51 |
+
tools["currentAnswer"] = tools["currentAnswer"].str.replace("no", "No")
|
52 |
+
# Save the tools data after the updates on the content
|
53 |
+
tools.to_parquet(ROOT_DIR / tools_filename, index=False)
|
54 |
+
del fpmms
|
55 |
+
|
56 |
+
|
57 |
+
def save_historical_data():
|
58 |
+
"""Function to save a copy of the main trades and tools file
|
59 |
+
into the historical folder"""
|
60 |
+
print("Saving historical data copies")
|
61 |
+
current_datetime = datetime.now()
|
62 |
+
|
63 |
+
timestamp = current_datetime.strftime("%Y%m%d_%H%M%S")
|
64 |
+
|
65 |
+
try:
|
66 |
+
tools = pd.read_parquet(TMP_DIR / "tools.parquet")
|
67 |
+
filename = f"tools_{timestamp}.parquet"
|
68 |
+
tools.to_parquet(HIST_DIR / filename, index=False)
|
69 |
+
# save into cloud storage
|
70 |
+
upload_historical_file(filename)
|
71 |
+
except Exception as e:
|
72 |
+
print(f"Error saving tools file in the historical folder {e}")
|
73 |
+
|
74 |
+
try:
|
75 |
+
all_trades = pd.read_parquet(ROOT_DIR / "all_trades_profitability.parquet")
|
76 |
+
filename = f"all_trades_profitability_{timestamp}.parquet"
|
77 |
+
all_trades.to_parquet(HIST_DIR / filename, index=False)
|
78 |
+
# save into cloud storage
|
79 |
+
upload_historical_file(filename)
|
80 |
+
|
81 |
+
except Exception as e:
|
82 |
+
print(
|
83 |
+
f"Error saving all_trades_profitability file in the historical folder {e}"
|
84 |
+
)
|
85 |
+
|
86 |
+
|
87 |
+
@measure_execution_time
|
88 |
+
def only_new_weekly_analysis():
|
89 |
+
"""Run weekly analysis for the FPMMS project."""
|
90 |
+
# Run markets ETL
|
91 |
+
logging.info("Running markets ETL")
|
92 |
+
mkt_etl(MARKETS_FILENAME)
|
93 |
+
logging.info("Markets ETL completed")
|
94 |
+
|
95 |
+
# Mech events ETL
|
96 |
+
logging.info("Generating the mech json files")
|
97 |
+
# get only new data
|
98 |
+
latest_timestamp = get_mech_events_since_last_run(logger)
|
99 |
+
if latest_timestamp is None:
|
100 |
+
print("Error while getting the mech events")
|
101 |
+
return
|
102 |
+
logging.info(f"Finished generating the mech json files from {latest_timestamp}")
|
103 |
+
|
104 |
+
# FpmmTrades ETL
|
105 |
+
fpmmTrades_etl(
|
106 |
+
trades_filename="new_fpmmTrades.parquet",
|
107 |
+
from_timestamp=int(latest_timestamp.timestamp()),
|
108 |
+
)
|
109 |
+
# merge with previous file
|
110 |
+
print("Merging with previous fpmmTrades file")
|
111 |
+
update_fpmmTrades_parquet(trades_filename="new_fpmmTrades.parquet")
|
112 |
+
|
113 |
+
# Run tools ETL
|
114 |
+
logging.info("Generate and parse the tools content")
|
115 |
+
# generate only new file
|
116 |
+
generate_tools_file("new_tools_info.json", "new_tools.parquet")
|
117 |
+
logging.info("Tools ETL completed")
|
118 |
+
|
119 |
+
add_current_answer("new_tools.parquet")
|
120 |
+
|
121 |
+
# # Run profitability analysis
|
122 |
+
logging.info("Running profitability analysis")
|
123 |
+
run_profitability_analysis(
|
124 |
+
tools_filename="new_tools.parquet",
|
125 |
+
trades_filename="new_fpmmTrades.parquet",
|
126 |
+
merge=True,
|
127 |
+
)
|
128 |
+
|
129 |
+
logging.info("Profitability analysis completed")
|
130 |
+
|
131 |
+
# merge new json files with old json files
|
132 |
+
update_json_files()
|
133 |
+
|
134 |
+
save_historical_data()
|
135 |
+
try:
|
136 |
+
clean_old_data_from_parquet_files("2024-11-30")
|
137 |
+
except Exception as e:
|
138 |
+
print("Error cleaning the oldest information from parquet files")
|
139 |
+
print(f"reason = {e}")
|
140 |
+
compute_tools_accuracy()
|
141 |
+
compute_tools_based_datasets()
|
142 |
+
# # move to tmp folder the new generated files
|
143 |
+
move_files()
|
144 |
+
logging.info("Weekly analysis files generated and saved")
|
145 |
+
|
146 |
+
|
147 |
+
def restoring_trades_data(from_date: str, to_date: str):
|
148 |
+
# Convert the string to datetime64[ns, UTC]
|
149 |
+
min_date_utc = pd.to_datetime(from_date, format="%Y-%m-%d", utc=True)
|
150 |
+
max_date_utc = pd.to_datetime(to_date, format="%Y-%m-%d", utc=True)
|
151 |
+
logging.info("Running markets ETL")
|
152 |
+
mkt_etl(MARKETS_FILENAME)
|
153 |
+
logging.info("Markets ETL completed")
|
154 |
+
|
155 |
+
fpmmTrades_etl(
|
156 |
+
trades_filename="missing_fpmmTrades.parquet",
|
157 |
+
from_timestamp=int(min_date_utc.timestamp()),
|
158 |
+
to_timestamp=int(max_date_utc.timestamp()),
|
159 |
+
)
|
160 |
+
|
161 |
+
# merge with the old file
|
162 |
+
print("Merging with previous fpmmTrades file")
|
163 |
+
update_fpmmTrades_parquet(trades_filename="missing_fpmmTrades.parquet")
|
164 |
+
|
165 |
+
# adding tools information
|
166 |
+
add_trades_profitability(trades_filename="missing_fpmmTrades.parquet")
|
167 |
+
|
168 |
+
|
169 |
+
if __name__ == "__main__":
|
170 |
+
only_new_weekly_analysis()
|
171 |
+
# restoring_trades_data("2024-12-28", "2025-01-07")
|
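The date window handling in `restoring_trades_data` comes down to this conversion (the dates below reuse the example window from the commented-out call):

import pandas as pd

from_date, to_date = "2024-12-28", "2025-01-07"
min_date_utc = pd.to_datetime(from_date, format="%Y-%m-%d", utc=True)
max_date_utc = pd.to_datetime(to_date, format="%Y-%m-%d", utc=True)
# these unix timestamps are what fpmmTrades_etl receives as from_timestamp / to_timestamp
print(int(min_date_utc.timestamp()), int(max_date_utc.timestamp()))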
scripts/queries.py
ADDED
@@ -0,0 +1,161 @@
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
# ------------------------------------------------------------------------------
|
3 |
+
#
|
4 |
+
# Copyright 2024 Valory AG
|
5 |
+
#
|
6 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
7 |
+
# you may not use this file except in compliance with the License.
|
8 |
+
# You may obtain a copy of the License at
|
9 |
+
#
|
10 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
11 |
+
#
|
12 |
+
# Unless required by applicable law or agreed to in writing, software
|
13 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
14 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
15 |
+
# See the License for the specific language governing permissions and
|
16 |
+
# limitations under the License.
|
17 |
+
#
|
18 |
+
# ------------------------------------------------------------------------------
|
19 |
+
|
20 |
+
from string import Template
|
21 |
+
|
22 |
+
FPMMS_FIELD = "fixedProductMarketMakers"
|
23 |
+
QUERY_FIELD = "query"
|
24 |
+
ERROR_FIELD = "errors"
|
25 |
+
DATA_FIELD = "data"
|
26 |
+
ID_FIELD = "id"
|
27 |
+
ANSWER_FIELD = "currentAnswer"
|
28 |
+
QUESTION_FIELD = "question"
|
29 |
+
OUTCOMES_FIELD = "outcomes"
|
30 |
+
TITLE_FIELD = "title"
|
31 |
+
ANSWER_TIMESTAMP_FIELD = "currentAnswerTimestamp"
|
32 |
+
OPENING_TIMESTAMP_FIELD = "openingTimestamp"
|
33 |
+
RESOLUTION_TIMESTAMP_FIELD = "resolutionTimestamp"
|
34 |
+
CREATION_TIMESTAMP_FIELD = "creationTimestamp"
|
35 |
+
LIQUIDITY_FIELD = "liquidityParameter"
|
36 |
+
LIQUIDIY_MEASURE_FIELD = "liquidityMeasure"
|
37 |
+
TOKEN_AMOUNTS_FIELD = "outcomeTokenAmounts"
|
38 |
+
|
39 |
+
FPMMS_QUERY = Template(
|
40 |
+
"""
|
41 |
+
{
|
42 |
+
${fpmms_field}(
|
43 |
+
where: {
|
44 |
+
creator: "${creator}",
|
45 |
+
id_gt: "${fpmm_id}",
|
46 |
+
isPendingArbitration: false
|
47 |
+
},
|
48 |
+
orderBy: ${id_field}
|
49 |
+
first: ${first}
|
50 |
+
){
|
51 |
+
${id_field}
|
52 |
+
${answer_field}
|
53 |
+
${question_field} {
|
54 |
+
${outcomes_field}
|
55 |
+
}
|
56 |
+
${title_field}
|
57 |
+
}
|
58 |
+
}
|
59 |
+
"""
|
60 |
+
)
|
61 |
+
|
62 |
+
omen_xdai_trades_query = Template(
|
63 |
+
"""
|
64 |
+
{
|
65 |
+
fpmmTrades(
|
66 |
+
where: {
|
67 |
+
type: Buy,
|
68 |
+
fpmm_: {
|
69 |
+
creator: "${fpmm_creator}"
|
70 |
+
creationTimestamp_gte: "${fpmm_creationTimestamp_gte}",
|
71 |
+
creationTimestamp_lt: "${fpmm_creationTimestamp_lte}"
|
72 |
+
},
|
73 |
+
creationTimestamp_gte: "${creationTimestamp_gte}",
|
74 |
+
creationTimestamp_lte: "${creationTimestamp_lte}"
|
75 |
+
id_gt: "${id_gt}"
|
76 |
+
}
|
77 |
+
first: ${first}
|
78 |
+
orderBy: id
|
79 |
+
orderDirection: asc
|
80 |
+
) {
|
81 |
+
id
|
82 |
+
title
|
83 |
+
collateralToken
|
84 |
+
outcomeTokenMarginalPrice
|
85 |
+
oldOutcomeTokenMarginalPrice
|
86 |
+
type
|
87 |
+
creator {
|
88 |
+
id
|
89 |
+
}
|
90 |
+
creationTimestamp
|
91 |
+
collateralAmount
|
92 |
+
collateralAmountUSD
|
93 |
+
feeAmount
|
94 |
+
outcomeIndex
|
95 |
+
outcomeTokensTraded
|
96 |
+
transactionHash
|
97 |
+
fpmm {
|
98 |
+
id
|
99 |
+
outcomes
|
100 |
+
title
|
101 |
+
answerFinalizedTimestamp
|
102 |
+
currentAnswer
|
103 |
+
isPendingArbitration
|
104 |
+
arbitrationOccurred
|
105 |
+
openingTimestamp
|
106 |
+
condition {
|
107 |
+
id
|
108 |
+
}
|
109 |
+
}
|
110 |
+
}
|
111 |
+
}
|
112 |
+
"""
|
113 |
+
)
|
114 |
+
|
115 |
+
conditional_tokens_gc_user_query = Template(
|
116 |
+
"""
|
117 |
+
{
|
118 |
+
user(id: "${id}") {
|
119 |
+
userPositions(
|
120 |
+
first: ${first}
|
121 |
+
where: {
|
122 |
+
id_gt: "${userPositions_id_gt}"
|
123 |
+
}
|
124 |
+
orderBy: id
|
125 |
+
) {
|
126 |
+
balance
|
127 |
+
id
|
128 |
+
position {
|
129 |
+
id
|
130 |
+
conditionIds
|
131 |
+
}
|
132 |
+
totalBalance
|
133 |
+
wrappedBalance
|
134 |
+
}
|
135 |
+
}
|
136 |
+
}
|
137 |
+
"""
|
138 |
+
)
|
139 |
+
|
140 |
+
|
141 |
+
TRADES_QUERY = """
|
142 |
+
query fpmms_query($fpmm: String, $id_gt: ID) {
|
143 |
+
fpmmTrades(
|
144 |
+
where: {fpmm: $fpmm, id_gt: $id_gt, type: Buy}
|
145 |
+
orderBy: id
|
146 |
+
orderDirection: asc
|
147 |
+
first: 1000
|
148 |
+
) {
|
149 |
+
collateralAmount
|
150 |
+
outcomeIndex
|
151 |
+
outcomeTokensTraded
|
152 |
+
id
|
153 |
+
oldOutcomeTokenMarginalPrice
|
154 |
+
outcomeTokenMarginalPrice
|
155 |
+
type
|
156 |
+
collateralAmountUSD
|
157 |
+
creationTimestamp
|
158 |
+
feeAmount
|
159 |
+
}
|
160 |
+
}
|
161 |
+
"""
|
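These are plain `string.Template` objects, so callers build a query body with `substitute` before posting it to the subgraph. A hedged example, assuming the constants defined above are in scope and using a placeholder creator address:

query_body = FPMMS_QUERY.substitute(
    fpmms_field=FPMMS_FIELD,
    creator="0x0000000000000000000000000000000000000000",  # placeholder market creator
    fpmm_id="",  # paginate by passing the last fetched id here
    id_field=ID_FIELD,
    first=1000,
    answer_field=ANSWER_FIELD,
    question_field=QUESTION_FIELD,
    outcomes_field=OUTCOMES_FIELD,
    title_field=TITLE_FIELD,
)
print(query_body)  # the GraphQL string sent under the "query" key of the request payload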
scripts/staking.py
ADDED
@@ -0,0 +1,304 @@
1 |
+
import json
|
2 |
+
import sys
|
3 |
+
from typing import Any, List
|
4 |
+
from utils import RPC, ROOT_DIR, TMP_DIR, JSON_DATA_DIR
|
5 |
+
import requests
|
6 |
+
from tqdm import tqdm
|
7 |
+
from web3 import Web3
|
8 |
+
import pandas as pd
|
9 |
+
import pickle
|
10 |
+
import os
|
11 |
+
import gzip
|
12 |
+
import shutil
|
13 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
14 |
+
|
15 |
+
NUM_WORKERS = 10
|
16 |
+
DEPRECATED_STAKING_PROGRAMS = {
|
17 |
+
"quickstart_alpha_everest": "0x5add592ce0a1B5DceCebB5Dcac086Cd9F9e3eA5C",
|
18 |
+
"quickstart_alpha_alpine": "0x2Ef503950Be67a98746F484DA0bBAdA339DF3326",
|
19 |
+
"quickstart_alpha_coastal": "0x43fB32f25dce34EB76c78C7A42C8F40F84BCD237",
|
20 |
+
}
|
21 |
+
STAKING_PROGRAMS_QS = {
|
22 |
+
"quickstart_beta_hobbyist": "0x389B46c259631Acd6a69Bde8B6cEe218230bAE8C",
|
23 |
+
"quickstart_beta_hobbyist_2": "0x238EB6993b90a978ec6AAD7530d6429c949C08DA",
|
24 |
+
"quickstart_beta_expert": "0x5344B7DD311e5d3DdDd46A4f71481bD7b05AAA3e",
|
25 |
+
"quickstart_beta_expert_2": "0xb964e44c126410df341ae04B13aB10A985fE3513",
|
26 |
+
"quickstart_beta_expert_3": "0x80faD33Cadb5F53f9D29F02Db97D682E8b101618",
|
27 |
+
"quickstart_beta_expert_4": "0xaD9d891134443B443D7F30013c7e14Fe27F2E029",
|
28 |
+
"quickstart_beta_expert_5": "0xE56dF1E563De1B10715cB313D514af350D207212",
|
29 |
+
"quickstart_beta_expert_6": "0x2546214aEE7eEa4bEE7689C81231017CA231Dc93",
|
30 |
+
"quickstart_beta_expert_7": "0xD7A3C8b975f71030135f1a66e9e23164d54fF455",
|
31 |
+
"quickstart_beta_expert_8": "0x356C108D49C5eebd21c84c04E9162de41933030c",
|
32 |
+
"quickstart_beta_expert_9": "0x17dBAe44BC5618Cc254055b386A29576b4F87015",
|
33 |
+
"quickstart_beta_expert_10": "0xB0ef657b8302bd2c74B6E6D9B2b4b39145b19c6f",
|
34 |
+
"quickstart_beta_expert_11": "0x3112c1613eAC3dBAE3D4E38CeF023eb9E2C91CF7",
|
35 |
+
"quickstart_beta_expert_12": "0xF4a75F476801B3fBB2e7093aCDcc3576593Cc1fc",
|
36 |
+
}
|
37 |
+
|
38 |
+
STAKING_PROGRAMS_PEARL = {
|
39 |
+
"pearl_alpha": "0xEE9F19b5DF06c7E8Bfc7B28745dcf944C504198A",
|
40 |
+
"pearl_beta": "0xeF44Fb0842DDeF59D37f85D61A1eF492bbA6135d",
|
41 |
+
"pearl_beta_2": "0x1c2F82413666d2a3fD8bC337b0268e62dDF67434",
|
42 |
+
"pearl_beta_3": "0xBd59Ff0522aA773cB6074ce83cD1e4a05A457bc1",
|
43 |
+
"pearl_beta_4": "0x3052451e1eAee78e62E169AfdF6288F8791F2918",
|
44 |
+
"pearl_beta_5": "0x4Abe376Fda28c2F43b84884E5f822eA775DeA9F4",
|
45 |
+
}
|
46 |
+
|
47 |
+
|
48 |
+
SERVICE_REGISTRY_ADDRESS = "0x9338b5153AE39BB89f50468E608eD9d764B755fD"
|
49 |
+
|
50 |
+
|
51 |
+
def _get_contract(address: str) -> Any:
|
52 |
+
w3 = Web3(Web3.HTTPProvider(RPC))
|
53 |
+
abi = _get_abi(address)
|
54 |
+
contract = w3.eth.contract(address=Web3.to_checksum_address(address), abi=abi)
|
55 |
+
return contract
|
56 |
+
|
57 |
+
|
58 |
+
def _get_abi(address: str) -> List:
|
59 |
+
contract_abi_url = (
|
60 |
+
"https://gnosis.blockscout.com/api/v2/smart-contracts/{contract_address}"
|
61 |
+
)
|
62 |
+
response = requests.get(contract_abi_url.format(contract_address=address)).json()
|
63 |
+
|
64 |
+
if "result" in response:
|
65 |
+
result = response["result"]
|
66 |
+
try:
|
67 |
+
abi = json.loads(result)
|
68 |
+
except json.JSONDecodeError:
|
69 |
+
print("Error: Failed to parse 'result' field as JSON")
|
70 |
+
sys.exit(1)
|
71 |
+
else:
|
72 |
+
abi = response.get("abi")
|
73 |
+
|
74 |
+
return abi if abi else []
|
75 |
+
|
76 |
+
|
77 |
+
def get_service_safe(service_id: int) -> str:
|
78 |
+
"""Gets the service Safe"""
|
79 |
+
service_registry = _get_contract(SERVICE_REGISTRY_ADDRESS)
|
80 |
+
service_safe_address = service_registry.functions.getService(service_id).call()[1]
|
81 |
+
return service_safe_address
|
82 |
+
|
83 |
+
|
84 |
+
def list_contract_functions(contract):
|
85 |
+
function_names = []
|
86 |
+
for item in contract.abi:
|
87 |
+
if item.get("type") == "function":
|
88 |
+
function_names.append(item.get("name"))
|
89 |
+
return function_names
|
90 |
+
|
91 |
+
|
92 |
+
def get_service_data(service_registry: Any, service_id: int) -> dict:
|
93 |
+
tmp_map = {}
|
94 |
+
# Get the list of addresses
|
95 |
+
# print(f"getting addresses from service id ={service_id}")
|
96 |
+
|
97 |
+
# available_functions = list_contract_functions(service_registry)
|
98 |
+
# print("Available Contract Functions:")
|
99 |
+
# for func in available_functions:
|
100 |
+
# print(f"- {func}")
|
101 |
+
|
102 |
+
data = service_registry.functions.getService(service_id).call()
|
103 |
+
try:
|
104 |
+
owner_data = service_registry.functions.ownerOf(service_id).call()
|
105 |
+
except Exception as e:
|
106 |
+
tqdm.write(f"Error: no owner data infor from {service_id}")
|
107 |
+
return None
|
108 |
+
# print(f"owner data = {owner_data}")
|
109 |
+
address = data[1]
|
110 |
+
state = data[-1]
|
111 |
+
# print(f"address = {address}")
|
112 |
+
# print(f"state={state}")
|
113 |
+
# PEARL trade
|
114 |
+
|
115 |
+
if address != "0x0000000000000000000000000000000000000000":
|
116 |
+
tmp_map[service_id] = {
|
117 |
+
"safe_address": address,
|
118 |
+
"state": state,
|
119 |
+
"owner_address": owner_data,
|
120 |
+
}
|
121 |
+
return tmp_map
|
122 |
+
|
123 |
+
|
124 |
+
def update_service_map(start: int = 1, end: int = 2000):
|
125 |
+
if os.path.exists(ROOT_DIR / "service_map.pkl"):
|
126 |
+
with open(ROOT_DIR / "service_map.pkl", "rb") as f:
|
127 |
+
service_map = pickle.load(f)
|
128 |
+
else:
|
129 |
+
service_map = {}
|
130 |
+
print(f"updating service map from service id={start}")
|
131 |
+
# we do not know which is the last service id right now
|
132 |
+
service_registry = _get_contract(SERVICE_REGISTRY_ADDRESS)
|
133 |
+
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
|
134 |
+
futures = []
|
135 |
+
for service_id in range(start, end):
|
136 |
+
futures.append(
|
137 |
+
executor.submit(
|
138 |
+
get_service_data,
|
139 |
+
service_registry,
|
140 |
+
service_id,
|
141 |
+
)
|
142 |
+
)
|
143 |
+
|
144 |
+
for future in tqdm(
|
145 |
+
as_completed(futures),
|
146 |
+
total=len(futures),
|
147 |
+
desc=f"Fetching all service data from contracts",
|
148 |
+
):
|
149 |
+
partial_dict = future.result()
|
150 |
+
if partial_dict:
|
151 |
+
service_map.update(partial_dict)
|
152 |
+
|
153 |
+
print(f"length of service map {len(service_map)}")
|
154 |
+
with open(ROOT_DIR / "service_map.pkl", "wb") as f:
|
155 |
+
pickle.dump(service_map, f)
|
156 |
+
|
157 |
+
|
158 |
+
def check_owner_staking_contract(owner_address: str) -> str:
|
159 |
+
staking = "non_staking"
|
160 |
+
owner_address = owner_address.lower()
|
161 |
+
# check quickstart staking contracts
|
162 |
+
qs_list = [x.lower() for x in STAKING_PROGRAMS_QS.values()]
|
163 |
+
if owner_address in qs_list:
|
164 |
+
return "quickstart"
|
165 |
+
|
166 |
+
# check pearl staking contracts
|
167 |
+
pearl_list = [x.lower() for x in STAKING_PROGRAMS_PEARL.values()]
|
168 |
+
if owner_address in pearl_list:
|
169 |
+
return "pearl"
|
170 |
+
|
171 |
+
# check legacy staking contracts
|
172 |
+
deprec_list = [x.lower() for x in DEPRECATED_STAKING_PROGRAMS.values()]
|
173 |
+
if owner_address in deprec_list:
|
174 |
+
return "quickstart"
|
175 |
+
|
176 |
+
return staking
|
177 |
+
|
178 |
+
|
179 |
+
def get_trader_address_staking(trader_address: str, service_map: dict) -> str:
|
180 |
+
# check if there is any service id linked with that trader address
|
181 |
+
|
182 |
+
found_key = -1
|
183 |
+
for key, value in service_map.items():
|
184 |
+
if value["safe_address"].lower() == trader_address.lower():
|
185 |
+
# found a service
|
186 |
+
found_key = key
|
187 |
+
break
|
188 |
+
|
189 |
+
if found_key == -1:
|
190 |
+
return "non_Olas"
|
191 |
+
owner = service_map[found_key]["owner_address"]
|
192 |
+
return check_owner_staking_contract(owner_address=owner)
|
193 |
+
|
194 |
+
|
195 |
+
def label_trades_by_staking(trades_df: pd.DataFrame, start: int = None) -> pd.DataFrame:
|
196 |
+
with open(ROOT_DIR / "service_map.pkl", "rb") as f:
|
197 |
+
service_map = pickle.load(f)
|
198 |
+
# get the last service id
|
199 |
+
keys = service_map.keys()
|
200 |
+
if start is None:
|
201 |
+
last_key = max(keys)
|
202 |
+
else:
|
203 |
+
last_key = start
|
204 |
+
print(f"last service key = {last_key}")
|
205 |
+
update_service_map(start=last_key)
|
206 |
+
all_traders = trades_df.trader_address.unique()
|
207 |
+
trades_df["staking"] = ""
|
208 |
+
for trader in tqdm(all_traders, desc="Labeling traders by staking", unit="trader"):
|
209 |
+
# tqdm.write(f"checking trader {trader}")
|
210 |
+
staking_label = get_trader_address_staking(trader, service_map)
|
211 |
+
if staking_label:
|
212 |
+
trades_df.loc[trades_df["trader_address"] == trader, "staking"] = (
|
213 |
+
staking_label
|
214 |
+
)
|
215 |
+
# tqdm.write(f"statking label {staking_label}")
|
216 |
+
return trades_df
|
217 |
+
|
218 |
+
|
219 |
+
def generate_retention_activity_file():
|
220 |
+
tools = pd.read_parquet(TMP_DIR / "tools.parquet")
|
221 |
+
tools["request_time"] = pd.to_datetime(tools["request_time"])
|
222 |
+
tools["request_date"] = tools["request_time"].dt.date
|
223 |
+
tools = tools.sort_values(by="request_time", ascending=True)
|
224 |
+
reduced_tools_df = tools[
|
225 |
+
["trader_address", "request_time", "market_creator", "request_date"]
|
226 |
+
]
|
227 |
+
print(f"length of reduced tools before labeling = {len(reduced_tools_df)}")
|
228 |
+
reduced_tools_df = label_trades_by_staking(trades_df=reduced_tools_df)
|
229 |
+
print(f"labeling of tools activity. {reduced_tools_df.staking.value_counts()}")
|
230 |
+
print(f"length of reduced tools after labeling = {len(reduced_tools_df)}")
|
231 |
+
reduced_tools_df = reduced_tools_df.sort_values(by="request_time", ascending=True)
|
232 |
+
reduced_tools_df["month_year_week"] = (
|
233 |
+
pd.to_datetime(tools["request_time"])
|
234 |
+
.dt.to_period("W")
|
235 |
+
.dt.start_time.dt.strftime("%b-%d-%Y")
|
236 |
+
)
|
237 |
+
reduced_tools_df.to_parquet(ROOT_DIR / "retention_activity.parquet")
|
238 |
+
with open(ROOT_DIR / "retention_activity.parquet", "rb") as f_in:
|
239 |
+
with gzip.open(ROOT_DIR / "retention_activity.parquet.gz", "wb") as f_out:
|
240 |
+
shutil.copyfileobj(f_in, f_out)
|
241 |
+
return True
|
242 |
+
|
243 |
+
|
244 |
+
def check_list_addresses(address_list: list):
|
245 |
+
with open(ROOT_DIR / "service_map.pkl", "rb") as f:
|
246 |
+
service_map = pickle.load(f)
|
247 |
+
# check if it is part of any service id on the map
|
248 |
+
mapping = {}
|
249 |
+
print(f"length of service map={len(service_map)}")
|
250 |
+
keys = service_map.keys()
|
251 |
+
last_key = max(keys)
|
252 |
+
|
253 |
+
print(f"last service key = {last_key}")
|
254 |
+
update_service_map(start=last_key)
|
255 |
+
found_key = -1
|
256 |
+
trader_types = []
|
257 |
+
for trader_address in address_list:
|
258 |
+
for key, value in service_map.items():
|
259 |
+
if value["safe_address"].lower() == trader_address.lower():
|
260 |
+
# found a service
|
261 |
+
found_key = key
|
262 |
+
mapping[trader_address] = "Olas"
|
263 |
+
trader_types.append("Olas")
|
264 |
+
break
|
265 |
+
|
266 |
+
if found_key == -1:
|
267 |
+
mapping[trader_address] = "non_Olas"
|
268 |
+
trader_types.append("non_Olas")
|
269 |
+
return mapping
|
270 |
+
|
271 |
+
|
272 |
+
def check_service_map():
|
273 |
+
with open(ROOT_DIR / "service_map.pkl", "rb") as f:
|
274 |
+
service_map = pickle.load(f)
|
275 |
+
# check if it is part of any service id on the map
|
276 |
+
mapping = {}
|
277 |
+
print(f"length of service map={len(service_map)}")
|
278 |
+
keys = service_map.keys()
|
279 |
+
last_key = max(keys)
|
280 |
+
print(f"last key ={last_key}")
|
281 |
+
missing_keys = 0
|
282 |
+
for i in range(1, last_key):
|
283 |
+
if i not in keys:
|
284 |
+
missing_keys += 1
|
285 |
+
print(f"missing key = {i}")
|
286 |
+
print(f"total missing keys = {missing_keys}")
|
287 |
+
|
288 |
+
|
289 |
+
if __name__ == "__main__":
|
290 |
+
# create_service_map()
|
291 |
+
trades_df = pd.read_parquet(JSON_DATA_DIR / "all_trades_df.parquet")
|
292 |
+
trades_df = trades_df.loc[trades_df["is_invalid"] == False]
|
293 |
+
|
294 |
+
trades_df = label_trades_by_staking(trades_df=trades_df)
|
295 |
+
print(trades_df.staking.value_counts())
|
296 |
+
# trades_df.to_parquet(TMP_DIR / "result_staking.parquet", index=False)
|
297 |
+
# generate_retention_activity_file()
|
298 |
+
# a_list = [
|
299 |
+
# "0x027592700fafc4db3221bb662d7bdc7f546a2bb5",
|
300 |
+
# "0x0845f4ad01a2f41da618848c7a9e56b64377965e",
|
301 |
+
# ]
|
302 |
+
# check_list_addresses(address_list=a_list)
|
303 |
+
# update_service_map()
|
304 |
+
# check_service_map()
|
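A minimal sketch of how the `service_map.pkl` built by `update_service_map` is typically consumed (assumes the pickle already exists under ROOT_DIR; the address below is a placeholder):

import pickle
from utils import ROOT_DIR

with open(ROOT_DIR / "service_map.pkl", "rb") as f:
    service_map = pickle.load(f)

trader = "0x0000000000000000000000000000000000000000"  # placeholder safe address
entry = next(
    (v for v in service_map.values() if v["safe_address"].lower() == trader.lower()),
    None,
)
print("Olas service safe" if entry else "non_Olas trader")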
scripts/tools.py
ADDED
@@ -0,0 +1,320 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
#   Copyright 2023 Valory AG
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# ------------------------------------------------------------------------------

import json
from typing import (
    Optional,
    List,
    Dict,
    Union,
    Any,
)
import pandas as pd
import requests
from datetime import datetime
from gnosis_timestamps import transform_timestamp_to_datetime
from requests.adapters import HTTPAdapter
from tqdm import tqdm
from urllib3 import Retry
from markets import add_market_creator
from concurrent.futures import ThreadPoolExecutor, as_completed
from web3_utils import (
    N_IPFS_RETRIES,
)
from utils import (
    clean,
    BLOCK_FIELD,
    limit_text,
    ROOT_DIR,
    JSON_DATA_DIR,
    MechEvent,
    MechEventName,
    MechRequest,
    MechResponse,
    EVENT_TO_MECH_STRUCT,
    REQUEST_ID,
    HTTP,
    HTTPS,
    get_result_values,
    get_vote,
    get_win_probability,
    get_prediction_values,
)

CONTRACTS_PATH = "contracts"
MECH_TO_INFO = {
    # this block number is when the creator had its first tx ever, and after this mech's creation
    "0xff82123dfb52ab75c417195c5fdb87630145ae81": ("old_mech_abi.json", 28911547),
    # this block number is when this mech was created
    "0x77af31de935740567cf4ff1986d04b2c964a786a": ("new_mech_abi.json", 30776879),
}
# optionally set the latest block to stop searching for the delivered events

EVENT_ARGUMENTS = "args"
DATA = "data"
IPFS_LINKS_SERIES_NAME = "ipfs_links"
BACKOFF_FACTOR = 1
STATUS_FORCELIST = [404, 500, 502, 503, 504]
DEFAULT_FILENAME = "tools.parquet"
ABI_ERROR = "The event signature did not match the provided ABI"
# HTTP_TIMEOUT = 10
# Increased because IPFS can be slow
HTTP_TIMEOUT = 15

IRRELEVANT_TOOLS = [
    "openai-text-davinci-002",
    "openai-text-davinci-003",
    "openai-gpt-3.5-turbo",
    "openai-gpt-4",
    "stabilityai-stable-diffusion-v1-5",
    "stabilityai-stable-diffusion-xl-beta-v2-2-2",
    "stabilityai-stable-diffusion-512-v2-1",
    "stabilityai-stable-diffusion-768-v2-1",
    "deepmind-optimization-strong",
    "deepmind-optimization",
]
# this is how frequently we will keep a snapshot of the progress so far in terms of blocks' batches
# for example, the value 1 means that for every `BLOCKS_CHUNK_SIZE` blocks that we search,
# we also store the snapshot
SNAPSHOT_RATE = 10
NUM_WORKERS = 10
GET_CONTENTS_BATCH_SIZE = 1000


class TimestampedRetry(Retry):
    def increment(self, *args, **kwargs):
        print(f"Retry attempt at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        return super().increment(*args, **kwargs)


def create_session() -> requests.Session:
    """Create a session with a retry strategy."""
    session = requests.Session()
    retry_strategy = TimestampedRetry(
        total=N_IPFS_RETRIES,
        backoff_factor=BACKOFF_FACTOR,
        status_forcelist=STATUS_FORCELIST,
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)
    for protocol in (HTTP, HTTPS):
        session.mount(protocol, adapter)

    return session


def request(
    session: requests.Session, url: str, timeout: int = HTTP_TIMEOUT
) -> Optional[requests.Response]:
    """Perform a request with a session."""
    try:
        response = session.get(url, timeout=timeout)
        response.raise_for_status()
    except requests.exceptions.HTTPError as exc:
        tqdm.write(f"HTTP error occurred: {exc}.")
    except Exception as exc:
        tqdm.write(f"Unexpected error occurred: {exc}.")
    else:
        return response
    return None


def parse_ipfs_response(
    session: requests.Session,
    url: str,
    event: MechEvent,
    event_name: MechEventName,
    response: requests.Response,
) -> Optional[Dict[str, str]]:
    """Parse a response from IPFS."""
    try:
        return response.json()
    except requests.exceptions.JSONDecodeError:
        # this is a workaround because the `metadata.json` file was introduced and removed multiple times
        if event_name == MechEventName.REQUEST and url != event.ipfs_request_link:
            url = event.ipfs_request_link
            response = request(session, url)
            if response is None:
                tqdm.write(f"Skipping {event=}.")
                return None

            try:
                return response.json()
            except requests.exceptions.JSONDecodeError:
                pass

    tqdm.write(f"Failed to parse response into json for {url=}.")
    return None


def parse_ipfs_tools_content(
    raw_content: Dict[str, str], event: MechEvent, event_name: MechEventName
) -> Optional[Union[MechRequest, MechResponse]]:
    """Parse tools content from IPFS."""
    struct = EVENT_TO_MECH_STRUCT.get(event_name)
    raw_content[REQUEST_ID] = str(event.requestId)
    raw_content[BLOCK_FIELD] = str(event.for_block)
    raw_content["sender"] = str(event.sender)

    try:
        mech_response = struct(**raw_content)
    except (ValueError, TypeError, KeyError):
        tqdm.write(f"Could not parse {limit_text(str(raw_content))}")
        return None

    if event_name == MechEventName.REQUEST and mech_response.tool in IRRELEVANT_TOOLS:
        return None

    return mech_response


def parse_json_events(json_events: dict, keys_to_traverse: List[int]) -> pd.DataFrame:
    """Function to parse the mech info in a json format"""
    all_records = []
    for key in keys_to_traverse:
        try:
            json_input = json_events[key]
            output = {}
            output["request_id"] = json_input["requestId"]
            output["request_block"] = json_input["blockNumber"]
            output["request_time"] = transform_timestamp_to_datetime(
                int(json_input["blockTimestamp"])
            )
            output["tx_hash"] = json_input["transactionHash"]
            output["prompt_request"] = json_input["ipfsContents"]["prompt"]
            output["tool"] = json_input["ipfsContents"]["tool"]
            output["nonce"] = json_input["ipfsContents"]["nonce"]
            output["trader_address"] = json_input["sender"]
            output["deliver_block"] = json_input["deliver"]["blockNumber"]
            error_value, error_message, prediction_params = get_result_values(
                json_input["deliver"]["ipfsContents"]["result"]
            )
            error_message_value = json_input.get("error_message", error_message)
            output["error"] = error_value
            output["error_message"] = error_message_value
            output["prompt_response"] = json_input["deliver"]["ipfsContents"]["prompt"]
            output["mech_address"] = json_input["deliver"]["sender"]
            p_yes_value, p_no_value, confidence_value, info_utility_value = (
                get_prediction_values(prediction_params)
            )
            output["p_yes"] = p_yes_value
            output["p_no"] = p_no_value
            output["confidence"] = confidence_value
            output["info_utility"] = info_utility_value
            output["vote"] = get_vote(p_yes_value, p_no_value)
            output["win_probability"] = get_win_probability(p_yes_value, p_no_value)
            all_records.append(output)
        except Exception as e:
            print(e)
            print(f"Error parsing the key ={key}. Noted as error")
            output["error"] = 1
            output["error_message"] = "Response parsing error"
            output["p_yes"] = None
            output["p_no"] = None
            output["confidence"] = None
            output["info_utility"] = None
            output["vote"] = None
            output["win_probability"] = None
            all_records.append(output)

    return pd.DataFrame.from_dict(all_records, orient="columns")


def transform_request(contents: pd.DataFrame) -> pd.DataFrame:
    """Transform the requests dataframe."""
    return clean(contents)


def transform_deliver(contents: pd.DataFrame) -> pd.DataFrame:
    """Transform the delivers dataframe."""
    unpacked_result = pd.json_normalize(contents.result)
    # drop result column if it exists
    if "result" in unpacked_result.columns:
        unpacked_result.drop(columns=["result"], inplace=True)

    # drop prompt column if it exists
    if "prompt" in unpacked_result.columns:
        unpacked_result.drop(columns=["prompt"], inplace=True)

    # rename prompt column to prompt_deliver
    unpacked_result.rename(columns={"prompt": "prompt_deliver"}, inplace=True)
    contents = pd.concat((contents, unpacked_result), axis=1)

    if "result" in contents.columns:
        contents.drop(columns=["result"], inplace=True)

    if "prompt" in contents.columns:
        contents.drop(columns=["prompt"], inplace=True)

    return clean(contents)


def parse_store_json_events_parallel(json_events: Dict[str, Any], output_filename: str):
    total_nr_events = len(json_events)
    ids_to_traverse = list(json_events.keys())
    print(f"Parsing {total_nr_events} events")
    contents = []
    with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        futures = []
        for i in range(0, total_nr_events, GET_CONTENTS_BATCH_SIZE):
            futures.append(
                executor.submit(
                    parse_json_events,
                    json_events,
                    ids_to_traverse[i : i + GET_CONTENTS_BATCH_SIZE],
                )
            )

        for future in tqdm(
            as_completed(futures),
            total=len(futures),
            desc="Fetching json contents",
        ):
            current_mech_contents = future.result()
            contents.append(current_mech_contents)

    tools = pd.concat(contents, ignore_index=True)
    print(f"Adding market creators info. Length of the tools file = {len(tools)}")
    tools = add_market_creator(tools)
    print(
        f"Length of the tools dataframe after adding market creators info = {len(tools)}"
    )
    print(tools.info())
    try:
        if "result" in tools.columns:
            tools = tools.drop(columns=["result"])
        tools.to_parquet(ROOT_DIR / output_filename, index=False)
    except Exception as e:
        print(f"Failed to write tools data: {e}")

    return tools


def generate_tools_file(input_filename: str, output_filename: str):
    """Function to parse the json mech events and generate the parquet tools file"""
    try:
        with open(JSON_DATA_DIR / input_filename, "r") as file:
            file_contents = json.load(file)
        parse_store_json_events_parallel(file_contents, output_filename)
    except Exception as e:
        print(f"An Exception happened while parsing the json events {e}")


if __name__ == "__main__":

    generate_tools_file()
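For reference, a hypothetical driver snippet for the parser above (not part of this commit): the input json filename below is an assumption and should be replaced by the actual mech events dump stored under JSON_DATA_DIR.

# Hypothetical invocation sketch; "merged_mech_events.json" is an assumed name.
from tools import generate_tools_file, DEFAULT_FILENAME

generate_tools_file(
    input_filename="merged_mech_events.json",  # assumed json events dump under JSON_DATA_DIR
    output_filename=DEFAULT_FILENAME,  # "tools.parquet", written under ROOT_DIR
)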
scripts/tools_metrics.py
ADDED
@@ -0,0 +1,95 @@
import pandas as pd
from typing import List
from utils import TMP_DIR, INC_TOOLS, ROOT_DIR


def get_error_data_by_market(
    tools_df: pd.DataFrame, inc_tools: List[str]
) -> pd.DataFrame:
    """Gets the error data for the given tools and calculates the error percentage."""
    tools_inc = tools_df[tools_df["tool"].isin(inc_tools)]
    error = (
        tools_inc.groupby(
            ["tool", "request_month_year_week", "market_creator", "error"], sort=False
        )
        .size()
        .unstack()
        .fillna(0)
        .reset_index()
    )
    error["error_perc"] = (error[1] / (error[0] + error[1])) * 100
    error["total_requests"] = error[0] + error[1]
    return error


def get_tool_winning_rate_by_market(
    tools_df: pd.DataFrame, inc_tools: List[str]
) -> pd.DataFrame:
    """Gets the tool winning rate data for the given tools by market and calculates the winning percentage."""
    tools_inc = tools_df[tools_df["tool"].isin(inc_tools)]
    tools_non_error = tools_inc[tools_inc["error"] != 1]
    tools_non_error.loc[:, "currentAnswer"] = tools_non_error["currentAnswer"].replace(
        {"no": "No", "yes": "Yes"}
    )
    tools_non_error = tools_non_error[
        tools_non_error["currentAnswer"].isin(["Yes", "No"])
    ]
    tools_non_error = tools_non_error[tools_non_error["vote"].isin(["Yes", "No"])]
    tools_non_error["win"] = (
        tools_non_error["currentAnswer"] == tools_non_error["vote"]
    ).astype(int)
    tools_non_error.columns = tools_non_error.columns.astype(str)
    wins = (
        tools_non_error.groupby(
            ["tool", "request_month_year_week", "market_creator", "win"], sort=False
        )
        .size()
        .unstack()
        .fillna(0)
    )
    wins["win_perc"] = (wins[1] / (wins[0] + wins[1])) * 100
    wins.reset_index(inplace=True)
    wins["total_request"] = wins[0] + wins[1]
    wins.columns = wins.columns.astype(str)
    # Convert request_month_year_week to string and explicitly set type for Altair
    # wins["request_month_year_week"] = wins["request_month_year_week"].astype(str)
    return wins


def prepare_tools(tools: pd.DataFrame) -> pd.DataFrame:
    tools["request_time"] = pd.to_datetime(tools["request_time"])
    tools = tools.sort_values(by="request_time", ascending=True)

    tools["request_month_year_week"] = (
        pd.to_datetime(tools["request_time"])
        .dt.to_period("W")
        .dt.start_time.dt.strftime("%b-%d-%Y")
    )
    # preparing the tools graph
    # adding the total
    tools_all = tools.copy(deep=True)
    tools_all["market_creator"] = "all"
    # merging both dataframes
    tools = pd.concat([tools, tools_all], ignore_index=True)
    tools = tools.sort_values(by="request_time", ascending=True)
    return tools


def compute_tools_based_datasets():
    try:
        tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
        tools_df = prepare_tools(tools_df)
    except Exception as e:
        print(f"Error reading old tools parquet file {e}")
        return None
    # error by markets
    error_by_markets = get_error_data_by_market(tools_df=tools_df, inc_tools=INC_TOOLS)
    error_by_markets.to_parquet(ROOT_DIR / "error_by_markets.parquet", index=False)
    try:
        tools_df = pd.read_parquet(TMP_DIR / "tools.parquet")
        tools_df = prepare_tools(tools_df)
    except Exception as e:
        print(f"Error reading old tools parquet file {e}")
        return None
    winning_df = get_tool_winning_rate_by_market(tools_df, inc_tools=INC_TOOLS)
    winning_df.to_parquet(ROOT_DIR / "winning_df.parquet", index=False)
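For readers unfamiliar with the groupby/unstack pattern used in get_error_data_by_market, here is a minimal, self-contained toy example (the data is invented, not from the dataset):

import pandas as pd

# Toy frame mimicking the columns that get_error_data_by_market expects.
toy = pd.DataFrame(
    {
        "tool": ["prediction-online"] * 4,
        "request_month_year_week": ["Jan-01-2024"] * 4,
        "market_creator": ["quickstart"] * 4,
        "error": [0, 0, 1, 0],
    }
)
error = (
    toy.groupby(
        ["tool", "request_month_year_week", "market_creator", "error"], sort=False
    )
    .size()
    .unstack()
    .fillna(0)
    .reset_index()
)
error["error_perc"] = (error[1] / (error[0] + error[1])) * 100
print(error)  # one error out of four requests -> 25% for this toy week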
scripts/update_tools_accuracy.py
ADDED
@@ -0,0 +1,120 @@
import os
import pandas as pd
import ipfshttpclient
from utils import INC_TOOLS
from typing import List
from utils import TMP_DIR, ROOT_DIR

ACCURACY_FILENAME = "tools_accuracy.csv"
IPFS_SERVER = "/dns/registry.autonolas.tech/tcp/443/https"
GCP_IPFS_SERVER = "/dns/registry.gcp.autonolas.tech/tcp/443/https"


def update_tools_accuracy(
    tools_acc: pd.DataFrame, tools_df: pd.DataFrame, inc_tools: List[str]
) -> pd.DataFrame:
    """To compute/update the latest accuracy information for the different mech tools"""

    # computation of the accuracy information
    tools_inc = tools_df[tools_df["tool"].isin(inc_tools)]
    # filtering errors
    tools_non_error = tools_inc[tools_inc["error"] != 1]
    tools_non_error.loc[:, "currentAnswer"] = tools_non_error["currentAnswer"].replace(
        {"no": "No", "yes": "Yes"}
    )
    tools_non_error = tools_non_error[
        tools_non_error["currentAnswer"].isin(["Yes", "No"])
    ]
    tools_non_error = tools_non_error[tools_non_error["vote"].isin(["Yes", "No"])]
    tools_non_error["win"] = (
        tools_non_error["currentAnswer"] == tools_non_error["vote"]
    ).astype(int)
    tools_non_error.columns = tools_non_error.columns.astype(str)

    wins = tools_non_error.groupby(["tool", "win"]).size().unstack().fillna(0)
    wins["tool_accuracy"] = (wins[1] / (wins[0] + wins[1])) * 100
    wins.reset_index(inplace=True)
    wins["total_requests"] = wins[0] + wins[1]
    wins.columns = wins.columns.astype(str)
    wins = wins[["tool", "tool_accuracy", "total_requests"]]

    no_timeline_info = False
    try:
        timeline = tools_non_error.groupby(["tool"])["request_time"].agg(["min", "max"])
        print("timeline dataset")
        print(timeline.head())
        acc_info = wins.merge(timeline, how="left", on="tool")
    except Exception:
        print("NO REQUEST TIME INFORMATION AVAILABLE")
        no_timeline_info = True
        acc_info = wins

    if tools_acc is None:
        print("Creating accuracy file for the first time")
        return acc_info

    # update the old information
    print("Updating accuracy information")
    tools_to_update = list(acc_info["tool"].values)
    print("tools to update")
    print(tools_to_update)
    existing_tools = list(tools_acc["tool"].values)
    # dt.strftime("%Y-%m-%d %H:%M:%S")
    acc_info["min"] = acc_info["min"].dt.strftime("%Y-%m-%d %H:%M:%S")
    acc_info["max"] = acc_info["max"].dt.strftime("%Y-%m-%d %H:%M:%S")
    for tool in tools_to_update:
        new_accuracy = acc_info[acc_info["tool"] == tool]["tool_accuracy"].values[0]
        new_volume = acc_info[acc_info["tool"] == tool]["total_requests"].values[0]
        if no_timeline_info:
            new_min_timeline = None
            new_max_timeline = None
        else:
            new_min_timeline = acc_info[acc_info["tool"] == tool]["min"].values[0]
            new_max_timeline = acc_info[acc_info["tool"] == tool]["max"].values[0]
        if tool in existing_tools:
            tools_acc.loc[tools_acc["tool"] == tool, "tool_accuracy"] = new_accuracy
            tools_acc.loc[tools_acc["tool"] == tool, "total_requests"] = new_volume
            tools_acc.loc[tools_acc["tool"] == tool, "min"] = new_min_timeline
            tools_acc.loc[tools_acc["tool"] == tool, "max"] = new_max_timeline
        else:
            # new tool to add to the file
            # tool,tool_accuracy,total_requests,min,max
            new_row = {
                "tool": tool,
                "tool_accuracy": new_accuracy,
                "total_requests": new_volume,
                "min": new_min_timeline,
                "max": new_max_timeline,
            }
            tools_acc = pd.concat([tools_acc, pd.DataFrame([new_row])], ignore_index=True)

    print(tools_acc)
    return tools_acc


def compute_tools_accuracy():
    print("Computing accuracy of tools")
    print("Reading tools parquet file")
    tools = pd.read_parquet(TMP_DIR / "tools.parquet")
    # Computing tools accuracy information
    print("Computing tool accuracy information")
    # Check if the file exists
    acc_data = None
    if os.path.exists(ROOT_DIR / ACCURACY_FILENAME):
        acc_data = pd.read_csv(ROOT_DIR / ACCURACY_FILENAME)
    acc_data = update_tools_accuracy(acc_data, tools, INC_TOOLS)

    # save acc_data into a CSV file
    print("Saving into a csv file")
    acc_data.to_csv(ROOT_DIR / ACCURACY_FILENAME, index=False)
    print(acc_data.head())

    # save the data into IPFS
    client = ipfshttpclient.connect(IPFS_SERVER)
    result = client.add(ROOT_DIR / ACCURACY_FILENAME)
    print(f"HASH of the tools accuracy file: {result['Hash']}")


if __name__ == "__main__":
    compute_tools_accuracy()
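A toy illustration of the win-rate computation at the heart of update_tools_accuracy (the answers and votes below are invented):

import pandas as pd

# Toy frame with only the columns the accuracy computation relies on.
toy = pd.DataFrame(
    {
        "tool": ["prediction-online"] * 3 + ["claude-prediction-online"] * 2,
        "currentAnswer": ["Yes", "No", "Yes", "No", "No"],
        "vote": ["Yes", "Yes", "Yes", "No", "Yes"],
    }
)
toy["win"] = (toy["currentAnswer"] == toy["vote"]).astype(int)
wins = toy.groupby(["tool", "win"]).size().unstack().fillna(0)
wins["tool_accuracy"] = (wins[1] / (wins[0] + wins[1])) * 100
print(wins["tool_accuracy"])
# prediction-online ~66.7%, claude-prediction-online 50.0%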
scripts/utils.py
ADDED
@@ -0,0 +1,430 @@
import json
import os
import time
from typing import List, Any, Optional, Union, Tuple
import numpy as np
import pandas as pd
import gc
import re
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from enum import Enum
from string import Template
from json.decoder import JSONDecodeError

DEFAULT_MECH_FEE = 0.01
REDUCE_FACTOR = 0.25
SLEEP = 0.5
REQUEST_ID_FIELD = "request_id"
SCRIPTS_DIR = Path(__file__).parent
ROOT_DIR = SCRIPTS_DIR.parent
JSON_DATA_DIR = ROOT_DIR / "json_data"
HIST_DIR = ROOT_DIR / "historical_data"
TMP_DIR = ROOT_DIR / "tmp"
BLOCK_FIELD = "block"
CID_PREFIX = "f01701220"
REQUEST_ID = "requestId"
REQUEST_SENDER = "sender"
PROMPT_FIELD = "prompt"
HTTP = "http://"
HTTPS = HTTP[:4] + "s" + HTTP[4:]
FORMAT_UPDATE_BLOCK_NUMBER = 30411638
INVALID_ANSWER_HEX = (
    "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
)
IPFS_ADDRESS = "https://gateway.autonolas.tech/ipfs/"
GCP_IPFS_ADDRESS = "https://gateway.gcp.autonolas.tech/ipfs/"

INC_TOOLS = [
    "prediction-online",
    "prediction-offline",
    "claude-prediction-online",
    "claude-prediction-offline",
    "prediction-offline-sme",
    "prediction-online-sme",
    "prediction-request-rag",
    "prediction-request-reasoning",
    "prediction-url-cot-claude",
    "prediction-request-rag-claude",
    "prediction-request-reasoning-claude",
    "superforcaster",
]
SUBGRAPH_URL = Template(
    """https://gateway-arbitrum.network.thegraph.com/api/${subgraph_api_key}/subgraphs/id/7s9rGBffUTL8kDZuxvvpuc46v44iuDarbrADBFw5uVp2"""
)
OMEN_SUBGRAPH_URL = Template(
    """https://gateway-arbitrum.network.thegraph.com/api/${subgraph_api_key}/subgraphs/id/9fUVQpFwzpdWS9bq5WkAnmKbNNcoBwatMR4yZq81pbbz"""
)
NETWORK_SUBGRAPH_URL = Template(
    """https://gateway-arbitrum.network.thegraph.com/api/${subgraph_api_key}/subgraphs/id/FxV6YUix58SpYmLBwc9gEHkwjfkqwe1X5FJQjn8nKPyA"""
)
# THEGRAPH_ENDPOINT = (
#     "https://api.studio.thegraph.com/query/78829/mech-predict/version/latest"
# )
MECH_SUBGRAPH_URL = Template(
    """https://gateway.thegraph.com/api/${subgraph_api_key}/subgraphs/id/4YGoX3iXUni1NBhWJS5xyKcntrAzssfytJK7PQxxQk5g"""
)

SUBGRAPH_API_KEY = os.environ.get("SUBGRAPH_API_KEY", None)
RPC = os.environ.get("RPC", None)


class MechEventName(Enum):
    """The mech's event names."""

    REQUEST = "Request"
    DELIVER = "Deliver"


@dataclass
class MechEvent:
    """A mech's on-chain event representation."""

    for_block: int
    requestId: int
    data: bytes
    sender: str

    def _ipfs_link(self) -> Optional[str]:
        """Get the ipfs link for the data."""
        return f"{IPFS_ADDRESS}{CID_PREFIX}{self.data.hex()}"

    @property
    def ipfs_request_link(self) -> Optional[str]:
        """Get the IPFS link for the request."""
        return f"{self._ipfs_link()}/metadata.json"

    @property
    def ipfs_deliver_link(self) -> Optional[str]:
        """Get the IPFS link for the deliver."""
        if self.requestId is None:
            return None
        return f"{self._ipfs_link()}/{self.requestId}"

    def ipfs_link(self, event_name: MechEventName) -> Optional[str]:
        """Get the ipfs link based on the event."""
        if event_name == MechEventName.REQUEST:
            if self.for_block < FORMAT_UPDATE_BLOCK_NUMBER:
                return self._ipfs_link()
            return self.ipfs_request_link
        if event_name == MechEventName.DELIVER:
            return self.ipfs_deliver_link
        return None


@dataclass(init=False)
class MechRequest:
    """A structure for a request to a mech."""

    request_id: Optional[int]
    request_block: Optional[int]
    prompt_request: Optional[str]
    tool: Optional[str]
    nonce: Optional[str]
    trader_address: Optional[str]

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the request ignoring extra keys."""
        self.request_id = int(kwargs.pop(REQUEST_ID, 0))
        self.request_block = int(kwargs.pop(BLOCK_FIELD, 0))
        self.prompt_request = kwargs.pop(PROMPT_FIELD, None)
        self.tool = kwargs.pop("tool", None)
        self.nonce = kwargs.pop("nonce", None)
        self.trader_address = kwargs.pop("sender", None)


@dataclass(init=False)
class PredictionResponse:
    """A response of a prediction."""

    p_yes: float
    p_no: float
    confidence: float
    info_utility: float
    vote: Optional[str]
    win_probability: Optional[float]

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the mech's prediction ignoring extra keys."""
        try:
            self.p_yes = float(kwargs.pop("p_yes"))
            self.p_no = float(kwargs.pop("p_no"))
            self.confidence = float(kwargs.pop("confidence"))
            self.info_utility = float(kwargs.pop("info_utility"))
            self.win_probability = 0

            # Validate probabilities
            probabilities = {
                "p_yes": self.p_yes,
                "p_no": self.p_no,
                "confidence": self.confidence,
                "info_utility": self.info_utility,
            }

            for name, prob in probabilities.items():
                if not 0 <= prob <= 1:
                    raise ValueError(f"{name} probability is out of bounds: {prob}")

            if self.p_yes + self.p_no != 1:
                raise ValueError(
                    f"Sum of p_yes and p_no is not 1: {self.p_yes} + {self.p_no}"
                )

            self.vote = self.get_vote()
            self.win_probability = self.get_win_probability()

        except KeyError as e:
            raise KeyError(f"Missing key in PredictionResponse: {e}")
        except ValueError as e:
            raise ValueError(f"Invalid value in PredictionResponse: {e}")

    def get_vote(self) -> Optional[str]:
        """Return the vote."""
        if self.p_no == self.p_yes:
            return None
        if self.p_no > self.p_yes:
            return "No"
        return "Yes"

    def get_win_probability(self) -> Optional[float]:
        """Return the probability estimation for winning with vote."""
        return max(self.p_no, self.p_yes)


@dataclass(init=False)
class MechResponse:
    """A structure for the response of a mech."""

    request_id: int
    deliver_block: Optional[int]
    result: Optional[PredictionResponse]
    error: Optional[str]
    error_message: Optional[str]
    prompt_response: Optional[str]
    mech_address: Optional[str]

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the mech's response ignoring extra keys."""
        self.error = kwargs.get("error", None)
        self.request_id = int(kwargs.get(REQUEST_ID, 0))
        self.deliver_block = int(kwargs.get(BLOCK_FIELD, 0))
        self.result = kwargs.get("result", None)
        self.prompt_response = kwargs.get(PROMPT_FIELD, None)
        self.mech_address = kwargs.get("sender", None)

        if self.result != "Invalid response":
            self.error_message = kwargs.get("error_message", None)

            try:
                if isinstance(self.result, str):
                    kwargs = json.loads(self.result)
                    self.result = PredictionResponse(**kwargs)
                    self.error = 0

            except JSONDecodeError:
                self.error_message = "Response parsing error"
                self.error = 1

            except Exception as e:
                self.error_message = str(e)
                self.error = 1

        else:
            self.error_message = "Invalid response from tool"
            self.error = 1
            self.result = None


EVENT_TO_MECH_STRUCT = {
    MechEventName.REQUEST: MechRequest,
    MechEventName.DELIVER: MechResponse,
}


def transform_to_datetime(x):
    return datetime.fromtimestamp(int(x), tz=timezone.utc)


def measure_execution_time(func):
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        execution_time = end_time - start_time
        print(f"Execution time: {execution_time:.6f} seconds")
        return result

    return wrapper


def limit_text(text: str, limit: int = 200) -> str:
    """Limit the given text"""
    if len(text) > limit:
        return f"{text[:limit]}..."
    return text


def check_for_dicts(df: pd.DataFrame) -> List[str]:
    """Check for columns that contain dictionaries."""
    dict_columns = []
    for column in df.columns:
        if df[column].apply(lambda x: isinstance(x, dict)).any():
            dict_columns.append(column)
    return dict_columns


def drop_dict_rows(df: pd.DataFrame, dict_columns: List[str]) -> pd.DataFrame:
    """Drop rows that contain dictionaries."""
    for column in dict_columns:
        df = df[~df[column].apply(lambda x: isinstance(x, dict))]
    return df


def clean(df: pd.DataFrame) -> pd.DataFrame:
    """Clean the dataframe."""
    dict_columns = check_for_dicts(df)
    df = drop_dict_rows(df, dict_columns)
    cleaned = df.drop_duplicates()
    cleaned[REQUEST_ID_FIELD] = cleaned[REQUEST_ID_FIELD].astype("str")
    return cleaned


def gen_event_filename(event_name: MechEventName) -> str:
    """Generate the filename of an event."""
    return f"{event_name.value.lower()}s.parquet"


def read_n_last_lines(filename: str, n: int = 1) -> str:
    """Return the `n` last lines' content of a file."""
    num_newlines = 0
    with open(filename, "rb") as f:
        try:
            f.seek(-2, os.SEEK_END)
            while num_newlines < n:
                f.seek(-2, os.SEEK_CUR)
                if f.read(1) == b"\n":
                    num_newlines += 1
        except OSError:
            f.seek(0)
        last_line = f.readline().decode()
    return last_line


def get_question(text: str) -> str:
    """Get the question from a text."""
    # Regex to find text within double quotes
    pattern = r'"([^"]*)"'

    # Find all occurrences
    questions = re.findall(pattern, text)

    # Assuming you want the first question if there are multiple
    question = questions[0] if questions else None

    return question


def current_answer(text: str, fpmms: pd.DataFrame) -> Optional[str]:
    """Get the current answer for a question."""
    row = fpmms[fpmms["title"] == text]
    if row.shape[0] == 0:
        return None
    return row["currentAnswer"].values[0]


def convert_hex_to_int(x: Union[str, float]) -> Union[int, float]:
    """Convert hex to int"""
    if isinstance(x, float):
        return np.nan
    if isinstance(x, str):
        if x == INVALID_ANSWER_HEX:
            return -1
        return int(x, 16)


def wei_to_unit(wei: int) -> float:
    """Converts wei to currency unit."""
    return wei / 10**18


def get_vote(p_yes, p_no) -> Optional[str]:
    """Return the vote."""
    if p_no == p_yes:
        return None
    if p_no > p_yes:
        return "No"
    return "Yes"


def get_win_probability(p_yes, p_no) -> Optional[float]:
    """Return the probability estimation for winning with vote."""
    return max(p_no, p_yes)


def get_result_values(result: str) -> Tuple:
    if result == "Invalid response":
        return 1, "Invalid response from tool", None
    error_message = None
    params = None
    try:
        if isinstance(result, str):
            params = json.loads(result)
        error_value = 0

    except JSONDecodeError:
        error_message = "Response parsing error"
        error_value = 1

    except Exception as e:
        error_message = str(e)
        error_value = 1
    return error_value, error_message, params


def get_prediction_values(params: dict) -> Tuple:
    p_yes = float(params.pop("p_yes"))
    p_no = float(params.pop("p_no"))
    confidence = float(params.pop("confidence"))
    info_utility = float(params.pop("info_utility"))
    return p_yes, p_no, confidence, info_utility


def to_content(q: str) -> dict[str, Any]:
    """Convert the given query string to payload content, i.e., add it under a `queries` key and convert it to bytes."""
    finalized_query = {
        "query": q,
        "variables": None,
        "extensions": {"headers": None},
    }
    return finalized_query


def read_parquet_files(tools_filename: str, trades_filename: str):
    # Check if tools.parquet is in the same directory
    try:
        tools = pd.read_parquet(ROOT_DIR / tools_filename)

        # make sure creator_address is in the columns
        assert "trader_address" in tools.columns, "trader_address column not found"

        # lowercase and strip creator_address
        tools["trader_address"] = tools["trader_address"].str.lower().str.strip()

        # drop duplicates
        tools.drop_duplicates(inplace=True)

        print(f"{tools_filename} loaded")
    except FileNotFoundError:
        print("tools.parquet not found. Please run tools.py first.")
        return
    try:
        fpmmTrades = pd.read_parquet(ROOT_DIR / trades_filename)
        fpmmTrades["trader_address"] = (
            fpmmTrades["trader_address"].str.lower().str.strip()
        )
    except FileNotFoundError:
        print("fpmmsTrades.parquet not found.")
        return

    return tools, fpmmTrades
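As a small, hedged illustration of how the dataclasses above parse a mech "deliver" payload (the payload values below are made up):

import json
from utils import MechResponse

# Invented deliver payload in the shape the parsers above expect.
raw = {
    "requestId": "12345",
    "block": "30800000",
    "result": json.dumps(
        {"p_yes": 0.75, "p_no": 0.25, "confidence": 0.8, "info_utility": 0.5}
    ),
    "prompt": "Will it rain tomorrow?",
    "sender": "0x77af31de935740567cf4ff1986d04b2c964a786a",
}
response = MechResponse(**raw)
print(response.error, response.result.vote, response.result.win_probability)
# 0 Yes 0.75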
scripts/web3_utils.py
ADDED
@@ -0,0 +1,276 @@
import sys
import pickle
import gc
import time
import requests
from functools import partial
from string import Template
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from collections import defaultdict
from tqdm import tqdm
from web3 import Web3
from typing import Any, Optional
from web3.types import BlockParams
from utils import (
    JSON_DATA_DIR,
    ROOT_DIR,
    SUBGRAPH_API_KEY,
    to_content,
    SUBGRAPH_URL,
    HIST_DIR,
    TMP_DIR,
)
from queries import conditional_tokens_gc_user_query, omen_xdai_trades_query
import pandas as pd

REDUCE_FACTOR = 0.25
SLEEP = 0.5
QUERY_BATCH_SIZE = 1000
FPMM_QS_CREATOR = "0x89c5cc945dd550bcffb72fe42bff002429f46fec"
FPMM_PEARL_CREATOR = "0xFfc8029154ECD55ABED15BD428bA596E7D23f557"
LATEST_BLOCK: Optional[int] = None
LATEST_BLOCK_NAME: BlockParams = "latest"
BLOCK_DATA_NUMBER = "number"
BLOCKS_CHUNK_SIZE = 10_000
N_IPFS_RETRIES = 4
N_RPC_RETRIES = 100
RPC_POLL_INTERVAL = 0.05
SUBGRAPH_POLL_INTERVAL = 0.05
IPFS_POLL_INTERVAL = 0.2  # 5 calls per second
OMEN_SUBGRAPH_URL = Template(
    """https://gateway-arbitrum.network.thegraph.com/api/${subgraph_api_key}/subgraphs/id/9fUVQpFwzpdWS9bq5WkAnmKbNNcoBwatMR4yZq81pbbz"""
)

headers = {
    "Accept": "application/json, multipart/mixed",
    "Content-Type": "application/json",
}


def parse_args() -> str:
    """Parse the arguments and return the RPC."""
    if len(sys.argv) != 2:
        raise ValueError("Expected the RPC as a positional argument.")
    return sys.argv[1]


def read_abi(abi_path: str) -> str:
    """Read and return the wxDAI contract's ABI."""
    with open(abi_path) as abi_file:
        return abi_file.read()


def update_block_request_map(block_request_id_map: dict) -> None:
    print("Saving block request id map info")
    with open(JSON_DATA_DIR / "block_request_id_map.pickle", "wb") as handle:
        pickle.dump(block_request_id_map, handle, protocol=pickle.HIGHEST_PROTOCOL)


def reduce_window(contract_instance, event, from_block, batch_size, latest_block):
    """Dynamically reduce the batch size window."""
    keep_fraction = 1 - REDUCE_FACTOR
    events_filter = contract_instance.events[event].build_filter()
    events_filter.fromBlock = from_block
    batch_size = int(batch_size * keep_fraction)
    events_filter.toBlock = min(from_block + batch_size, latest_block)
    tqdm.write(f"RPC timed out! Resizing batch size to {batch_size}.")
    time.sleep(SLEEP)
    return events_filter, batch_size


def block_number_to_timestamp(block_number: int, web3: Web3) -> str:
    """Convert a block number to a timestamp."""
    block = web3.eth.get_block(block_number)
    timestamp = datetime.utcfromtimestamp(block["timestamp"])
    try:
        timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S")
        timestamp = datetime.strptime(timestamp_str, "%Y-%m-%dT%H:%M:%S.%f")
    except Exception:
        timestamp = datetime.utcfromtimestamp(block["timestamp"])
    return timestamp.strftime("%Y-%m-%d %H:%M:%S")


def parallelize_timestamp_conversion(df: pd.DataFrame, function: callable) -> list:
    """Parallelize the timestamp conversion."""
    block_numbers = df["request_block"].tolist()
    with ThreadPoolExecutor(max_workers=10) as executor:
        results = list(
            tqdm(executor.map(function, block_numbers), total=len(block_numbers))
        )
    return results


def updating_timestamps(rpc: str, tools_filename: str):
    web3 = Web3(Web3.HTTPProvider(rpc))

    tools = pd.read_parquet(TMP_DIR / tools_filename)

    # Convert block number to timestamp
    print("Converting block number to timestamp")
    t_map = pickle.load(open(TMP_DIR / "t_map.pkl", "rb"))
    tools["request_time"] = tools["request_block"].map(t_map)

    no_data = tools["request_time"].isna().sum()
    print(f"Total rows with no request time info = {no_data}")

    # Identify tools with missing request_time and fill them
    missing_time_indices = tools[tools["request_time"].isna()].index
    if not missing_time_indices.empty:
        partial_block_number_to_timestamp = partial(
            block_number_to_timestamp, web3=web3
        )
        missing_timestamps = parallelize_timestamp_conversion(
            tools.loc[missing_time_indices], partial_block_number_to_timestamp
        )

        # Update the original DataFrame with the missing timestamps
        for i, timestamp in zip(missing_time_indices, missing_timestamps):
            tools.at[i, "request_time"] = timestamp

    tools["request_month_year"] = pd.to_datetime(tools["request_time"]).dt.strftime(
        "%Y-%m"
    )
    tools["request_month_year_week"] = (
        pd.to_datetime(tools["request_time"])
        .dt.to_period("W")
        .dt.start_time.dt.strftime("%b-%d-%Y")
    )

    # Save the tools data after the updates on the content
    print(f"Updating file {tools_filename} with timestamps")
    tools.to_parquet(TMP_DIR / tools_filename, index=False)

    # Update t_map with new timestamps
    new_timestamps = (
        tools[["request_block", "request_time"]]
        .dropna()
        .set_index("request_block")
        .to_dict()["request_time"]
    )
    t_map.update(new_timestamps)

    # filtering old timestamps
    cutoff_date = datetime(2024, 9, 9)
    filtered_map = {
        k: v
        for k, v in t_map.items()
        if datetime.strptime(v, "%Y-%m-%d %H:%M:%S") < cutoff_date
    }

    with open(ROOT_DIR / "t_map.pkl", "wb") as f:
        pickle.dump(filtered_map, f)

    # clean and release all memory
    del tools
    del t_map
    gc.collect()


def query_conditional_tokens_gc_subgraph(creator: str) -> dict[str, Any]:
    """Query the subgraph."""

    subgraph = SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
    all_results: dict[str, Any] = {"data": {"user": {"userPositions": []}}}
    userPositions_id_gt = ""
    while True:
        query = conditional_tokens_gc_user_query.substitute(
            id=creator.lower(),
            first=QUERY_BATCH_SIZE,
            userPositions_id_gt=userPositions_id_gt,
        )
        content_json = {"query": query}
        # print("sending query to subgraph")
        res = requests.post(subgraph, headers=headers, json=content_json)
        result_json = res.json()
        # print(f"result = {result_json}")
        user_data = result_json.get("data", {}).get("user", {})

        if not user_data:
            break

        user_positions = user_data.get("userPositions", [])

        if user_positions:
            all_results["data"]["user"]["userPositions"].extend(user_positions)
            userPositions_id_gt = user_positions[len(user_positions) - 1]["id"]
        else:
            break

    if len(all_results["data"]["user"]["userPositions"]) == 0:
        return {"data": {"user": None}}

    return all_results


def query_omen_xdai_subgraph(
    trader_category: str,
    from_timestamp: float,
    to_timestamp: float,
    fpmm_from_timestamp: float,
    fpmm_to_timestamp: float,
) -> dict[str, Any]:
    """Query the subgraph."""

    omen_subgraph = OMEN_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
    print(f"omen_subgraph = {omen_subgraph}")
    grouped_results = defaultdict(list)
    id_gt = ""
    if trader_category == "quickstart":
        creator_id = FPMM_QS_CREATOR.lower()
    else:  # pearl
        creator_id = FPMM_PEARL_CREATOR.lower()

    while True:
        query = omen_xdai_trades_query.substitute(
            fpmm_creator=creator_id,
            creationTimestamp_gte=int(from_timestamp),
            creationTimestamp_lte=int(to_timestamp),
            fpmm_creationTimestamp_gte=int(fpmm_from_timestamp),
            fpmm_creationTimestamp_lte=int(fpmm_to_timestamp),
            first=QUERY_BATCH_SIZE,
            id_gt=id_gt,
        )
        print(f"omen query={query}")
        content_json = to_content(query)

        res = requests.post(omen_subgraph, headers=headers, json=content_json)
        result_json = res.json()
        # print(f"result = {result_json}")
        user_trades = result_json.get("data", {}).get("fpmmTrades", [])

        if not user_trades:
            break

        for trade in user_trades:
            fpmm_id = trade.get("fpmm", {}).get("id")
            grouped_results[fpmm_id].append(trade)

        id_gt = user_trades[len(user_trades) - 1]["id"]

    all_results = {
        "data": {
            "fpmmTrades": [
                trade
                for trades_list in grouped_results.values()
                for trade in trades_list
            ]
        }
    }

    return all_results


# def get_earliest_block(event_name: MechEventName) -> int:
#     """Get the earliest block number to use when filtering for events."""
#     filename = gen_event_filename(event_name)
#     if not os.path.exists(ROOT_DIR / filename):
#         return 0

#     df = pd.read_parquet(ROOT_DIR / filename)
#     block_field = f"{event_name.value.lower()}_{BLOCK_FIELD}"
#     earliest_block = int(df[block_field].max())
#     # clean and release all memory
#     del df
#     gc.collect()
#     return earliest_block
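Finally, a hedged sketch of how query_omen_xdai_subgraph might be called (not part of the commit); it requires SUBGRAPH_API_KEY to be exported in the environment, and the one-week window below is arbitrary:

from datetime import datetime, timezone
from web3_utils import query_omen_xdai_subgraph

# Arbitrary one-week window, only meant to illustrate the call signature.
start = datetime(2024, 9, 1, tzinfo=timezone.utc).timestamp()
end = datetime(2024, 9, 8, tzinfo=timezone.utc).timestamp()
trades = query_omen_xdai_subgraph(
    trader_category="quickstart",
    from_timestamp=start,
    to_timestamp=end,
    fpmm_from_timestamp=start,
    fpmm_to_timestamp=end,
)
print(len(trades["data"]["fpmmTrades"]))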