from distutils.core import setup

from setuptools import find_packages

# When publishing the Docker image, a script checks for
# the first line with "version" and an equals sign to get the version.
version = '1.0.0'

install_requires = [
    'bokeh>=0.13',
    'expiringdict>=1.1.4',
    'injector>=0.16.2',
    'joblib>=0.13.2',
    'keras>=2.3',
    'mmh3~=3.0.0',
    'numpy',
    # Required for saving plots.
    'selenium>=3.141.0',
    'scikit-multiflow>=0.3.0',
    'spacy>=2.2',
    'tqdm>=4.19',
]

test_deps = [
    'pytest',
]

setup(
    name='decai',
    version=version,
    packages=find_packages(),
    url='https://github.com/microsoft/0xDeCA10B',
    license='MIT',
    author="Justin D. Harris",
    author_email='',
    description="Simulate Decentralized & Collaborative AI for Sharing Updatable Models.",
    install_requires=install_requires,
    tests_require=test_deps,
    extras_require=dict(
        test=test_deps,
    ),
)
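
# A minimal sketch (an assumption, not the actual publishing script) of the
# version check described in the comment above: take the first line that
# contains "version" and an equals sign, then strip the quotes.
def read_version(path: str = 'setup.py') -> str:
    with open(path) as f:
        for line in f:
            if 'version' in line and '=' in line:
                return line.split('=', 1)[1].strip().strip('\'"')
    raise ValueError(f"No version line found in {path}.")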
import json
from collections import defaultdict
from dataclasses import dataclass
from itertools import cycle
from logging import Logger
from operator import itemgetter
from pathlib import Path
from typing import Dict, List

from bokeh import colors
from bokeh.io import export_png
from bokeh.models import AdaptiveTicker, FuncTickFormatter, Legend, PrintfTickFormatter
from bokeh.plotting import figure, output_file
from injector import Injector, inject

from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent


@inject
@dataclass
class SimulationCombiner(object):
    _logger: Logger

    def combine(self, runs: List[Dict], img_save_path: str):
        """
        Combine the plots from several saved runs into one image.

        :param runs: For each run, a `dict` with the display `name` and the `path` to the saved run data.
        :param img_save_path: The path at which to save the combined plot.
        """
        output_file('combined_plots.html')
        plot = figure(title="Balances & Accuracy on Hidden Test Set", )
        plot.width = 800
        plot.height = 800

        plot.xaxis.axis_label = "Time (days)"
        plot.yaxis.axis_label = "Percent"
        plot.title.text_font_size = '20pt'
        plot.xaxis.major_label_text_font_size = '16pt'
        plot.xaxis.axis_label_text_font_size = '16pt'
        plot.yaxis.major_label_text_font_size = '16pt'
        plot.yaxis.axis_label_text_font_size = '16pt'

        plot.xaxis[0].ticker = AdaptiveTicker(base=5 * 24 * 60 * 60)
        plot.xgrid[0].ticker = AdaptiveTicker(base=24 * 60 * 60)

        # JavaScript code.
        plot.xaxis[0].formatter = FuncTickFormatter(code="""
            return (tick / 86400).toFixed(0);
        """)
        plot.yaxis[0].formatter = PrintfTickFormatter(format="%0.1f%%")

        # TODO Make plot wider (or maybe it's ok for the paper).

        good_colors = cycle([
            colors.named.green,
            colors.named.lawngreen,
            colors.named.darkgreen,
            colors.named.limegreen,
        ])
        bad_colors = cycle([
            colors.named.red,
            colors.named.darkred,
            colors.named.orangered,
            colors.named.indianred,
        ])
        accuracy_colors = cycle([
            colors.named.blue,
            colors.named.cadetblue,
            colors.named.cornflowerblue,
            colors.named.darkblue,
        ])
        baseline_accuracy_colors = cycle([
            colors.named.black,
            colors.named.darkgrey,
            colors.named.slategrey,
            colors.named.darkslategrey,
        ])
        line_dashes = cycle([
            'solid',
            'dashed',
            'dotted',
            'dotdash',
            'dashdot',
        ])

        legend = []

        for run in runs:
            name = run['name']
            path = run['path']
            line_dash = next(line_dashes)
            self._logger.info("Opening \"%s\".", path)
            with open(path) as f:
                data = json.load(f)
                baseline_accuracy = data['baselineAccuracy']
                if baseline_accuracy is not None:
                    self._logger.debug("Baseline accuracy: %s", baseline_accuracy)
                    r = plot.ray(x=[0], y=[baseline_accuracy * 100], length=0, angle=0, line_width=2,
                                 line_dash=line_dash,
                                 color=next(baseline_accuracy_colors))
                    legend.append((f"{name} accuracy when trained with all data: {baseline_accuracy * 100:0.1f}%", [r]))

                agents: Dict[str, Agent] = dict()
                for agent in data['agents']:
                    agent = Agent(**agent)
                    agents[agent.address] = agent

                l = plot.line(x=[d['t'] for d in data['accuracies']],
                              y=[d['accuracy'] * 100 for d in data['accuracies']],
                              line_dash=line_dash,
                              line_width=2,
                              color=next(accuracy_colors),
                              )
                legend.append((f"{name} Accuracy", [l]))

                agent_balance_data = defaultdict(list)
                for balance_data in data['balances']:
                    agent = balance_data['a']
                    agent_balance_data[agent].append(
                        (balance_data['t'], balance_data['b'] * 100 / agents[agent].start_balance))

                for agent_id, balance_data in sorted(agent_balance_data.items(), key=itemgetter(0)):
                    agent = agents[agent_id]
                    if agent.good:
                        color = next(good_colors)
                    else:
                        color = next(bad_colors)
                    l = plot.line(x=list(map(itemgetter(0), balance_data)),
                                  y=list(map(itemgetter(1), balance_data)),
                                  line_dash=line_dash,
                                  line_width=2,
                                  color=color,
                                  )
                    legend.append((f"{name} {agent.address} Agent Balance", [l]))

        self._logger.info("Done going through runs.")

        legend = Legend(items=legend, location='center_left')
        plot.add_layout(legend, 'above')
        plot.legend.label_text_font_size = '12pt'

        self._logger.info("Saving image to: %s", img_save_path)
        export_png(plot, img_save_path)


if __name__ == '__main__':
    inj = Injector([
        LoggingModule,
    ])
    s = inj.get(SimulationCombiner)
    path = Path(__file__, '../../..').resolve()
    paths = dict(
        fitness=dict(
            nb=path / 'saved_runs/1578937397-fitness-nb.json',
            ncc=path / 'saved_runs/1578938741-fitness-ncc.json',
            perceptron=path / 'saved_runs/1578934493-fitness-perceptron.json',
        ),
        imdb=dict(
            nb=path / 'saved_runs/1580943847-imdb-nb-simulation_data.json',
            ncc=path / 'saved_runs/1580945025-imdb-ncc-simulation_data.json',
            perceptron=path / 'saved_runs/1580945565-imdb-perceptron-simulation_data.json',
        ),
        news=dict(
            nb=path / 'saved_runs/1580941815-news-nb-simulation_data.json',
            ncc=path / 'saved_runs/1580941258-news-ncc-simulation_data.json',
            perceptron=path / 'saved_runs/1580940494-news-perceptron-simulation_data.json',
        ),
    )
    for dataset in paths.keys():
        s.combine([
            dict(name="NB",
                 path=paths[dataset]['nb'],
                 ),
            dict(name="NCC",
                 path=paths[dataset]['ncc'],
                 ),
            dict(name="Perceptron",
                 path=paths[dataset]['perceptron'],
                 ),
        ], path / f'saved_runs/combined-{dataset}.png')
import json
import logging
import os
import random
import time
from dataclasses import asdict, dataclass
from functools import partial
from itertools import cycle
from logging import Logger
from platform import uname
from queue import PriorityQueue
from threading import Thread
from typing import List

import numpy as np
from bokeh import colors
from bokeh.document import Document
from bokeh.io import export_png
from bokeh.models import AdaptiveTicker, ColumnDataSource, FuncTickFormatter, PrintfTickFormatter
from bokeh.plotting import curdoc, figure
from injector import inject
from tornado import gen
from tqdm import tqdm

from decai.simulation.contract.balances import Balances
from decai.simulation.contract.collab_trainer import CollaborativeTrainer
from decai.simulation.contract.incentive.prediction_market import MarketPhase, PredictionMarket
from decai.simulation.contract.objects import Address, Msg, RejectException, TimeMock
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapper


@dataclass
class Agent:
    """
    A user to run in the simulator.
    """
    address: Address
    start_balance: float
    mean_deposit: float
    stdev_deposit: float
    mean_update_wait_s: float
    stdev_update_wait_time: float = 1
    pay_to_call: float = 0
    good: bool = True
    prob_mistake: float = 0
    calls_model: bool = False

    def __post_init__(self):
        assert self.start_balance > self.mean_deposit

    def __lt__(self, other):
        return self.address < other.address

    def get_next_deposit(self) -> int:
        while True:
            result = int(random.normalvariate(self.mean_deposit, self.stdev_deposit))
            if result > 0:
                return result

    def get_next_wait_s(self) -> int:
        while True:
            result = int(random.normalvariate(self.mean_update_wait_s, self.stdev_update_wait_time))
            if result >= 1:
                return result


class Simulator(object):
    """
    A simulator for Decentralized & Collaborative AI.
    """

    @inject
    def __init__(self,
                 balances: Balances,
                 data_loader: DataLoader,
                 decai: CollaborativeTrainer,
                 feature_index_mapper: FeatureIndexMapper,
                 logger: Logger,
                 time_method: TimeMock,
                 ):
        self._balances = balances
        self._data_loader = data_loader
        self._decai = decai
        self._feature_index_mapper = feature_index_mapper
        self._logger = logger
        self._time = time_method

        self._warned_about_saving_plot = False

    def save_plot_image(self, plot, plot_save_path):
        try:
            export_png(plot, filename=plot_save_path)
        except Exception as e:
            if self._warned_about_saving_plot:
                return
            show_error_details = True
            message = "Could not save picture of the plot."
            try:
                # Check if in WSL.
                show_error_details = not ('microsoft' in uname().release.lower())
            except Exception:
                pass
            if show_error_details:
                self._logger.exception(message, exc_info=e)
            else:
                self._logger.warning(f"{message} %s", e)
            self._warned_about_saving_plot = True

    def simulate(self,
                 agents: List[Agent],
                 baseline_accuracy: float = None,
                 init_train_data_portion: float = 0.1,
                 pm_test_sets: list = None,
                 accuracy_plot_wait_s=2E5,
                 train_size: int = None,
                 test_size: int = None,
                 filename_indicator: str = None
                 ):
        """
        Run a simulation.

        :param agents: The agents that will interact with the data.
        :param baseline_accuracy: The baseline accuracy of the model.
            Usually the accuracy on a hidden test set when the model is trained with all data.
        :param init_train_data_portion: The portion of the data to initially use for training. Must be in [0, 1].
        :param pm_test_sets: The test sets for the prediction market incentive mechanism.
        :param accuracy_plot_wait_s: The amount of time to wait in seconds between plotting the accuracy.
        :param train_size: The amount of training data to use.
        :param test_size: The amount of test data to use.
        :param filename_indicator: Indicator to include in the names of the files created for the run.
        """

        assert 0 <= init_train_data_portion <= 1

        # Data to save.
        save_data = dict(agents=[asdict(a) for a in agents],
                         baselineAccuracy=baseline_accuracy,
                         initTrainDataPortion=init_train_data_portion,
                         accuracies=[],
                         balances=[],
                         )
        time_for_filenames = int(time.time())
        save_path = f'saved_runs/{time_for_filenames}-{filename_indicator}-simulation_data.json'
        model_save_path = f'saved_runs/{time_for_filenames}-{filename_indicator}-model.json'
        plot_save_path = f'saved_runs/{time_for_filenames}-{filename_indicator}.png'
        self._logger.info("Saving run info to \"%s\".", save_path)
        os.makedirs(os.path.dirname(save_path), exist_ok=True)

        # Set up plots.
        doc: Document = curdoc()
        doc.title = "DeCAI Simulation"

        plot = figure(title="Balances & Accuracy on Hidden Test Set", )
        plot.width = 800
        plot.height = 600

        plot.xaxis.axis_label = "Time (days)"
        plot.yaxis.axis_label = "Percent"
        plot.title.text_font_size = '20pt'
        plot.xaxis.major_label_text_font_size = '20pt'
        plot.xaxis.axis_label_text_font_size = '20pt'
        plot.yaxis.major_label_text_font_size = '20pt'
        plot.yaxis.axis_label_text_font_size = '20pt'

        plot.xaxis[0].ticker = AdaptiveTicker(base=5 * 24 * 60 * 60)
        plot.xgrid[0].ticker = AdaptiveTicker(base=24 * 60 * 60)

        balance_plot_sources_per_agent = dict()
        good_colors = cycle([
            colors.named.green,
            colors.named.lawngreen,
            colors.named.darkgreen,
            colors.named.limegreen,
        ])
        bad_colors = cycle([
            colors.named.red,
            colors.named.darkred,
        ])
        for agent in agents:
            source = ColumnDataSource(dict(t=[], b=[]))
            assert agent.address not in balance_plot_sources_per_agent
            balance_plot_sources_per_agent[agent.address] = source
            if agent.calls_model:
                color = 'blue'
                line_dash = 'dashdot'
            elif agent.good:
                color = next(good_colors)
                line_dash = 'dotted'
            else:
                color = next(bad_colors)
                line_dash = 'dashed'
            plot.line(x='t', y='b',
                      line_dash=line_dash,
                      line_width=2,
                      source=source,
                      color=color,
                      legend=f"{agent.address} Balance")

        plot.legend.location = 'top_left'
        plot.legend.label_text_font_size = '12pt'

        # JavaScript code.
        plot.xaxis[0].formatter = FuncTickFormatter(code="""
            return (tick / 86400).toFixed(0);
        """)
        plot.yaxis[0].formatter = PrintfTickFormatter(format="%0.1f%%")

        acc_source = ColumnDataSource(dict(t=[], a=[]))
        if baseline_accuracy is not None:
            plot.ray(x=[0], y=[baseline_accuracy * 100], length=0, angle=0, line_width=2,
                     legend=f"Accuracy when trained with all data: {baseline_accuracy * 100:0.1f}%")
        plot.line(x='t', y='a',
                  line_dash='solid',
                  line_width=2,
                  source=acc_source,
                  color='black',
                  legend="Current Accuracy")

        @gen.coroutine
        def plot_cb(agent: Agent, t, b):
            source = balance_plot_sources_per_agent[agent.address]
            source.stream(dict(t=[t], b=[b * 100 / agent.start_balance]))
            save_data['balances'].append(dict(t=t, a=agent.address, b=b))

        @gen.coroutine
        def plot_accuracy_cb(t, a):
            acc_source.stream(dict(t=[t], a=[a * 100]))
            save_data['accuracies'].append(dict(t=t, accuracy=a))

        continuous_evaluation = not isinstance(self._decai.im, PredictionMarket)

        def task():
            (x_train, y_train), (x_test, y_test) = \
                self._data_loader.load_data(train_size=train_size, test_size=test_size)
            classifications = self._data_loader.classifications()
            x_train, x_test, feature_index_mapping = self._feature_index_mapper.map(x_train, x_test)
            x_train_len = x_train.shape[0]
            init_idx = int(x_train_len * init_train_data_portion)
            self._logger.info("Initializing model with %d out of %d samples.",
                              init_idx, x_train_len)
            x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
            x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]

            save_model = isinstance(self._decai.im, PredictionMarket) and self._decai.im.reset_model_during_reward_phase
            self._decai.model.init_model(x_init_data, y_init_data, save_model)

            if self._logger.isEnabledFor(logging.DEBUG):
                s = self._decai.model.evaluate(x_init_data, y_init_data)
                self._logger.debug("Initial training data evaluation: %s", s)
                if len(x_remaining) > 0:
                    s = self._decai.model.evaluate(x_remaining, y_remaining)
                    self._logger.debug("Remaining training data evaluation: %s", s)
                else:
                    self._logger.debug("There is no more remaining data to evaluate.")

            self._logger.info("Evaluating initial model.")
            accuracy = self._decai.model.log_evaluation_details(x_test, y_test)
            self._logger.info("Initial test set accuracy: %0.2f%%", accuracy * 100)
            t = self._time()
            doc.add_next_tick_callback(
                partial(plot_accuracy_cb, t=t, a=accuracy))

            q = PriorityQueue()
            random.shuffle(agents)
            for agent in agents:
                self._balances.initialize(agent.address, agent.start_balance)
                q.put((self._time() + agent.get_next_wait_s(), agent))
                doc.add_next_tick_callback(
                    partial(plot_cb, agent=agent, t=t, b=agent.start_balance))

            unclaimed_data = []
            next_data_index = 0
            next_accuracy_plot_time = 1E4
            desc = "Processing agent requests"
            current_time = 0
            with tqdm(desc=desc,
                      unit_scale=True, mininterval=2, unit=" requests",
                      total=len(x_remaining),
                      ) as pbar:
                while not q.empty():
                    # For now assume sending a transaction (editing) is free (no gas)
                    # since it should be relatively cheaper than the deposit required to add data.
                    # It may not be cheaper than calling `report`.

                    if next_data_index >= len(x_remaining):
                        if not continuous_evaluation or len(unclaimed_data) == 0:
                            break

                    current_time, agent = q.get()
                    update_balance_plot = False
                    if current_time > next_accuracy_plot_time:
                        self._logger.debug("Evaluating.")
                        next_accuracy_plot_time += accuracy_plot_wait_s
                        accuracy = self._decai.model.evaluate(x_test, y_test)
                        doc.add_next_tick_callback(
                            partial(plot_accuracy_cb, t=current_time, a=accuracy))

                        if continuous_evaluation:
                            self._logger.debug("Unclaimed data: %d", len(unclaimed_data))
                            pbar.set_description(f"{desc} ({len(unclaimed_data)} unclaimed)")

                        with open(save_path, 'w') as f:
                            json.dump(save_data, f, separators=(',', ':'))
                        self._decai.model.export(model_save_path, classifications,
                                                 feature_index_mapping=feature_index_mapping)

                        if os.path.exists(plot_save_path):
                            os.remove(plot_save_path)
                        self.save_plot_image(plot, plot_save_path)

                    self._time.set_time(current_time)

                    balance = self._balances[agent.address]
                    if balance > 0 and next_data_index < len(x_remaining):
                        # Pick data.
                        x, y = x_remaining[next_data_index], y_remaining[next_data_index]

                        if agent.calls_model:
                            # Only call the model if it's good.
                            if random.random() < accuracy:
                                update_balance_plot = True
                                self._decai.predict(Msg(agent.address, agent.pay_to_call), x)
                        else:
                            if not agent.good:
                                y = 1 - y
                            if agent.prob_mistake > 0 and random.random() < agent.prob_mistake:
                                y = 1 - y

                            # Bad agents always contribute.
                            # Good agents will only work if the model is doing well.
                            # Add a bit of chance they will contribute since 0.85 accuracy is okay.
                            if not agent.good or random.random() < accuracy + 0.15:
                                value = agent.get_next_deposit()
                                if value > balance:
                                    value = balance
                                msg = Msg(agent.address, value)
                                try:
                                    self._decai.add_data(msg, x, y)
                                    # Don't need to plot every time. Plot less as we get more data.
                                    update_balance_plot = next_data_index / len(x_remaining) + 0.1 < random.random()
                                    balance = self._balances[agent.address]
                                    if continuous_evaluation:
                                        unclaimed_data.append((current_time, agent, x, y))
                                    next_data_index += 1
                                    pbar.update()
                                except RejectException:
                                    # Probably failed because they didn't pay enough, which is okay.
                                    # Or if not enough time has passed since data was attempted to be added,
                                    # which is okay too because a real contract would reject this
                                    # because the smallest unit of time we can use is 1s.
                                    if self._logger.isEnabledFor(logging.DEBUG):
                                        self._logger.exception("Error adding data.")

                    if balance > 0:
                        q.put((current_time + agent.get_next_wait_s(), agent))

                    claimed_indices = []
                    for i in range(len(unclaimed_data)):
                        added_time, adding_agent, x, classification = unclaimed_data[i]
                        if current_time - added_time < self._decai.im.refund_time_s:
                            break
                        if next_data_index >= len(x_remaining) \
                                and current_time - added_time < self._decai.im.any_address_claim_wait_time_s:
                            break
                        balance = self._balances[agent.address]
                        msg = Msg(agent.address, balance)

                        if current_time - added_time > self._decai.im.any_address_claim_wait_time_s:
                            # Attempt to take the entire deposit.
                            try:
                                self._decai.report(msg, x, classification, added_time, adding_agent.address)
                                update_balance_plot = True
                            except RejectException:
                                if self._logger.isEnabledFor(logging.DEBUG):
                                    self._logger.exception("Error taking reward.")
                        elif adding_agent.address == agent.address:
                            try:
                                self._decai.refund(msg, x, classification, added_time)
                                update_balance_plot = True
                            except RejectException:
                                if self._logger.isEnabledFor(logging.DEBUG):
                                    self._logger.exception("Error getting refund.")
                        else:
                            try:
                                self._decai.report(msg, x, classification, added_time, adding_agent.address)
                                update_balance_plot = True
                            except RejectException:
                                if self._logger.isEnabledFor(logging.DEBUG):
                                    self._logger.exception("Error taking reward.")

                        stored_data = self._decai.data_handler.get_data(x, classification,
                                                                        added_time, adding_agent.address)
                        if stored_data.claimable_amount <= 0:
                            claimed_indices.append(i)

                    for i in claimed_indices[::-1]:
                        unclaimed_data.pop(i)

                    if update_balance_plot:
                        balance = self._balances[agent.address]
                        doc.add_next_tick_callback(
                            partial(plot_cb, agent=agent, t=current_time, b=balance))

            self._logger.info("Done going through data.")
            if continuous_evaluation:
                pbar.set_description(f"{desc} ({len(unclaimed_data)} unclaimed)")

            if isinstance(self._decai.im, PredictionMarket):
                self._time.add_time(agents[0].get_next_wait_s())
                self._decai.im.end_market()
                for i, test_set_portion in enumerate(pm_test_sets):
                    if i != self._decai.im.test_reveal_index:
                        self._decai.im.verify_next_test_set(test_set_portion)
                with tqdm(desc="Processing contributions",
                          unit_scale=True, mininterval=2, unit=" contributions",
                          total=self._decai.im.get_num_contributions_in_market(),
                          ) as pbar:
                    finished_first_round_of_rewards = False
                    while self._decai.im.remaining_bounty_rounds > 0:
                        self._time.add_time(agents[0].get_next_wait_s())
                        self._decai.im.process_contribution()
                        pbar.update()

                        if not finished_first_round_of_rewards:
                            accuracy = self._decai.im.prev_acc
                            # If we plot too often then we end up with a blob instead of a line.
                            if random.random() < 0.1:
                                doc.add_next_tick_callback(
                                    partial(plot_accuracy_cb, t=self._time(), a=accuracy))

                        if self._decai.im.state == MarketPhase.REWARD_RESTART:
                            finished_first_round_of_rewards = True
                            if self._decai.im.reset_model_during_reward_phase:
                                # Update the accuracy after resetting all data.
                                accuracy = self._decai.im.prev_acc
                            else:
                                # Use the accuracy after training with all data.
                                pass
                            doc.add_next_tick_callback(
                                partial(plot_accuracy_cb, t=self._time(), a=accuracy))
                            pbar.total += self._decai.im.get_num_contributions_in_market()

                self._time.add_time(self._time() * 0.001)
                for agent in agents:
                    balance = self._balances[agent.address]
                    market_bal = self._decai.im._market_balances[agent.address]
                    self._logger.debug("\"%s\" market balance: %0.2f Balance: %0.2f",
                                       agent.address, market_bal, balance)
                    doc.add_next_tick_callback(
                        partial(plot_cb, agent=agent, t=self._time(), b=max(balance + market_bal, 0)))

                self._time.add_time(self._time() * 0.02)
                for agent in agents:
                    msg = Msg(agent.address, 0)
                    # Find data submitted by them.
                    data = None
                    for key, stored_data in self._decai.data_handler:
                        if stored_data.sender == agent.address:
                            data = key[0]
                            break
                    if data is not None:
                        self._decai.refund(msg, np.array(data), stored_data.classification, stored_data.time)
                        balance = self._balances[agent.address]
                        doc.add_next_tick_callback(
                            partial(plot_cb, agent=agent, t=self._time(), b=balance))
                        self._logger.info("Balance for \"%s\": %.2f (%+.2f%%)",
                                          agent.address, balance,
                                          (balance - agent.start_balance) / agent.start_balance * 100)
                    else:
                        self._logger.warning("No data submitted by \"%s\" was found."
                                             "\nWill not update its balance.", agent.address)

                self._logger.info("Done issuing rewards.")

            accuracy = self._decai.model.log_evaluation_details(x_test, y_test)
            doc.add_next_tick_callback(
                partial(plot_accuracy_cb, t=current_time + 100, a=accuracy))

            with open(save_path, 'w') as f:
                json.dump(save_data, f, separators=(',', ':'))
            self._decai.model.export(model_save_path, classifications,
                                     feature_index_mapping=feature_index_mapping)

            if os.path.exists(plot_save_path):
                os.remove(plot_save_path)
            self.save_plot_image(plot, plot_save_path)

        doc.add_root(plot)
        thread = Thread(target=task)
        thread.start()
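
# Example (a sketch, not part of the simulator): `Agent` rejection-samples
# deposits and wait times from normal distributions, redrawing until a deposit
# is positive or a wait is at least one second.
if __name__ == '__main__':
    example = Agent(address='Example', start_balance=10_000,
                    mean_deposit=5, stdev_deposit=1, mean_update_wait_s=10 * 60)
    print([example.get_next_deposit() for _ in range(5)])  # Positive integers near 5.
    print(example.get_next_wait_s())  # Roughly 600 seconds.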
import math
import os
import sys

from injector import inject, Injector

from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.decision_tree import DecisionTreeModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.titanic_data_loader import TitanicDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator

# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))


class Runner(object):
    @inject
    def __init__(self,
                 data: DataLoader,
                 simulator: Simulator,
                 ):
        self._data = data
        self._s = simulator

    def run(self):
        init_train_data_portion = 0.10

        # Set up the agents that will act in the simulation.
        agents = [
            # Good
            Agent(address="Good",
                  start_balance=10_000,
                  mean_deposit=5,
                  stdev_deposit=1,
                  mean_update_wait_s=10 * 60,
                  ),
            # Malicious: determined with the goal of disrupting others.
            Agent(address="Bad",
                  start_balance=10_000,
                  mean_deposit=10,
                  stdev_deposit=3,
                  mean_update_wait_s=1 * 60 * 60,
                  good=False,
                  ),
        ]

        # Start the simulation.
        self._s.simulate(agents,
                         baseline_accuracy=0.806,
                         init_train_data_portion=init_train_data_portion,
                         accuracy_plot_wait_s=math.inf,
                         )


# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
    # Set up the data, model, and incentive mechanism.
    inj = Injector([
        DecisionTreeModule,
        DefaultCollaborativeTrainerModule,
        LoggingModule,
        StakeableImModule,
        TitanicDataModule,
    ])
    inj.get(Runner).run()

if __name__ == '__main__':
    # Train and evaluate the model directly (without the simulation).
    inj = Injector([
        DecisionTreeModule(regression=False),
        DefaultCollaborativeTrainerModule,
        LoggingModule,
        StakeableImModule,
        TitanicDataModule,
    ])
    d = inj.get(DataLoader)
    (x_train, y_train), (x_test, y_test) = d.load_data()
    c = inj.get(Classifier)
    c.init_model(x_train, y_train)
    score = c.evaluate(x_train, y_train)

    import random
    for _ in range(10):
        i = random.randrange(len(x_train))
        print(f"{i:04d}: {x_train[i]}: {y_train[i]}")
        print(f"Prediction: {c.predict(x_train[i])}")

    print(f"Evaluation on training data: {score * 100:0.2f}%")
    if len(x_test) > 0:
        score = c.evaluate(x_test, y_test)
        print(f"Evaluation on test data: {score * 100:0.2f}%")
import json
import math
import os
import random
import sys
from collections import Counter
from typing import cast

import numpy as np
from injector import inject, Injector

from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.decision_tree import DecisionTreeModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.ttt_data_loader import TicTacToeDataLoader, TicTacToeDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator

# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))


class Runner(object):
    @inject
    def __init__(self,
                 data: DataLoader,
                 simulator: Simulator,
                 ):
        self._data = data
        self._s = simulator

    def run(self):
        init_train_data_portion = 0.10

        # Set up the agents that will act in the simulation.
        agents = [
            # Good
            Agent(address="Good",
                  start_balance=10_000,
                  mean_deposit=5,
                  stdev_deposit=1,
                  mean_update_wait_s=10 * 60,
                  ),
            # Malicious: determined with the goal of disrupting others.
            Agent(address="Bad",
                  start_balance=10_000,
                  mean_deposit=10,
                  stdev_deposit=3,
                  mean_update_wait_s=1 * 60 * 60,
                  good=False,
                  ),
        ]

        # Start the simulation.
        self._s.simulate(agents,
                         baseline_accuracy=0.44,
                         init_train_data_portion=init_train_data_portion,
                         accuracy_plot_wait_s=math.inf,
                         )


# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
    # Set up the data, model, and incentive mechanism.
    inj = Injector([
        DecisionTreeModule,
        DefaultCollaborativeTrainerModule,
        LoggingModule,
        StakeableImModule,
        TicTacToeDataModule,
    ])
    inj.get(Runner).run()


def _map_pos(tic_tac_toe, board, pos):
    assert 0 <= pos < board.size
    return pos // tic_tac_toe.width, pos % tic_tac_toe.width


def play_game(classifier, tic_tac_toe):
    board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
    if random.random() < 0.5:
        # Machine is playing.
        pos = classifier.predict(board.flatten())
        board[_map_pos(tic_tac_toe, board, pos)] = 1
    m = {0: '#', 1: 'O', -1: 'X'}
    map_symbols = np.vectorize(lambda x: m[x])

    def print_board(b):
        print(np.array2string(map_symbols(b), formatter={'str_kind': lambda x: x}))

    print(f"The machine is O. You are X.\nPositions:\n{np.arange(board.size).reshape(board.shape)}")
    while True:
        if np.count_nonzero(board) == board.size:
            print("TIE")
            break
        # Person's turn.
        print_board(board)
        while True:
            pos = input("Where would you like to go?")
            pos = _map_pos(tic_tac_toe, board, int(pos.strip()))
            if board[pos] == 0:
                board[pos] = -1
                break
            else:
                print("There is already a value there.")
        winner = tic_tac_toe.get_winner(board)
        if winner is not None:
            print("You WIN!")
            break
        # Machine's turn.
        original_pos = classifier.predict(board.flatten())
        pos = _map_pos(tic_tac_toe, board, original_pos)
        if board[pos] != 0:
            print(f"Machine picked a spot that already has a marker ({original_pos}). This probably means a draw.")
            print_board(board)
            break
        board[pos] = 1
        winner = tic_tac_toe.get_winner(board)
        if winner is not None:
            print("You lose :(")
            break


def evaluate_on_self(classifier, tic_tac_toe):
    print("Evaluating by playing against itself.")

    def _run_game(board, next_player):
        if next_player == -1:
            # Flip the board since the bot always thinks it is 1.
            board_for_prediction = -board
        else:
            board_for_prediction = board
        pos = classifier.predict(board_for_prediction.flatten())
        pos = _map_pos(tic_tac_toe, board, pos)
        if board[pos] != 0:
            return "TIE", np.count_nonzero(board == next_player)
        board[pos] = next_player
        if tic_tac_toe.get_winner(board):
            return next_player, np.count_nonzero(board == next_player)
        else:
            return _run_game(board, -1 if next_player == 1 else 1)

    # Start with an empty board and let the model pick where to start.
    board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
    winner, num_moves = _run_game(board, 1)
    if winner == 1:
        print(f"When model starts: WINS in {num_moves} moves.")
    elif isinstance(winner, str):
        print(f"When model starts: {winner} in {num_moves} moves.")
    else:
        print(f"When model starts: LOSES. Winner has {num_moves} moves.")

    winners = Counter()
    winner_move_counts = []
    for start_pos in range(board.size):
        board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
        board[_map_pos(tic_tac_toe, board, start_pos)] = -1
        winner, num_moves = _run_game(board, 1)
        winners[winner] += 1
        winner_move_counts.append(num_moves)
    print("Winners when -1 starts in each position:")
    print(json.dumps(winners, indent=2))
    print(f"Winner move counts:\n{winner_move_counts}")
    print(f"Avg # winner moves: {np.average(winner_move_counts)}")


if __name__ == '__main__':
    # Play the game.
    inj = Injector([
        DecisionTreeModule,
        DefaultCollaborativeTrainerModule,
        LoggingModule,
        StakeableImModule,
        TicTacToeDataModule,
    ])
    ttt = inj.get(DataLoader)
    assert isinstance(ttt, TicTacToeDataLoader)
    ttt = cast(TicTacToeDataLoader, ttt)
    # To train on all data:
    # ttt._train_split = 1
    (x_train, y_train), (x_test, y_test) = ttt.load_data()
    c = inj.get(Classifier)
    c.init_model(x_train, y_train)
    score = c.evaluate(x_train, y_train)
    print(f"Evaluation on training data: {score}")
    if len(x_test) > 0:
        score = c.evaluate(x_test, y_test)
        print(f"Evaluation on test data: {score}")

    evaluate_on_self(c, ttt)
    while True:
        play_game(c, ttt)
import math
import os
import sys

from injector import inject, Injector

from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.decision_tree import DecisionTreeModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.bhp_data_loader import BhpDataModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator

# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))


class Runner(object):
    @inject
    def __init__(self,
                 data: DataLoader,
                 simulator: Simulator,
                 ):
        self._data = data
        self._s = simulator

    def run(self):
        init_train_data_portion = 0.10

        # Set up the agents that will act in the simulation.
        agents = [
            # Good
            Agent(address="Good",
                  start_balance=10_000,
                  mean_deposit=5,
                  stdev_deposit=1,
                  mean_update_wait_s=10 * 60,
                  ),
            # Malicious: determined with the goal of disrupting others.
            Agent(address="Bad",
                  start_balance=10_000,
                  mean_deposit=10,
                  stdev_deposit=3,
                  mean_update_wait_s=1 * 60 * 60,
                  good=False,
                  ),
        ]

        # Start the simulation.
        self._s.simulate(agents,
                         baseline_accuracy=0.44,
                         init_train_data_portion=init_train_data_portion,
                         accuracy_plot_wait_s=math.inf,
                         )


# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
    # Set up the data, model, and incentive mechanism.
    inj = Injector([
        DecisionTreeModule,
        DefaultCollaborativeTrainerModule,
        LoggingModule,
        StakeableImModule,
        BhpDataModule,
    ])
    inj.get(Runner).run()

if __name__ == '__main__':
    # Train and evaluate the model directly (without the simulation).
    inj = Injector([
        DecisionTreeModule(regression=True),
        DefaultCollaborativeTrainerModule,
        LoggingModule,
        StakeableImModule,
        BhpDataModule,
    ])
    d = inj.get(DataLoader)
    (x_train, y_train), (x_test, y_test) = d.load_data()
    c = inj.get(Classifier)
    c.init_model(x_train, y_train)
    score = c.evaluate(x_train, y_train)

    import random
    for _ in range(10):
        i = random.randrange(len(x_train))
        print(f"{i:04d}: {x_train[i]}: {y_train[i]}")
        print(f"Prediction: {c.predict(x_train[i])}")

    print(f"Evaluation on training data: {score}")
    if len(x_test) > 0:
        score = c.evaluate(x_test, y_test)
        print(f"Evaluation on test data: {score}")
import logging
from dataclasses import dataclass, field
from logging import Logger

from injector import Module, provider, singleton


@dataclass
class LoggingModule(Module):
    _log_level: int = field(default=logging.INFO)

    @provider
    @singleton
    def provide_logger(self) -> Logger:
        result = logging.Logger('decai')
        result.setLevel(self._log_level)
        f = logging.Formatter('%(asctime)s [%(levelname)s] - %(name)s:%(filename)s:%(funcName)s\n%(message)s')
        h = logging.StreamHandler()
        h.setFormatter(f)
        result.addHandler(h)
        return result
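
# Example (a sketch, not part of the repository): the rest of the package
# receives this `Logger` through dependency injection rather than calling
# `logging.getLogger` directly.
if __name__ == '__main__':
    from injector import Injector

    inj = Injector([LoggingModule])
    logger = inj.get(Logger)
    logger.info("Logger configured via injection.")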
import os
import sys
from typing import Optional

from injector import Injector

from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.imdb_data_loader import ImdbDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator

# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

num_words = 1000
train_size: Optional[int] = None
if train_size is None:
    init_train_data_portion = 0.08
else:
    init_train_data_portion = 100 / train_size


def main():
    # Set up the agents that will act in the simulation.
    agents = [
        # Good
        Agent(address="Good",
              start_balance=10_000,
              mean_deposit=50,
              stdev_deposit=10,
              mean_update_wait_s=10 * 60,
              prob_mistake=0.0001,
              ),
        # Malicious: A determined agent with the goal of disrupting others.
        Agent(address="Bad",
              start_balance=10_000,
              mean_deposit=100,
              stdev_deposit=3,
              mean_update_wait_s=1 * 60 * 60,
              good=False,
              ),
        # One that just calls the model and pays to use the model.
        Agent(address="Caller",
              start_balance=30_000,
              mean_deposit=0,
              stdev_deposit=0,
              mean_update_wait_s=2 * 60 * 60,
              calls_model=True,
              pay_to_call=50,
              ),
    ]
    # No caller (assume free to call).
    agents = agents[:-1]

    # Set up the data, model, and incentive mechanism.
    inj = Injector([
        DefaultCollaborativeTrainerModule,
        ImdbDataModule(num_words=num_words),
        LoggingModule,
        PerceptronModule,
        StakeableImModule,
    ])
    s = inj.get(Simulator)

    # Accuracy on the hidden test set after training with all training data:
    baseline_accuracies = {
        100: 0.6210,
        200: 0.6173,
        1000: 0.7945,
        10000: 0.84692,
        20000: 0.8484,
    }

    # Start the simulation.
    s.simulate(agents,
               baseline_accuracy=baseline_accuracies[num_words],
               init_train_data_portion=init_train_data_portion,
               train_size=train_size,
               )


# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
    main()
import math
import os
import sys

from injector import inject, Injector

from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.incentive.prediction_market import PredictionMarket, PredictionMarketImModule
from decai.simulation.contract.objects import Msg
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.imdb_data_loader import ImdbDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator

# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

num_words = 1000


class Runner(object):
    @inject
    def __init__(self,
                 balances: Balances,
                 data: DataLoader,
                 im: IncentiveMechanism,
                 simulator: Simulator,
                 ):
        assert isinstance(im, PredictionMarket)
        self._balances = balances
        self._data = data
        self._im = im
        self._s = simulator

    def run(self):
        initializer_address = 'initializer'
        total_bounty = 100_000
        train_size = 10_000
        test_size = 1000
        init_train_data_portion = 10 / train_size

        # Set up the agents that will act in the simulation.
        agents = [
            # Good
            Agent(address="Good 1",
                  start_balance=10_000,
                  mean_deposit=5,
                  stdev_deposit=1,
                  mean_update_wait_s=10 * 60,
                  ),
            Agent(address="Good 2",
                  start_balance=10_000,
                  mean_deposit=5,
                  stdev_deposit=1,
                  mean_update_wait_s=20 * 60,
                  ),
            Agent(address="Good 3",
                  start_balance=10_000,
                  mean_deposit=5,
                  stdev_deposit=1,
                  mean_update_wait_s=30 * 60,
                  ),
            # Malicious: determined with the goal of disrupting others.
            Agent(address="Bad 1",
                  start_balance=10_000,
                  mean_deposit=10,
                  stdev_deposit=3,
                  mean_update_wait_s=1 * 60 * 60,
                  good=False,
                  ),
            Agent(address="Bad 2",
                  start_balance=10_000,
                  mean_deposit=10,
                  stdev_deposit=3,
                  mean_update_wait_s=1 * 60 * 60,
                  good=False,
                  ),
        ]

        self._balances.initialize(initializer_address, total_bounty)

        (x_train, y_train), (x_test, y_test) = self._data.load_data(train_size=train_size, test_size=test_size)
        init_idx = int(len(x_train) * init_train_data_portion)
        assert init_idx > 0
        x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
        x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]

        # Split test set into pieces.
        num_pieces = 10
        test_dataset_hashes, test_sets = self._im.get_test_set_hashes(num_pieces, x_test, y_test)

        # Ending criteria:
        min_length_s = 1_000
        min_num_contributions = len(x_remaining)

        save_model = isinstance(self._im, PredictionMarket) and self._im.reset_model_during_reward_phase
        self._im.model.init_model(x_init_data, y_init_data, save_model)

        test_reveal_index = self._im.initialize_market(Msg(initializer_address, total_bounty),
                                                       test_dataset_hashes,
                                                       min_length_s, min_num_contributions)
        assert 0 <= test_reveal_index < len(test_dataset_hashes)
        self._im.reveal_init_test_set(test_sets[test_reveal_index])

        # Accuracy on the hidden test set after training with all training data:
        baseline_accuracies = {
            100: 0.6210,
            200: 0.6173,
            1000: 0.7945,
            10000: 0.84692,
            20000: 0.8484,
        }

        # Start the simulation.
        self._s.simulate(agents,
                         baseline_accuracy=baseline_accuracies[num_words],
                         init_train_data_portion=init_train_data_portion,
                         pm_test_sets=test_sets,
                         accuracy_plot_wait_s=math.inf,
                         train_size=train_size,
                         )


# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
    # Set up the data, model, and incentive mechanism.
    inj = Injector([
        DefaultCollaborativeTrainerModule,
        ImdbDataModule(num_words=num_words),
        LoggingModule,
        PerceptronModule,
        PredictionMarketImModule,
    ])
    inj.get(Runner).run()
import os
import re
import sys

from injector import Injector
from sklearn.naive_bayes import MultinomialNB

from decai.simulation.contract.classification.ncc_module import NearestCentroidClassifierModule
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.featuremapping.hashing.murmurhash3 import MurmurHash3Module
from decai.simulation.data.fitness_data_loader import FitnessDataModule
from decai.simulation.data.imdb_data_loader import ImdbDataModule
from decai.simulation.data.news_data_loader import NewsDataModule
from decai.simulation.data.offensive_data_loader import OffensiveDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator

# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

datasets = dict(
    fitness=dict(module=FitnessDataModule,
                 train_size=3500, test_size=1500,
                 ),
    imdb=dict(module=ImdbDataModule(num_words=1000),
              train_size=None, test_size=None,
              ),
    news=dict(module=NewsDataModule,
              train_size=None, test_size=None,
              ),
    offensive=dict(module=OffensiveDataModule,
                   train_size=None, test_size=None,
                   ),
)

models = dict(
    nb=dict(module=SciKitClassifierModule(MultinomialNB),
            baseline_accuracy=dict(
                # train_size, test_size = 3500, 1500
                fitness=0.97,
                # train_size, test_size = None, None
                imdb=0.8323,
                # train_size, test_size = None, None
                news=0.8181,
            )),
    ncc=dict(module=NearestCentroidClassifierModule,
             baseline_accuracy=dict(
                 # train_size, test_size = 3500, 1500
                 fitness=0.9513,
                 # train_size, test_size = None, None
                 imdb=0.7445,
                 # train_size, test_size = None, None
                 news=0.6727,
             )),
    perceptron=dict(module=PerceptronModule,
                    baseline_accuracy=dict(
                        # train_size, test_size = 3500, 1500
                        fitness=0.9507,
                        # train_size, test_size = None, None
                        imdb=0.73,
                        # train_size, test_size = None, None
                        news=0.9003,
                    )),
)

# Set up the agents that will act in the simulation.
agents = [
    # Good
    Agent(address="Good",
          start_balance=10_000,
          mean_deposit=50,
          stdev_deposit=10,
          mean_update_wait_s=10 * 60,
          prob_mistake=0.0001,
          ),
    # Malicious: A determined agent with the goal of disrupting others.
    Agent(address="Bad",
          start_balance=10_000,
          mean_deposit=100,
          stdev_deposit=3,
          mean_update_wait_s=1 * 60 * 60,
          good=False,
          ),
    # One that just calls the model and pays to use the model.
    Agent(address="Caller",
          start_balance=30_000,
          mean_deposit=0,
          stdev_deposit=0,
          mean_update_wait_s=2 * 60 * 60,
          calls_model=True,
          pay_to_call=50,
          ),
]


def main():
    global agents

    # This file is set up to use different models and datasets.
    dataset = 'offensive'
    model_type = 'nb'
    assert dataset in datasets
    assert model_type in models

    train_size = datasets[dataset]['train_size']
    test_size = datasets[dataset]['test_size']
    if train_size is None:
        init_train_data_portion = 0.08
    else:
        init_train_data_portion = 100 / train_size

    # No caller (assume free to call).
    agents = agents[:-1]

    # Set up the data, model, and incentive mechanism.
    inj = Injector([
        DefaultCollaborativeTrainerModule,
        datasets[dataset]['module'],
        MurmurHash3Module,
        LoggingModule,
        models[model_type]['module'],
        StakeableImModule,
    ])
    s = inj.get(Simulator)

    # Start the simulation.
    s.simulate(agents,
               baseline_accuracy=models[model_type]['baseline_accuracy'].get(dataset),
               init_train_data_portion=init_train_data_portion,
               train_size=train_size,
               test_size=test_size,
               filename_indicator=f"{dataset}-{model_type}",
               )


# Run with `bokeh serve PATH`.
if re.match('bk_script_|bokeh_app_', __name__):
    main()
else:
    print("`__name__` didn't match the pattern. Bokeh app will not run.")
import math
import os
import sys

from injector import inject, Injector
from sklearn.naive_bayes import MultinomialNB

from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.titanic_data_loader import TitanicDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator

# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))


# FIXME Using MultinomialNB might not work well with the Titanic dataset because it requires discrete features.
class Runner(object):
    @inject
    def __init__(self,
                 data: DataLoader,
                 simulator: Simulator,
                 ):
        self._data = data
        self._s = simulator

    def run(self):
        init_train_data_portion = 0.10

        # Set up the agents that will act in the simulation.
        agents = [
            # Good
            Agent(address="Good",
                  start_balance=1_000,
                  mean_deposit=5,
                  stdev_deposit=1,
                  mean_update_wait_s=10 * 60,
                  ),
            # Malicious: determined with the goal of disrupting others.
            Agent(address="Bad",
                  start_balance=1_000,
                  mean_deposit=10,
                  stdev_deposit=3,
                  mean_update_wait_s=1 * 60 * 60,
                  good=False,
                  ),
        ]

        # Start the simulation.
        self._s.simulate(agents,
                         baseline_accuracy=0.791,
                         init_train_data_portion=init_train_data_portion,
                         accuracy_plot_wait_s=math.inf,
                         )


# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
    # Set up the data, model, and incentive mechanism.
    inj = Injector([
        SciKitClassifierModule(MultinomialNB),
        DefaultCollaborativeTrainerModule,
        LoggingModule,
        StakeableImModule,
        TitanicDataModule,
    ])
    inj.get(Runner).run()

if __name__ == '__main__':
    # Train and evaluate the model directly (without the simulation).
    inj = Injector([
        SciKitClassifierModule(MultinomialNB),
        DefaultCollaborativeTrainerModule,
        LoggingModule,
        StakeableImModule,
        TitanicDataModule,
    ])
    d = inj.get(DataLoader)
    (x_train, y_train), (x_test, y_test) = d.load_data()
    c = inj.get(Classifier)
    c.init_model(x_train, y_train)
    score = c.evaluate(x_train, y_train)
    print(f"Evaluation on training data: {score * 100:0.2f}%")
    if len(x_test) > 0:
        score = c.evaluate(x_test, y_test)
        print(f"Evaluation on test data: {score * 100:0.2f}%")
from abc import ABC, abstractmethod

from injector import Module, inject, singleton

from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.data.data_handler import DataHandler
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.objects import Msg, SmartContract


class CollaborativeTrainer(ABC, SmartContract):
    """
    Base class for the main interface to create simulations of a training model in a smart contract.
    """

    def __init__(self,
                 balances: Balances,
                 data_handler: DataHandler,
                 incentive_mechanism: IncentiveMechanism,
                 model: Classifier,
                 ):
        super().__init__()
        self.data_handler = data_handler
        self.im = incentive_mechanism
        self.model = model
        self._balances = balances

    @abstractmethod
    def add_data(self, msg: Msg, data, label):
        """
        Update the model with one data sample.

        :param msg: Standard message to pass to any method of a smart contract.
        :param data: A single sample of training data for the model.
        :param label: The label for `data`.
        """
        pass

    @abstractmethod
    def predict(self, msg: Msg, data):
        """
        :param msg: Standard message to pass to any method of a smart contract.
        :param data: The data or features for one sample.
        :return: The predicted classification/label for `data`.
        """
        pass

    @abstractmethod
    def refund(self, msg: Msg, data, classification, added_time: int):
        """
        Attempt a refund for the deposit given with submitted data.
        Must be called by the address that originally submitted the data.

        :param msg: Standard message to pass to any method of a smart contract.
        :param data: The data for which to attempt a refund.
        :param classification: The label originally submitted with `data`.
        :param added_time: The time when the data was added.
        """
        pass

    @abstractmethod
    def report(self, msg: Msg, data, classification, added_time: int, original_author: str):
        """
        Report bad or old data and attempt to get a reward.

        :param msg: Standard message to pass to any method of a smart contract.
        :param data: The data to report.
        :param classification: The label originally submitted with `data`.
        :param added_time: The time when the data was added.
        :param original_author: The address that originally added the data.
        """
        pass


@singleton
class DefaultCollaborativeTrainer(CollaborativeTrainer):
    """
    Default implementation of the main interface.
    """

    @inject
    def __init__(self,
                 balances: Balances,
                 data_handler: DataHandler,
                 incentive_mechanism: IncentiveMechanism,
                 model: Classifier,
                 ):
        kwargs = dict(locals())
        del kwargs['self']
        del kwargs['__class__']
        super().__init__(**kwargs)

        self.data_handler.owner = self.address
        self.im.owner = self.address
        self.model.owner = self.address

    def predict(self, msg: Msg, data):
        self.im.distribute_payment_for_prediction(msg.sender, msg.value)
        return self.model.predict(data)

    # FUNCTIONS FOR HANDLING DATA

    def add_data(self, msg: Msg, data, classification):
        # Consider making sure duplicate data isn't added until it's been claimed.

        cost, update_model = self.im.handle_add_data(msg.sender, msg.value, data, classification)
        self.data_handler.handle_add_data(msg.sender, cost, data, classification)
        if update_model:
            self.model.update(data, classification)

        # In Solidity the message's value gets taken automatically.
        # Here we do this at the end in case something failed while trying to add data.
        self._balances.send(msg.sender, self.address, cost)

    def refund(self, msg: Msg, data, classification, added_time: int):
        (claimable_amount, claimed_by_submitter, stored_data) = \
            self.data_handler.handle_refund(msg.sender, data, classification, added_time)
        prediction = self.model.predict(data)
        refund_amount = self.im.handle_refund(msg.sender, stored_data,
                                              claimable_amount, claimed_by_submitter, prediction)
        self._balances.send(self.address, msg.sender, refund_amount)

        # The Solidity version doesn't need this extra function call
        # because if there is an error earlier, then the changes automatically get reverted.
        self.data_handler.update_claimable_amount(msg.sender, stored_data, refund_amount)

    def report(self, msg: Msg, data, classification, added_time: int, original_author: str):
        claimed_by_reporter, stored_data = \
            self.data_handler.handle_report(msg.sender, data, classification, added_time, original_author)
        prediction = lambda: self.model.predict(data)
        reward_amount = self.im.handle_report(msg.sender, stored_data, claimed_by_reporter, prediction)
        self.data_handler.update_claimable_amount(msg.sender, stored_data, reward_amount)
        self._balances.send(self.address, msg.sender, reward_amount)


class DefaultCollaborativeTrainerModule(Module):
    def configure(self, binder):
        binder.bind(CollaborativeTrainer, to=DefaultCollaborativeTrainer)
from dataclasses import dataclass, field
from logging import Logger
from typing import Dict

from injector import inject, singleton

from decai.simulation.contract.objects import Address


@inject
@singleton
@dataclass
class Balances(object):
    """
    Tracks balances in the simulation.
    """

    _logger: Logger
    _balances: Dict[Address, float] = field(default_factory=dict, init=False)

    def __contains__(self, address: Address):
        """
        :param address: A participant's address.
        :return: `True` if the address is in the simulation, `False` otherwise.
        """
        return address in self._balances

    def __getitem__(self, address: Address) -> float:
        """
        :param address: A participant's address.
        :return: The balance for `address`.
        """
        return self._balances[address]

    def get_all(self) -> Dict[Address, float]:
        """
        :return: A copy of the balances.
        """
        return dict(self._balances)

    def initialize(self, address: Address, start_balance: float):
        """
        Initialize a participant's balance.
        """
        assert address not in self._balances, f"'{address}' already has a balance."
        self._balances[address] = start_balance

    def send(self, sending_address: Address, receiving_address: Address, amount):
        """
        Send funds from one participant to another.
        """
        assert amount >= 0
        if amount > 0:
            sender_balance = self._balances[sending_address]
            if sender_balance < amount:
                self._logger.warning(f"'{sending_address}' has {sender_balance} < {amount}.\n"
                                     f"Will only send {sender_balance}.")
                amount = sender_balance

            self._balances[sending_address] -= amount

            if receiving_address not in self._balances:
                self.initialize(receiving_address, amount)
            else:
                self._balances[receiving_address] += amount
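
# Example (a sketch, not part of the repository) of the `Balances` semantics:
# overspending is capped at the sender's balance (with a warning) and a new
# receiving address is initialized on first receipt.
if __name__ == '__main__':
    import logging

    b = Balances(_logger=logging.getLogger('example'))
    b.initialize('alice', 100)
    b.send('alice', 'bob', 250)  # Capped: only 100 is actually sent.
    assert b['alice'] == 0 and b['bob'] == 100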
# Objects for all smart contracts.
from dataclasses import dataclass, field
from typing import Optional

from injector import singleton

Address = str
""" An address that can receive funds and participate in training models. """


@dataclass
class Msg:
    """
    A message sent to a smart contract.

    :param sender: The sender's address.
    :param value: The amount sent with the message.
    """

    sender: Address
    # Need to use float since the numbers might be large. They should still actually be integers.
    value: float


class RejectException(Exception):
    """
    The smart contract rejected the transaction.
    """
    pass


class SmartContract(object):
    """
    A fake smart contract.
    """

    def __init__(self):
        self.address: Address = f'{type(self).__name__}-{id(self)}'
        """ The address of this contract. """

        self.owner: Optional[Address] = None
        """ The owner of this contract. """


@singleton
@dataclass
class TimeMock(object):
    """
    Helps fake the current time (in seconds).
    Ideally the value returned is an integer (like `now` in Solidity) but this is not guaranteed.

    Normally in an Ethereum smart contract `now` can be called.
    To speed up simulations, use this class to get the current time.
    """

    _time: float = field(default=0, init=False)

    def __call__(self, *args, **kwargs):
        """ Get the currently set time (in seconds). """
        return self._time

    def add_time(self, amount):
        """ Add `amount` (in seconds) to the current time. """
        self._time += amount

    def set_time(self, time_value):
        """ Set the time to return when `time()` is called. """
        self._time = time_value

    def time(self):
        """ Get the currently set time (in seconds). """
        return self._time
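
# Example (a sketch, not part of the repository): `TimeMock` stands in for
# Solidity's `now`, letting simulations jump forward in time without sleeping.
if __name__ == '__main__':
    t = TimeMock()
    t.set_time(100)
    t.add_time(60)
    assert t() == t.time() == 160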
from collections import Counter

from injector import inject
from sklearn.neighbors import NearestCentroid


# Purposely not a singleton so that it is easy to get a model that has not been initialized.
@inject
class NearestCentroidClassifier(NearestCentroid):
    def fit(self, X, y):
        self._num_samples_per_centroid = Counter(y)
        super().fit(X, y)

    def partial_fit(self, training_data, labels):
        # Assume len(training_data) == len(labels) == 1.
        # Assume centroids are indexed by class 0-N.
        sample = training_data[0]
        label = labels[0]
        n = self._num_samples_per_centroid[label]
        # Incrementally update the centroid as a running mean.
        self.centroids_[label] = (self.centroids_[label] * n + sample) / (n + 1)
        self._num_samples_per_centroid[label] = n + 1
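
# Quick check (a sketch, not part of the repository) that the running-mean
# update in `partial_fit` is exact: applying (c * n + x) / (n + 1) per sample
# equals recomputing the centroid as the mean of all samples seen so far.
if __name__ == '__main__':
    import numpy as np

    samples = np.array([[0., 0.], [2., 2.], [4., 8.]])
    c, n = samples[0], 1
    for x in samples[1:]:
        c = (c * n + x) / (n + 1)
        n += 1
    assert np.allclose(c, samples.mean(axis=0))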
import os

from sklearn.linear_model import SGDClassifier

from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule


class PerceptronModule(SciKitClassifierModule):
    def __init__(self, class_weight=None):
        super().__init__(
            _model_initializer=lambda: SGDClassifier(
                loss='perceptron',
                n_jobs=max(1, os.cpu_count() - 2),
                random_state=0xDeCA10B,
                learning_rate='optimal',
                class_weight=class_weight,
                # Don't really care about tol, just setting it to remove a warning.
                tol=1e-3,
                penalty=None))
import logging
from abc import ABC, abstractmethod
from typing import List

from decai.simulation.contract.objects import SmartContract
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapping


class Classifier(ABC, SmartContract):
    """
    A classifier that can take a data sample as input and return a predicted classification/label for the data.
    """

    @abstractmethod
    def evaluate(self, data, labels) -> float:
        """
        Evaluate the model.

        :param data: Data samples.
        :param labels: The ground truth labels for `data`.
        :return: The accuracy for the given test set.
        """
        pass

    @abstractmethod
    def log_evaluation_details(self, data, labels, level=logging.INFO) -> float:
        """
        Log some evaluation details.

        :param data: Data samples.
        :param labels: The ground truth labels for `data`.
        :param level: The level at which to log.
        :return: The accuracy for the given test set.
        """
        pass

    @abstractmethod
    def init_model(self, training_data, labels, save_model=False):
        """
        Fit the model to a specific dataset.

        :param training_data: The data to use to train the model.
        :param labels: The ground truth labels for `training_data`.
        :param save_model: `True` if the model should be saved, `False` otherwise.
        """
        pass

    @abstractmethod
    def predict(self, data):
        """
        :param data: The data or features for one sample.
        :return: The predicted classification or label for `data`.
        """
        pass

    @abstractmethod
    def update(self, data, classification):
        """
        Update the classifier with one data sample.

        :param data: The training data or features for one sample.
        :param classification: The label for `data`.
        """
        pass

    @abstractmethod
    def reset_model(self):
        """
        Re-initialize the model to the same state it was in after `init_model` was called.
        """
        pass

    @abstractmethod
    def export(self,
               path: str,
               classifications: List[str] = None,
               model_type: str = None,
               feature_index_mapping: FeatureIndexMapping = None):
        """
        Export the model in a format for the demo Node.js code to load.

        :param path: The path to save the exported model to.
        :param classifications: The classifications output by the model.
        :param model_type: The type of the model.
        :param feature_index_mapping: Mapping of the feature indices. Mainly for sparse models that were converted to dense ones.
        """
        pass
import json import logging import os import time from dataclasses import dataclass from logging import Logger from pathlib import Path from typing import Any, Callable, List import joblib import numpy as np import scipy.sparse from injector import ClassAssistedBuilder, Module, inject, provider from sklearn.linear_model import SGDClassifier from sklearn.metrics import accuracy_score, classification_report, confusion_matrix from sklearn.naive_bayes import MultinomialNB from decai.simulation.contract.classification.classifier import Classifier from decai.simulation.contract.classification.ncc import NearestCentroidClassifier from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapping # Purposely not a singleton so that it is easy to get a model that has not been initialized. @inject @dataclass class SciKitClassifier(Classifier): """ Classifier for a scikit-learn like model. """ _logger: Logger _model_initializer: Callable[[], Any] _model = None def __post_init__(self): self._original_model_path = Path('saved_models') / f'{time.time()}-{id(self)}.joblib' def evaluate(self, data, labels) -> float: assert self._model is not None, "The model has not been initialized yet." assert isinstance(data, np.ndarray) or scipy.sparse.isspmatrix(data), \ f"The data must be a matrix. Got: {type(data)}" assert isinstance(labels, np.ndarray), "The labels must be an array." self._logger.debug("Evaluating.") return self._model.score(data, labels) def log_evaluation_details(self, data, labels, level=logging.INFO) -> float: assert self._model is not None, "The model has not been initialized yet." assert isinstance(data, np.ndarray), "The data must be an array." assert isinstance(labels, np.ndarray), "The labels must be an array." self._logger.debug("Evaluating.") predicted_labels = self._model.predict(data) result = accuracy_score(labels, predicted_labels) if self._logger.isEnabledFor(level): m = confusion_matrix(labels, predicted_labels) report = classification_report(labels, predicted_labels) self._logger.log(level, "Confusion matrix:\n%s" "\nReport:\n%s" "\nAccuracy: %0.2f%%", m, report, result * 100) return result def init_model(self, training_data, labels, save_model=False): assert self._model is None, "The model has already been initialized." self._logger.debug("Initializing model.") self._model = self._model_initializer() self._logger.debug("training_data.shape: %s. dtype: %s", training_data.shape, training_data.dtype) self._model.fit(training_data, labels) if save_model: self._logger.debug("Saving model to \"%s\".", self._original_model_path) os.makedirs(os.path.dirname(self._original_model_path), exist_ok=True) joblib.dump(self._model, self._original_model_path) def predict(self, data): assert self._model is not None, "The model has not been initialized yet." assert isinstance(data, np.ndarray), "The data must be an array." return self._model.predict([data])[0] def update(self, data, classification): assert self._model is not None, "The model has not been initialized yet." self._model.partial_fit([data], [classification]) def reset_model(self): assert self._model is not None, "The model has not been initialized yet." assert self._original_model_path.exists(), "The model has not been saved. Perhaps saving was disabled." 
self._logger.debug("Loading model from \"%s\".", self._original_model_path) self._model = joblib.load(self._original_model_path) def export(self, path: str, classifications: List[str] = None, model_type: str = None, feature_index_mapping: FeatureIndexMapping = None): assert self._model is not None, "The model has not been initialized yet." if isinstance(self._model, SGDClassifier) and self._model.loss == 'perceptron': if classifications is None: classifications = ["0", "1"] model = { 'type': model_type or 'sparse perceptron', 'classifications': classifications, 'weights': self._model.coef_[0].tolist(), 'intercept': self._model.intercept_[0], } if feature_index_mapping is not None: if model_type is None: model['type'] = 'sparse perceptron' weights = model['weights'] del model['weights'] weights = {str(i): v for (i, v) in zip(feature_index_mapping, weights) if v != 0} model['sparseWeights'] = weights elif isinstance(self._model, MultinomialNB): if classifications is None: classifications = list(map(str, range(self._model.feature_count_.shape[1]))) feature_counts = [] for class_features in self._model.feature_count_: class_feature_counts = [] for index, count in enumerate(class_features): if count != 0: # Counts should already be integers. if feature_index_mapping is not None: index = feature_index_mapping[index] class_feature_counts.append((index, int(count))) feature_counts.append(class_feature_counts) model = { 'type': model_type or 'naive bayes', 'classifications': classifications, 'classCounts': self._model.class_count_.astype(dtype=np.int64).tolist(), 'featureCounts': feature_counts, 'totalNumFeatures': self._model.feature_count_.shape[1], 'smoothingFactor': self._model.alpha, } elif isinstance(self._model, NearestCentroidClassifier): if feature_index_mapping is not None: if model_type is None: model_type = 'sparse nearest centroid classifier' centroids = dict() if classifications is None: classifications = list(map(str, range(len(self.centroids_)))) for i, classification in enumerate(classifications): centroid = self._model.centroids_[i].tolist() if feature_index_mapping is not None: centroid = {str(i): v for (i, v) in zip(feature_index_mapping, centroid) if v != 0} centroids[classification] = dict( centroid=centroid, dataCount=self._model._num_samples_per_centroid[i]) model = { 'type': model_type or 'nearest centroid classifier', 'centroids': centroids, } else: raise Exception("Unrecognized model type.") with open(path, 'w') as f: json.dump(model, f, separators=(',', ':')) @dataclass class SciKitClassifierModule(Module): """ Module to provide SciKit Learn Classifier like models. """ _model_initializer: Any # Purposely not a singleton so that it is easy to get a model that has not been initialized. @provider def provide_classifier(self, builder: ClassAssistedBuilder[SciKitClassifier]) -> Classifier: return builder.build( _model_initializer=self._model_initializer, )
from decai.simulation.contract.classification.ncc import NearestCentroidClassifier from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule class NearestCentroidClassifierModule(SciKitClassifierModule): def __init__(self): super().__init__( _model_initializer=NearestCentroidClassifier)
from skmultiflow.trees import HAT, RegressionHAT from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule class DecisionTreeModule(SciKitClassifierModule): def __init__(self, regression=False): if regression: model_initializer = lambda: RegressionHAT( # leaf_prediction='mc' ) else: model_initializer = lambda: HAT( # leaf_prediction='mc', # nominal_attributes=[ 4], ) super().__init__(_model_initializer=model_initializer)
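A usage sketch (illustrative, not in the original sources) showing how modules like the ones above wire a model initializer into an `Injector` to obtain a fresh, uninitialized `Classifier`; `LoggingModule` is the same module used by the tests below.

from injector import Injector
from sklearn.linear_model import SGDClassifier

from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
from decai.simulation.logging_module import LoggingModule

inj = Injector([
    LoggingModule,
    # Any zero-argument callable returning a scikit-learn like model works here.
    SciKitClassifierModule(_model_initializer=lambda: SGDClassifier(loss='perceptron')),
])
model = inj.get(Classifier)  # Not yet initialized; call `init_model` before use.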
import unittest import numpy as np from injector import Injector from decai.simulation.contract.classification.classifier import Classifier from decai.simulation.contract.classification.ncc_module import NearestCentroidClassifierModule from decai.simulation.logging_module import LoggingModule class TestNearestCentroidClassifier(unittest.TestCase): @classmethod def setUpClass(cls): cls.inj = Injector([ LoggingModule, NearestCentroidClassifierModule, ]) def test_partial_fit(self): model = self.inj.get(Classifier) data = [ [-1.0, -1.0, ], [-0.5, -0.5, ], [+1.0, +1.0], [+0.5, +0.5], ] labels = [0, 0, 1, 1, ] data = np.array(data) labels = np.array(labels) model.init_model(data, labels) self.assertEqual(1, model.evaluate(data, labels)) sample = np.array([0.1, 0.1, ]) self.assertEqual(1, model.predict(sample)) # Update a point beyond `sample` so that `sample` gets a new label. model.update(np.array([0.3, 0.3, ]), 0) self.assertEqual(0, model.predict(sample)) self.assertEqual(1, model.evaluate(data, labels)) def test_partial_fit_2(self): model = self.inj.get(Classifier) data = [ [0, -1.0, ], [0, -0.5, ], [0, +1.0], [0, +0.5], ] labels = [0, 0, 1, 1, ] data = np.array(data) labels = np.array(labels) model.init_model(data, labels) self.assertEqual(1, model.evaluate(data, labels)) sample = np.array([0, +0.1, ]) self.assertEqual(1, model.predict(sample)) # Update a point beyond `sample` so that `sample` gets a new label. model.update(np.array([0, 0, ]), 0) self.assertEqual(0, model.predict(sample)) self.assertEqual(1, model.evaluate(data, labels))
import random import unittest import numpy as np from injector import Injector from decai.simulation.contract.balances import Balances from decai.simulation.contract.classification.classifier import Classifier from decai.simulation.contract.classification.perceptron import PerceptronModule from decai.simulation.contract.collab_trainer import CollaborativeTrainer, DefaultCollaborativeTrainerModule from decai.simulation.contract.incentive.stakeable import StakeableImModule from decai.simulation.contract.objects import Msg, RejectException, TimeMock from decai.simulation.logging_module import LoggingModule def _ground_truth(data): return data[0] * data[2] class TestCollaborativeTrainer(unittest.TestCase): @classmethod def setUpClass(cls): inj = Injector([ DefaultCollaborativeTrainerModule, LoggingModule, PerceptronModule, StakeableImModule, ]) cls.balances = inj.get(Balances) cls.decai = inj.get(CollaborativeTrainer) cls.time_method = inj.get(TimeMock) cls.good_address = 'sender' initial_balance = 1E6 cls.balances.initialize(cls.good_address, initial_balance) msg = Msg(cls.good_address, cls.balances[cls.good_address]) X = np.array([ # Initialization Data [0, 0, 0], [1, 1, 1], # Data to Add [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], ]) y = np.array([_ground_truth(x) for x in X]) cls.decai.model.init_model(np.array([X[0, :], X[1, :]]), np.array([y[0], y[1]])) score = cls.decai.model.evaluate(X, y) assert score != 1, "Model shouldn't fit the data yet." # Add all data. first_added_time = None for i in range(X.shape[0]): x = X[i] cls.time_method.set_time(cls.time_method() + 1) if first_added_time is None: first_added_time = cls.time_method() cls.decai.add_data(msg, x, y[i]) for _ in range(1000): score = cls.decai.model.evaluate(X, y) if score >= 1: break i = random.randint(0, X.shape[0] - 1) x = X[i] cls.time_method.set_time(cls.time_method() + 1) cls.decai.add_data(msg, x, y[i]) assert score == 1, "Model didn't fit the data." bal = cls.balances[msg.sender] assert bal < initial_balance, "Adding data should have a cost." # Make sure sender has some good data refunded so that they can report data later. cls.time_method.set_time(cls.time_method() + cls.decai.im.refund_time_s + 1) cls.decai.refund(msg, X[0], y[0], first_added_time) assert cls.balances[msg.sender] > bal, "Refunding should return value." def test_predict(self): data = np.array([0, 1, 0]) correct_class = _ground_truth(data) prediction = self.decai.model.predict(data) self.assertEqual(prediction, correct_class) def test_refund(self): data = np.array([0, 2, 0]) correct_class = _ground_truth(data) orig_address = "Orig" bal = 1E5 self.balances.initialize(orig_address, bal) msg = Msg(orig_address, 1E3) self.time_method.set_time(self.time_method() + 1) added_time = self.time_method() self.decai.add_data(msg, data, correct_class) self.assertLess(self.balances[orig_address], bal) # Add same data from another address. msg = Msg(self.good_address, 1E3) self.time_method.set_time(self.time_method() + 1) bal = self.balances[self.good_address] self.decai.add_data(msg, data, correct_class) self.assertLess(self.balances[self.good_address], bal) # Original address refunds. 
msg = Msg(orig_address, 1E3) bal = self.balances[orig_address] self.time_method.set_time(self.time_method() + self.decai.im.refund_time_s + 1) self.decai.refund(msg, data, correct_class, added_time) self.assertGreater(self.balances[orig_address], bal) def test_report(self): data = np.array([0, 0, 0]) correct_class = _ground_truth(data) submitted_classification = 1 - correct_class # Add bad data. malicious_address = 'malicious' self.balances.initialize(malicious_address, 1E6) bal = self.balances[malicious_address] msg = Msg(malicious_address, bal) self.time_method.set_time(self.time_method() + 1) added_time = self.time_method() self.decai.add_data(msg, data, submitted_classification) self.assertLess(self.balances[malicious_address], bal, "Adding data should have a cost.") self.time_method.set_time(self.time_method() + self.decai.im.refund_time_s + 1) # Can't refund. msg = Msg(malicious_address, self.balances[malicious_address]) try: self.decai.refund(msg, data, submitted_classification, added_time) self.fail("Should have failed.") except RejectException as e: self.assertEqual("The model doesn't agree with your contribution.", e.args[0]) bal = self.balances[self.good_address] msg = Msg(self.good_address, bal) self.decai.report(msg, data, submitted_classification, added_time, malicious_address) self.assertGreater(self.balances[self.good_address], bal) def test_report_take_all(self): data = np.array([0, 0, 0]) correct_class = _ground_truth(data) submitted_classification = 1 - correct_class # Add bad data. malicious_address = 'malicious_take_backer' self.balances.initialize(malicious_address, 1E6) bal = self.balances[malicious_address] msg = Msg(malicious_address, bal) self.time_method.set_time(self.time_method() + 1) added_time = self.time_method() self.decai.add_data(msg, data, submitted_classification) self.assertLess(self.balances[malicious_address], bal, "Adding data should have a cost.") self.time_method.set_time(self.time_method() + self.decai.im.any_address_claim_wait_time_s + 1) # Can't refund. msg = Msg(malicious_address, self.balances[malicious_address]) try: self.decai.refund(msg, data, submitted_classification, added_time) self.fail("Should have failed.") except RejectException as e: self.assertEqual("The model doesn't agree with your contribution.", e.args[0]) bal = self.balances[malicious_address] msg = Msg(malicious_address, bal) self.decai.report(msg, data, submitted_classification, added_time, malicious_address) self.assertGreater(self.balances[malicious_address], bal) def test_reset(self): inj = Injector([ LoggingModule, PerceptronModule, ]) m = inj.get(Classifier) X = np.array([ # Initialization Data [0, 0, 0], [1, 1, 1], ]) y = np.array([_ground_truth(x) for x in X]) m.init_model(X, y, save_model=True) data = np.array([ [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ]) original_predictions = [m.predict(x) for x in data] labels = np.array([_ground_truth(x) for x in data]) for x, y in zip(data, labels): m.update(x, y) predictions_after_training = [m.predict(x) for x in data] self.assertNotEqual(original_predictions, predictions_after_training) m.reset_model() new_predictions = [m.predict(x) for x in data] self.assertEqual(original_predictions, new_predictions)
from collections import Counter from logging import Logger import math from injector import inject, Module, singleton from decai.simulation.contract.balances import Balances from decai.simulation.contract.data.data_handler import StoredData from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism from decai.simulation.contract.objects import Address, RejectException, TimeMock @singleton class Stakeable(IncentiveMechanism): """ The Deposit, Take, Reward IM. A deposit is required to add data. Later that deposit can be reclaimed if the model still agrees with the contribution. """ @inject def __init__(self, # Injected balances: Balances, logger: Logger, time_method: TimeMock, # Parameters refund_time_s=60 * 60 * 24 * 1, any_address_claim_wait_time_s=60 * 60 * 24 * 9, cost_weight=1, ): super().__init__(refund_time_s=refund_time_s, any_address_claim_wait_time_s=any_address_claim_wait_time_s) self._balances = balances self._logger = logger self._time = time_method # Make sure there is at least a week between when refunds become possible and when anyone can claim the deposit. min_refund_window_s = 60 * 60 * 24 * 7 assert self.any_address_claim_wait_time_s > self.refund_time_s + min_refund_window_s, "Claim time is not enough." self.cost_weight = cost_weight self.num_good_data_per_user = Counter() self.total_num_good_data = 0 self._last_update_time_s = int(self._time()) def distribute_payment_for_prediction(self, sender, value): if value > 0: for agent_address, num_good in self.num_good_data_per_user.items(): # Round down like Solidity would. # This also guards against rounding issues so that the total value distributed stays below `value`. self._balances.send(sender, agent_address, int(value * num_good / self.total_num_good_data)) def get_next_add_data_cost(self, data, classification) -> float: """ :param data: A single sample of training data for the model. :param classification: The label for `data`. :return: The current cost to update a model with a specific sample of training data. """ current_time_s = int(self._time()) # TODO Limit how many times a data point can be added if the model already classifies it correctly? # TODO Add cost to flip all data? # TODO Add discount if already submitted good data? # Convert to integers like in Solidity. time_since_last_update_s = int((current_time_s - self._last_update_time_s)) if time_since_last_update_s <= 0: raise RejectException("Not enough time has passed since the last update.") # We really want to think about the time in hours # (divide by 3600; since this is under the square root in the denominator, we multiply by sqrt(3600) = 60 instead). # Equivalent to: cost = self.cost_weight / math.sqrt(time_since_last_update_s / 3600) result = self.cost_weight * 60 / int(math.sqrt(time_since_last_update_s)) result = int(result) # Make sure there is a minimum cost to adding data. if result < 1: result = 1 return result def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) -> (float, bool): cost = self.get_next_add_data_cost(data, classification) update_model = True if cost > msg_value: raise RejectException(f"Did not pay enough. Sent {msg_value} < {cost}") self._last_update_time_s = self._time() return (cost, update_model) def handle_refund(self, submitter: str, stored_data: StoredData, claimable_amount: float, claimed_by_submitter: bool, prediction) -> float: result = claimable_amount # Do not need to check submitter == stored_data.sender because DataHandler already did it.
if claimed_by_submitter: raise RejectException("Deposit already claimed by submitter.") if result <= 0: raise RejectException("There is no reward left to claim.") current_time_s = int(self._time()) if current_time_s - stored_data.time <= self.refund_time_s: raise RejectException("Not enough time has passed.") if callable(prediction): prediction = prediction() if prediction != stored_data.classification: raise RejectException("The model doesn't agree with your contribution.") self.num_good_data_per_user[submitter] += 1 self.total_num_good_data += 1 return result def handle_report(self, reporter: str, stored_data: StoredData, claimed_by_reporter: bool, prediction) -> float: if stored_data.claimable_amount <= 0: raise RejectException("There is no reward left to claim.") current_time_s = int(self._time()) if current_time_s - stored_data.time >= self.any_address_claim_wait_time_s: # Enough time has passed, give the entire remaining deposit to the reporter. self._logger.debug("Giving all remaining deposit to \"%s\".", reporter) result = stored_data.claimable_amount return result # Don't allow someone to claim back their own deposit if their data was wrong. # They can still claim it from another address, but only if they have submitted good data from that address. if reporter == stored_data.sender: raise RejectException("Cannot take your own deposit. Ask for a refund instead.") if claimed_by_reporter: raise RejectException("Deposit already claimed by reporter.") if current_time_s - stored_data.time <= self.refund_time_s: raise RejectException("Not enough time has passed.") if callable(prediction): prediction = prediction() if prediction == stored_data.classification: raise RejectException("The model should not agree with the contribution.") num_good = self.num_good_data_per_user[reporter] if num_good <= 0: raise RejectException(f"No good data was verified by reporter '{reporter}'.") result = stored_data.initial_deposit * num_good / self.total_num_good_data # Handle possible rounding errors, or the case where there is too little left to divide among reporters. if result <= 0 or result > stored_data.claimable_amount: result = stored_data.claimable_amount return result class StakeableImModule(Module): def configure(self, binder): binder.bind(IncentiveMechanism, to=Stakeable)
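An illustrative check (not in the original file) of the deposit decay in `get_next_add_data_cost` above: the required deposit falls with the square root of the hours since the last update. The value of `cost_weight` here is arbitrary.

import math

cost_weight = 10  # Hypothetical weight chosen so the decay is visible with integers.
for hours in (1, 4, 9, 25):
    time_since_last_update_s = hours * 60 * 60
    # Same formula as above: roughly cost_weight / sqrt(hours), computed with integers.
    cost = max(1, int(cost_weight * 60 / int(math.sqrt(time_since_last_update_s))))
    print(f"{hours} hour(s) since last update -> deposit {cost}")
# Prints deposits 10, 5, 3, 2: quadrupling the elapsed time roughly halves the cost.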
import random from collections import Counter, defaultdict from dataclasses import dataclass, field from enum import Enum from hashlib import sha256 from logging import Logger from typing import Dict, List, Optional, Tuple import math import numpy as np from injector import ClassAssistedBuilder, inject, Module, provider, singleton from decai.simulation.contract.balances import Balances from decai.simulation.contract.classification.classifier import Classifier from decai.simulation.contract.data.data_handler import StoredData from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism from decai.simulation.contract.objects import Address, Msg, RejectException, TimeMock class MarketPhase(Enum): """ Phases for the current market. """ # Phases are in chronological order. INITIALIZATION = 0 """ The market is being initialized and is awaiting the requested portion of the test set to be revealed. """ PARTICIPATION = 1 """ The market is open to data contributions. """ REVEAL_TEST_SET = 2 """ The market will no longer accept data and the test set must be revealed before rewards can be calculated. """ REWARD = 3 """ No more data contributions are being accepted but rewards still need to be calculated. """ REWARD_RESTART = 4 """ Same as `REWARD` but contributions have just been filtered out and the iteration needs to restart with the remaining contributions. """ REWARD_COLLECT = 5 """ The reward values have been computed and are ready to be collected. """ @dataclass class _Contribution: """ A contribution of training data. This is stored for convenience, but for some applications storing the data could be very expensive; instead, hashes could be stored and, during the reward phase, used to verify data as it is re-submitted. Note: this is not in the spirit of the prediction market (the current state should be public) since the model would not actually be updated and the submitted data would be private, so new data contributors would have very limited information. """ contributor_address: Address data: np.array classification: int balance: int """ Initially this is the amount deposited with this contribution. If contributions are not grouped by contributor, then while calculating rewards this gets updated to be the balance for this particular contribution, to know if it should get kicked out of the reward phase. """ score: Optional[int] = field(default=None, init=False) """ The score for this contribution. Mainly used for when contributions are not grouped. """ accuracy: Optional[float] = field(default=None, init=False) """ The accuracy of the model on the test set after adding this contribution. """ class PredictionMarket(IncentiveMechanism): """ An IM where rewards are computed based on how the model's performance changes with respect to a test set. For now, for the purposes of the simulation, the market is only intended to be run once. Eventually this class and the actual smart contract implementation of it should support restarting the market with a new bounty once a market has ended.
""" @inject def __init__(self, # Injected balances: Balances, logger: Logger, model: Classifier, time_method: TimeMock, # Parameters any_address_claim_wait_time_s=60 * 60 * 24 * 7, # Configuration Options allow_greater_deposit=False, group_contributions=False, reset_model_during_reward_phase=False, ): super().__init__(any_address_claim_wait_time_s=any_address_claim_wait_time_s) self._balances = balances self._logger = logger self.model = model self._time = time_method # Configuration Options self._allow_greater_deposit = allow_greater_deposit self._group_contributions = group_contributions self._reset_model_during_reward_phase = reset_model_during_reward_phase self._market_earliest_end_time_s = None self._market_balances: Dict[Address, float] = defaultdict(float) """ Keeps track of balances in the market. """ self._next_data_index = None self.min_stake = 1 """ The minimum required amount to deposit. Should be at least 1 to handle the worst case where the contribution takes the accuracy from 1 to 0. """ self.state = None @property def reset_model_during_reward_phase(self): return self._reset_model_during_reward_phase def distribute_payment_for_prediction(self, sender, value): pass def get_num_contributions_in_market(self): """ :return: The total number of contributions currently in the market. This can decrease as "bad" contributors are removed during the reward phase. """ return len(self._market_data) # Methods in chronological order of the PM. @staticmethod def hash_test_set(test_set): """ :param test_set: A test set. :return: The hash of `test_set`. """ return sha256(str(test_set).encode()).hexdigest() @staticmethod def get_test_set_hashes(num_pieces, x_test, y_test) -> Tuple[list, list]: """ Helper to break the test set into `num_pieces` to initialize the market. :param num_pieces: The number of pieces to break the test set into. :param x_test: The features for the test set. :param y_test: The labels for `x_test`. :return: tuple A list of `num_pieces` hashes for each portion of the test set. The test set divided into `num_pieces`. """ test_sets = [] test_dataset_hashes = [] assert len(x_test) == len(y_test) >= num_pieces for i in range(num_pieces): start = int(i / num_pieces * len(x_test)) end = int((i + 1) / num_pieces * len(x_test)) test_set = list(zip(x_test[start:end], y_test[start:end])) test_sets.append(test_set) test_dataset_hashes.append(PredictionMarket.hash_test_set(test_set)) assert sum(len(t) for t in test_sets) == len(x_test) return test_dataset_hashes, test_sets def initialize_market(self, msg: Msg, test_dataset_hashes: List[str], # Ending criteria: min_length_s: int, min_num_contributions: int) -> int: """ Initialize the prediction market. :param msg: Indicates the one posting the bounty and the amount being committed for the bounty. The total bounty should be an integer since it also represents the number of "rounds" in the PM. :param test_dataset_hashes: The committed hashes for the portions of the test set. :param min_length_s: The minimum length in seconds of the market. :param min_num_contributions: The minimum number of contributions before ending the market. :return: The index of the test set that must be revealed. """ assert self._market_earliest_end_time_s is None assert self._next_data_index is None, "The market end has already been triggered." 
assert self.state is None self.bounty_provider = msg.sender self.total_bounty = msg.value self.remaining_bounty_rounds = self.total_bounty self.test_set_hashes = test_dataset_hashes assert len(self.test_set_hashes) > 1 self.test_reveal_index = random.randrange(len(self.test_set_hashes)) self.next_test_set_index_to_verify = 0 if self.next_test_set_index_to_verify == self.test_reveal_index: self.next_test_set_index_to_verify += 1 self._market_data: List[_Contribution] = [] self.min_num_contributions = min_num_contributions self._market_earliest_end_time_s = self._time() + min_length_s self.reward_phase_end_time_s = None self.prev_acc = None self.original_acc = None # Pay the owner since it will be the owner distributing funds using `handle_refund` and `handle_report` later. self._balances.send(self.bounty_provider, self.owner, self.total_bounty) self.state = MarketPhase.INITIALIZATION return self.test_reveal_index def add_test_set_hashes(self, msg: Msg, more_test_set_hashes: List[str]) -> int: """ (Optional) Add more hashes for portions of the test set to reveal. This helps in case not all hashes can be sent in one transaction. :param msg: The message for this transaction. The sender must be the bounty provider. :param more_test_set_hashes: More committed hashes for the portions of the test set. :return: The index of the test set that must be revealed. """ assert self.state == MarketPhase.INITIALIZATION assert msg.sender == self.bounty_provider # Ensure that a new test set is given and the sender isn't just trying to get a new random index. assert len(more_test_set_hashes) > 0, "You must give at least one hash." self.test_set_hashes += more_test_set_hashes self.test_reveal_index = random.randrange(len(self.test_set_hashes)) self.next_test_set_index_to_verify = 0 if self.next_test_set_index_to_verify == self.test_reveal_index: self.next_test_set_index_to_verify += 1 return self.test_reveal_index def verify_test_set(self, index: int, test_set_portion): """ Verify that a portion of the test set matches the hash it was committed to. :param index: The index of the test set in the originally committed list of hashes. :param test_set_portion: The portion of the test set to reveal. """ assert 0 <= index < len(self.test_set_hashes) assert len(test_set_portion) > 0 test_set_hash = self.hash_test_set(test_set_portion) assert test_set_hash == self.test_set_hashes[index] def reveal_init_test_set(self, test_set_portion): """ Reveal the required portion of the full test set. :param test_set_portion: The portion of the test set that must be revealed before starting the Participation Phase. """ assert self.state == MarketPhase.INITIALIZATION self.verify_test_set(self.test_reveal_index, test_set_portion) self.state = MarketPhase.PARTICIPATION def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) -> (float, bool): # Allow them to stake as much as they want to ensure they get included in future rounds. assert self.state == MarketPhase.PARTICIPATION, f'Current state is: {self.state}.' if msg_value < self.min_stake: raise RejectException(f"Did not pay enough. Sent {msg_value} < {self.min_stake}") if self._allow_greater_deposit: cost = msg_value else: cost = self.min_stake update_model = False self._market_data.append(_Contribution(contributor_address, data, classification, cost)) self._market_balances[contributor_address] += cost return (cost, update_model) def end_market(self): """ Signal the end of the prediction market.
""" assert self.state == MarketPhase.PARTICIPATION, f'Current state is: {self.state}.' if self.get_num_contributions_in_market() < self.min_num_contributions \ and self._time() < self._market_earliest_end_time_s: raise RejectException("Can't end the market yet.") self._logger.info("Ending market.") self.state = MarketPhase.REVEAL_TEST_SET self._next_data_index = 0 self.test_data, self.test_labels = [], [] def verify_next_test_set(self, test_set_portion): assert self.state == MarketPhase.REVEAL_TEST_SET self.verify_test_set(self.next_test_set_index_to_verify, test_set_portion) test_data, test_labels = zip(*test_set_portion) self.test_data += test_data self.test_labels += test_labels self.next_test_set_index_to_verify += 1 if self.next_test_set_index_to_verify == self.test_reveal_index: self.next_test_set_index_to_verify += 1 if self.next_test_set_index_to_verify == len(self.test_set_hashes): self.state = MarketPhase.REWARD_RESTART self.test_data = np.array(self.test_data) self.test_labels = np.array(self.test_labels) def process_contribution(self): """ Reward Phase: Process the next data contribution. """ assert self.remaining_bounty_rounds > 0, "The market has ended." if self.state == MarketPhase.REWARD_RESTART: self._next_data_index = 0 self._logger.debug("Remaining bounty rounds: %s", self.remaining_bounty_rounds) self._scores = defaultdict(float) if self._reset_model_during_reward_phase: # The paper implies that we should not retrain the model and instead only train once. # The problem there is that a contributor is affected by bad contributions # between them and the last counted contribution after bad contributions are filtered out. self.model.reset_model() if self.prev_acc is None: # XXX This evaluation can be expensive and likely won't work in Ethereum. # We need to find a more efficient way to do this or let a contributor proved they did it. self.prev_acc = self.model.evaluate(self.test_data, self.test_labels) self.original_acc = self.prev_acc self._logger.debug("Accuracy: %0.2f%%", self.prev_acc * 100) elif not self._reset_model_during_reward_phase: # When calculating rewards, the score, the same accuracy for the initial model should be used. self.prev_acc = self.original_acc self._num_market_contributions: Dict[Address, int] = Counter() self._worst_contribution: Optional[_Contribution] = None self._worst_contributor: Optional[Address] = None self._min_score = math.inf self.state = MarketPhase.REWARD else: assert self.state == MarketPhase.REWARD contribution = self._market_data[self._next_data_index] self._num_market_contributions[contribution.contributor_address] += 1 self.model.update(contribution.data, contribution.classification) if not self._reset_model_during_reward_phase and contribution.accuracy is None: # XXX Potentially expensive gas cost. contribution.accuracy = self.model.evaluate(self.test_data, self.test_labels) self._next_data_index += 1 iterated_through_all_contributions = self._next_data_index >= self.get_num_contributions_in_market() if iterated_through_all_contributions \ or not self._group_contributions \ or self._market_data[self._next_data_index].contributor_address != contribution.contributor_address: # Need to compute score. if self._reset_model_during_reward_phase: # XXX Potentially expensive gas cost. 
acc = self.model.evaluate(self.test_data, self.test_labels) else: acc = contribution.accuracy score_change = acc - self.prev_acc if self._group_contributions: new_score = self._scores[contribution.contributor_address] = \ self._scores[contribution.contributor_address] + score_change else: new_score = contribution.score = score_change if new_score < self._min_score: self._min_score = new_score if self._group_contributions: self._worst_contributor = contribution.contributor_address else: self._worst_contribution = contribution elif self._group_contributions and self._worst_contributor == contribution.contributor_address: # Their score increased, they might not be the worst anymore. # Optimize: use a heap. self._worst_contributor, self._min_score = min(self._scores.items(), key=lambda x: x[1]) self.prev_acc = acc if iterated_through_all_contributions: # Find min score and remove that address from the list. self._logger.debug("Minimum score: %.2f", self._min_score) if self._min_score < 0: if self._group_contributions: num_rounds = self._market_balances[self._worst_contributor] / -self._min_score else: num_rounds = self._worst_contribution.balance / -self._min_score if num_rounds > self.remaining_bounty_rounds: num_rounds = self.remaining_bounty_rounds self._logger.debug("Will simulate %.2f rounds.", num_rounds) self.remaining_bounty_rounds -= num_rounds if self.remaining_bounty_rounds == 0: self._end_reward_phase(num_rounds) else: if self._group_contributions: participants_to_remove = set() for participant, score in self._scores.items(): self._logger.debug("Score for \"%s\": %.2f", participant, score) self._market_balances[participant] += score * num_rounds if self._market_balances[participant] < self._num_market_contributions[participant]: # They don't have enough left to stake next time. participants_to_remove.add(participant) self._market_data: List[_Contribution] = list( filter(lambda c: c.contributor_address not in participants_to_remove, self._market_data)) else: for contribution in self._market_data: contribution.balance += contribution.score * num_rounds if contribution.balance < 1: # Contribution is going to get kicked out. self._market_balances[contribution.contributor_address] += contribution.balance self._market_data: List[_Contribution] = \ list(filter(lambda c: c.balance >= 1, self._market_data)) if self.get_num_contributions_in_market() == 0: self.state = MarketPhase.REWARD_COLLECT self.remaining_bounty_rounds = 0 self.reward_phase_end_time_s = self._time() else: self.state = MarketPhase.REWARD_RESTART else: num_rounds = self.remaining_bounty_rounds self.remaining_bounty_rounds = 0 self._end_reward_phase(num_rounds) def _end_reward_phase(self, num_rounds): """ Distribute rewards. :param num_rounds: The number of rounds remaining. 
""" self._logger.debug("Dividing remaining bounty amongst all remaining contributors to simulate %.2f rounds.", num_rounds) self.reward_phase_end_time_s = self._time() self.state = MarketPhase.REWARD_COLLECT if self._group_contributions: for participant, score in self._scores.items(): self._logger.debug("Score for \"%s\": %.2f", participant, score) self._market_balances[participant] += score * num_rounds else: for contribution in self._market_data: self._market_balances[contribution.contributor_address] += \ contribution.score * num_rounds self._market_data = [] def handle_refund(self, submitter: Address, stored_data: StoredData, claimable_amount: float, claimed_by_submitter: bool, prediction) -> float: assert self.remaining_bounty_rounds == 0, "The reward phase has not finished processing contributions." assert self.state == MarketPhase.REWARD_COLLECT result = self._market_balances[submitter] self._logger.debug("Reward for \"%s\": %.2f", submitter, result) if result > 0: del self._market_balances[submitter] else: result = 0 return result def handle_report(self, reporter: Address, stored_data: StoredData, claimed_by_reporter: bool, prediction) -> float: assert self.state == MarketPhase.REWARD_COLLECT, "The reward phase has not finished processing contributions." assert self.remaining_bounty_rounds == 0 assert self.reward_phase_end_time_s > 0 if self._time() - self.reward_phase_end_time_s >= self.any_address_claim_wait_time_s: submitter = stored_data.sender result = self._market_balances[submitter] if result > 0: self._logger.debug("Giving reward for \"%s\" to \"%s\". Reward: %s", submitter, reporter, result) del self._market_balances[reporter] else: result = 0 return result @dataclass class PredictionMarketImModule(Module): allow_greater_deposit: bool = field(default=False) group_contributions: bool = field(default=False) reset_model_during_reward_phase: bool = field(default=False) @provider @singleton def provide_data_loader(self, builder: ClassAssistedBuilder[PredictionMarket]) -> IncentiveMechanism: return builder.build( allow_greater_deposit=self.allow_greater_deposit, group_contributions=self.group_contributions, reset_model_during_reward_phase=self.reset_model_during_reward_phase, )
from abc import ABC, abstractmethod import math from decai.simulation.contract.data.data_handler import StoredData from decai.simulation.contract.objects import Address, SmartContract class IncentiveMechanism(ABC, SmartContract): """ Defines incentives for others to contribute "good" quality data. """ def __init__(self, refund_time_s=math.inf, any_address_claim_wait_time_s=math.inf): super().__init__() self.refund_time_s = refund_time_s """ Amount of time to wait to get a refund back. Once this amount of time has passed, the entire deposit can be reclaimed. Also once this amount of time has passed, the deposit (in full or in part) can be taken by others. Defaults to not allowing refunds. """ self.any_address_claim_wait_time_s = any_address_claim_wait_time_s """ Amount of time after which anyone can take someone's entire remaining deposit. The purpose of this is to help ensure that value does not get "stuck" in a contract. This must be greater than the required amount of time to wait for attempting a refund. Contracts may want to enforce that this is much greater than the refund wait time, to give contributors more time to get their deposit back before others can take too much. """ @abstractmethod def distribute_payment_for_prediction(self, sender: str, value: float): """ Share `value` with those that submit data. :param sender: The address of the one calling prediction. :param value: The amount sent with the request to call prediction. """ pass @abstractmethod def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) \ -> (float, bool): """ Determine if the request to add data is acceptable. :param contributor_address: The address of the one attempting to add data. :param msg_value: The value sent with the initial transaction to add data. :param data: A single sample of training data for the model. :param classification: The label for `data`. :return: tuple The cost required to add new data. `True` if the model should be updated, `False` otherwise. """ pass @abstractmethod def handle_refund(self, submitter: str, stored_data: StoredData, claimable_amount: float, claimed_by_submitter: bool, prediction) -> float: """ Notify that a refund is being attempted. :param submitter: The address of the one attempting a refund. :param stored_data: The data for which a refund is being attempted. :param claimable_amount: The amount that can be claimed for the refund. :param claimed_by_submitter: True if the data has already been claimed by `submitter`, otherwise false. :param prediction: The current prediction of the model for data or a callable with no parameters to lazily get the prediction of the model on the data. :return: The amount to refund to `submitter`. """ pass @abstractmethod def handle_report(self, reporter: str, stored_data: StoredData, claimed_by_reporter: bool, prediction) \ -> float: """ Notify that data is being reported as bad or old. :param reporter: The address of the one reporting about the data. :param stored_data: The data being reported. :param claimed_by_reporter: True if the data has already been claimed by `reporter`, otherwise false. :param prediction: The current prediction of the model for data or a callable with no parameters to lazily get the prediction of the model on the data. :return: The amount to reward to `reporter`. """ pass
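For illustration only (not part of the original module), a minimal sketch of a concrete `IncentiveMechanism`: a flat-fee mechanism with no payouts. The class name `FlatFeeIm` and its parameters are hypothetical.

from decai.simulation.contract.data.data_handler import StoredData
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.objects import Address, RejectException


class FlatFeeIm(IncentiveMechanism):
    """ Hypothetical mechanism: charge a fixed deposit; never reward reporting. """

    def __init__(self, fee=1):
        # Allow refunds after one minute; keep the default (infinite) claim wait time.
        super().__init__(refund_time_s=60)
        self._fee = fee

    def distribute_payment_for_prediction(self, sender: str, value: float):
        # Keep all prediction payments in the contract.
        pass

    def handle_add_data(self, contributor_address: Address, msg_value: float,
                        data, classification) -> (float, bool):
        if msg_value < self._fee:
            raise RejectException(f"Did not pay enough. Sent {msg_value} < {self._fee}")
        # Charge the flat fee and always update the model.
        return self._fee, True

    def handle_refund(self, submitter: str, stored_data: StoredData, claimable_amount: float,
                      claimed_by_submitter: bool, prediction) -> float:
        # Refund the full remaining deposit with no further checks.
        return claimable_amount

    def handle_report(self, reporter: str, stored_data: StoredData,
                      claimed_by_reporter: bool, prediction) -> float:
        # Reporting pays nothing in this toy mechanism.
        return 0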
import unittest from collections import defaultdict from typing import cast from injector import Injector from decai.simulation.contract.balances import Balances from decai.simulation.contract.classification.perceptron import PerceptronModule from decai.simulation.contract.data.data_handler import StoredData from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism from decai.simulation.contract.incentive.prediction_market import MarketPhase, \ PredictionMarket, PredictionMarketImModule from decai.simulation.contract.objects import Msg, TimeMock from decai.simulation.data.data_loader import DataLoader from decai.simulation.data.simple_data_loader import SimpleDataModule from decai.simulation.logging_module import LoggingModule class TestPredictionMarket(unittest.TestCase): def test_market_like_original_paper(self): inj = Injector([ SimpleDataModule, LoggingModule, PerceptronModule, PredictionMarketImModule( allow_greater_deposit=False, group_contributions=False, reset_model_during_reward_phase=False, ), ]) balances = inj.get(Balances) data = inj.get(DataLoader) im = cast(PredictionMarket, inj.get(IncentiveMechanism)) im.owner = 'owner' assert isinstance(im, PredictionMarket) init_train_data_portion = 0.2 initializer_address = 'initializer' total_bounty = 100_000 balances.initialize(initializer_address, total_bounty) good_contributor_address = 'good_contributor' initial_good_balance = 10_000 balances.initialize(good_contributor_address, initial_good_balance) bad_contributor_address = 'bad_contributor' initial_bad_balance = 10_000 balances.initialize(bad_contributor_address, initial_bad_balance) (x_train, y_train), (x_test, y_test) = data.load_data() init_idx = int(len(x_train) * init_train_data_portion) assert init_idx > 0 x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx] x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:] # Split test set into pieces. 
num_pieces = 10 test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test) # Ending criteria: min_length_s = 100 min_num_contributions = min(len(x_remaining), 100) # Commitment Phase self.assertIsNone(im.state) im.model.init_model(x_init_data, y_init_data, save_model=True) hashes_split = 3 test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty), test_dataset_hashes[:hashes_split], min_length_s, min_num_contributions) assert 0 <= test_reveal_index < len(test_dataset_hashes) self.assertEqual(MarketPhase.INITIALIZATION, im.state) test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:]) assert 0 <= test_reveal_index < len(test_dataset_hashes) self.assertEqual(MarketPhase.INITIALIZATION, im.state) im.reveal_init_test_set(test_sets[test_reveal_index]) self.assertEqual(MarketPhase.PARTICIPATION, im.state) # Participation Phase value = 100 total_deposits = defaultdict(float) for i in range(min_num_contributions): data = x_remaining[i] classification = y_remaining[i] if i % 2 == 0: contributor = good_contributor_address else: contributor = bad_contributor_address classification = 1 - classification cost, _ = im.handle_add_data(contributor, value, data, classification) self.assertEqual(im.min_stake, cost, "Cost should be the minimum stake because of the options passed in.") balances.send(contributor, im.owner, cost) total_deposits[contributor] += cost # Reward Phase self.assertEqual(MarketPhase.PARTICIPATION, im.state) im.end_market() self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state) for i, test_set_portion in enumerate(test_sets): if i != test_reveal_index: im.verify_next_test_set(test_set_portion) self.assertEqual(MarketPhase.REWARD_RESTART, im.state) while im.remaining_bounty_rounds > 0: im.process_contribution() # Collect rewards. self.assertEqual(MarketPhase.REWARD_COLLECT, im.state) for contributor in [good_contributor_address, bad_contributor_address]: # Don't need to pass the right StoredData. # noinspection PyTypeChecker reward = im.handle_refund(contributor, None, 0, False, None) balances.send(im.owner, contributor, reward) self.assertGreater(total_deposits[good_contributor_address], 0) self.assertGreater(total_deposits[bad_contributor_address], 0) # General checks that should be true for a market with a reasonably sensitive model. self.assertLess(balances[im.owner], total_bounty, f"Some of the bounty should be distributed.\n" f"Balances: {balances.get_all()}") self.assertLess(0, balances[im.owner]) # Sometimes the bad contributor happens to get some value but not much. 
self.assertAlmostEqual(balances[bad_contributor_address], initial_bad_balance, delta=2, msg=f"The bad contributor should lose funds.\n" f"Balances: {balances.get_all()}") self.assertGreater(balances[good_contributor_address], initial_good_balance) self.assertLess(balances[bad_contributor_address], balances[good_contributor_address]) self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address], total_bounty) self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty, balances[good_contributor_address] + balances[bad_contributor_address] + balances[im.owner], "Should be a zero-sum.") def test_market(self): inj = Injector([ SimpleDataModule, LoggingModule, PerceptronModule, PredictionMarketImModule( allow_greater_deposit=True, group_contributions=True, reset_model_during_reward_phase=True, ), ]) balances = inj.get(Balances) data = inj.get(DataLoader) im = cast(PredictionMarket, inj.get(IncentiveMechanism)) im.owner = 'owner' assert isinstance(im, PredictionMarket) init_train_data_portion = 0.2 initializer_address = 'initializer' total_bounty = 100_000 balances.initialize(initializer_address, total_bounty) good_contributor_address = 'good_contributor' initial_good_balance = 10_000 balances.initialize(good_contributor_address, initial_good_balance) bad_contributor_address = 'bad_contributor' initial_bad_balance = 10_000 balances.initialize(bad_contributor_address, initial_bad_balance) (x_train, y_train), (x_test, y_test) = data.load_data() init_idx = int(len(x_train) * init_train_data_portion) assert init_idx > 0 x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx] x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:] # Split test set into pieces. num_pieces = 10 test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test) # Ending criteria: min_length_s = 100 min_num_contributions = min(len(x_remaining), 100) # Commitment Phase self.assertIsNone(im.state) im.model.init_model(x_init_data, y_init_data, save_model=True) hashes_split = 3 test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty), test_dataset_hashes[:hashes_split], min_length_s, min_num_contributions) assert 0 <= test_reveal_index < len(test_dataset_hashes) self.assertEqual(MarketPhase.INITIALIZATION, im.state) test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:]) assert 0 <= test_reveal_index < len(test_dataset_hashes) self.assertEqual(MarketPhase.INITIALIZATION, im.state) im.reveal_init_test_set(test_sets[test_reveal_index]) self.assertEqual(MarketPhase.PARTICIPATION, im.state) # Participation Phase value = 100 total_deposits = defaultdict(float) for i in range(min_num_contributions): data = x_remaining[i] classification = y_remaining[i] if i % 2 == 0: contributor = good_contributor_address else: contributor = bad_contributor_address classification = 1 - classification cost, _ = im.handle_add_data(contributor, value, data, classification) balances.send(contributor, im.owner, cost) total_deposits[contributor] += cost # Reward Phase self.assertEqual(MarketPhase.PARTICIPATION, im.state) im.end_market() self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state) for i, test_set_portion in enumerate(test_sets): if i != test_reveal_index: im.verify_next_test_set(test_set_portion) self.assertEqual(MarketPhase.REWARD_RESTART, im.state) while im.remaining_bounty_rounds > 0: im.process_contribution() # Collect rewards. 
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state) for contributor in [good_contributor_address, bad_contributor_address]: # Don't need to pass the right StoredData. # noinspection PyTypeChecker reward = im.handle_refund(contributor, None, 0, False, None) balances.send(im.owner, contributor, reward) self.assertGreater(total_deposits[good_contributor_address], 0) self.assertGreater(total_deposits[bad_contributor_address], 0) # General checks that should be true for a market with a reasonably sensitive model. self.assertLess(balances[im.owner], total_bounty, f"Some of the bounty should be distributed.\n" f"Balances: {balances.get_all()}") self.assertLess(0, balances[im.owner]) self.assertLess(balances[bad_contributor_address], initial_bad_balance) self.assertGreater(balances[good_contributor_address], initial_good_balance) self.assertLess(balances[bad_contributor_address], balances[good_contributor_address]) self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address], total_bounty) self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty, balances[good_contributor_address] + balances[bad_contributor_address] + balances[im.owner], "Should be a zero-sum.") self.assertEqual(initial_bad_balance - total_deposits[bad_contributor_address], balances[bad_contributor_address], "The bad contributor should lose all of their deposits.") def test_report(self): inj = Injector([ SimpleDataModule, LoggingModule, PerceptronModule, PredictionMarketImModule( allow_greater_deposit=True, group_contributions=True, reset_model_during_reward_phase=True, ), ]) balances = inj.get(Balances) data = inj.get(DataLoader) im = cast(PredictionMarket, inj.get(IncentiveMechanism)) im.owner = 'owner' time_method = inj.get(TimeMock) assert isinstance(im, PredictionMarket) init_train_data_portion = 0.2 initializer_address = 'initializer' total_bounty = 100_000 balances.initialize(initializer_address, total_bounty) good_contributor_address = 'good_contributor' initial_good_balance = 10_000 balances.initialize(good_contributor_address, initial_good_balance) bad_contributor_address = 'bad_contributor' initial_bad_balance = 10_000 balances.initialize(bad_contributor_address, initial_bad_balance) (x_train, y_train), (x_test, y_test) = data.load_data() init_idx = int(len(x_train) * init_train_data_portion) assert init_idx > 0 x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx] x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:] # Split test set into pieces. 
num_pieces = 10 test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test) # Ending criteria: min_length_s = 100 min_num_contributions = min(len(x_remaining), 100) # Commitment Phase self.assertIsNone(im.state) im.model.init_model(x_init_data, y_init_data, save_model=True) test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty), test_dataset_hashes, min_length_s, min_num_contributions) self.assertEqual(MarketPhase.INITIALIZATION, im.state) assert 0 <= test_reveal_index < len(test_dataset_hashes) im.reveal_init_test_set(test_sets[test_reveal_index]) self.assertEqual(MarketPhase.PARTICIPATION, im.state) # Participation Phase value = 100 total_deposits = defaultdict(float) stored_data = None for i in range(min_num_contributions): time_method.add_time(60) data = x_remaining[i] classification = y_remaining[i] if i % 2 == 0: contributor = good_contributor_address else: contributor = bad_contributor_address classification = 1 - classification cost, _ = im.handle_add_data(contributor, value, data, classification) if stored_data is None: stored_data = StoredData(classification, time_method(), contributor, cost, cost) balances.send(contributor, im.owner, cost) total_deposits[contributor] += cost # Reward Phase self.assertEqual(MarketPhase.PARTICIPATION, im.state) im.end_market() time_method.add_time(60) self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state) for i, test_set_portion in enumerate(test_sets): if i != test_reveal_index: im.verify_next_test_set(test_set_portion) self.assertEqual(MarketPhase.REWARD_RESTART, im.state) while im.remaining_bounty_rounds > 0: time_method.add_time(60) im.process_contribution() # Collect rewards. self.assertEqual(MarketPhase.REWARD_COLLECT, im.state) # Get some stored data. # Make sure reporting doesn't work yet. reward = im.handle_report(bad_contributor_address, stored_data, False, None) self.assertEqual(0, reward, "There should be no reward yet.") time_method.add_time(im.any_address_claim_wait_time_s) reward = im.handle_report(bad_contributor_address, stored_data, False, None) balances.send(im.owner, bad_contributor_address, reward) # Don't need to pass the right StoredData. # noinspection PyTypeChecker reward = im.handle_refund(bad_contributor_address, None, 0, False, None) balances.send(im.owner, bad_contributor_address, reward) # General checks that should be true for a market with a reasonably sensitive model. self.assertLess(balances[im.owner], total_bounty, f"Some of the bounty should be distributed.\n" f"Balances: {balances.get_all()}") self.assertLess(0, balances[im.owner]) self.assertGreater(total_deposits[good_contributor_address], 0) self.assertGreater(total_deposits[bad_contributor_address], 0) # The bad contributor profited because they reported the good contributor. self.assertGreater(balances[bad_contributor_address], initial_bad_balance) self.assertLess(balances[good_contributor_address], initial_good_balance) self.assertLess(balances[good_contributor_address], balances[bad_contributor_address]) self.assertLessEqual(balances[bad_contributor_address] - balances[good_contributor_address], total_bounty) self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty, balances[good_contributor_address] + balances[bad_contributor_address] + balances[im.owner], "Should be a zero-sum.") self.assertEqual(initial_good_balance - total_deposits[good_contributor_address], balances[good_contributor_address], "The good contributor should lose all of their deposits.")
from collections import defaultdict from dataclasses import dataclass, field from typing import Dict import numpy as np from injector import inject, singleton from decai.simulation.contract.objects import Address, RejectException, SmartContract, TimeMock @dataclass class StoredData: # Storing the data itself is not necessary since it is already part of the key used by `DataHandler`. classification: object time: int sender: Address # Need to use float since the numbers might be large. They should still actually be integers. initial_deposit: float """ The amount that was initially given to deposit this data. """ claimable_amount: float """ The amount of the deposit that can still be claimed. """ claimed_by: Dict[Address, bool] = field(default_factory=lambda: defaultdict(bool)) @inject @singleton @dataclass class DataHandler(SmartContract): """ Stores added training data and corresponding meta-data. """ _time: TimeMock _added_data: Dict[tuple, StoredData] = field(default_factory=dict, init=False) def __iter__(self): return iter(self._added_data.items()) def _get_key(self, data, classification, added_time: int, original_author: Address): if isinstance(data, np.ndarray): # The `.tolist()` isn't necessary but is faster. data = tuple(data.tolist()) else: data = tuple(data) return (data, classification, added_time, original_author) def get_data(self, data, classification, added_time: int, original_author: Address) -> StoredData: """ :param data: The originally submitted features. :param classification: The label originally submitted for `data`. :param added_time: The time in seconds at which the data was added. :param original_author: The address that originally added the data. :return: The stored information for the data. """ key = self._get_key(data, classification, added_time, original_author) result = self._added_data.get(key) return result def handle_add_data(self, contributor_address: Address, cost, data, classification): """ Log an attempt to add data. :param contributor_address: The address of the one attempting to add data. :param cost: The cost required to add new data. :param data: A single sample of training data for the model. :param classification: The label for `data`. """ current_time_s = self._time() key = self._get_key(data, classification, current_time_s, contributor_address) if key in self._added_data: raise RejectException("Data has already been added.") d = StoredData(classification, current_time_s, contributor_address, cost, cost) self._added_data[key] = d def handle_refund(self, submitter: Address, data, classification, added_time: int) -> (float, bool, StoredData): """ Log a refund attempt. :param submitter: The address of the one attempting a refund. :param data: The data for which to attempt a refund. :param classification: The label originally submitted for `data`. :param added_time: The time in seconds at which the data was added. :return: The amount that can be claimed for the refund. True if the data has already been claimed by `submitter`, otherwise false. The stored data. """ stored_data = self.get_data(data, classification, added_time, submitter) assert stored_data is not None, "Data not found." assert stored_data.sender == submitter, "Data isn't from the sender." claimable_amount = stored_data.claimable_amount claimed_by_submitter = stored_data.claimed_by[submitter] return (claimable_amount, claimed_by_submitter, stored_data) def handle_report(self, reporter: Address, data, classification, added_time: int, original_author: Address) \ -> (bool, StoredData): """ Retrieve information about the data to report.
:param reporter: The address of the one reporting the data. :param data: The data to report. :param classification: The label originally submitted for `data`. :param added_time: The time in seconds at which the data was added. :param original_author: The address that originally added the data. :return: True if the data has already been claimed by `reporter`, otherwise false. The stored data. """ stored_data = self.get_data(data, classification, added_time, original_author) assert stored_data is not None, "Data not found." claimed_by_reporter = stored_data.claimed_by[reporter] # The Solidity implementation updates `stored_data.claimed_by` here, which is fine. # We do not update it here because if an error occurs while attempting a refund, # then the change would have to be undone. # Instead, `stored_data.claimed_by` is updated in `update_claimable_amount`. return (claimed_by_reporter, stored_data) def update_claimable_amount(self, receiver: Address, stored_data: StoredData, reward_amount: float): # The Solidity implementation does the update in another place, which is fine for it. # Here we only update it once we're sure the refund can be completed successfully. if reward_amount > 0: stored_data.claimed_by[receiver] = True stored_data.claimable_amount -= reward_amount
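An illustrative note (not in the original file) on `_get_key` above: numpy arrays are unhashable, so the handler converts them to tuples before using them in the dict key. The values below are made up.

import numpy as np

data = np.array([0, 1, 1])
# Mirrors `DataHandler._get_key`: the ndarray becomes a plain tuple so the
# (data, classification, added_time, original_author) key is hashable.
key = (tuple(data.tolist()), 1, 1_000, 'some_address')
lookup = {key: 'stored data would go here'}
assert key in lookup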
import unittest from queue import PriorityQueue from decai.simulation.simulate import Agent class TestAgent(unittest.TestCase): def test_queue(self): q = PriorityQueue() agents = [ Agent('a1', 10, 1, 1, 1), Agent('a2', 10, 1, 1, 1), Agent('a0', 10, 1, 1, 1), ] [q.put((0, a)) for a in agents] results = [q.get()[1].address for _ in agents] self.assertEqual(['a0', 'a1', 'a2'], results)
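# Editor's note (sketch, not in the original tests): with equal priorities, as
# above, `PriorityQueue` falls back to comparing the `Agent` objects themselves,
# so this test implicitly relies on agents ordering by `address`, e.g.:
#
#     assert Agent('a0', 10, 1, 1, 1) < Agent('a1', 10, 1, 1, 1)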
from dataclasses import dataclass, field
from logging import Logger
from typing import List

import numpy as np
from injector import inject, Module
from sklearn.utils import shuffle
from tqdm import trange

from .data_loader import DataLoader


@inject
@dataclass
class TicTacToeDataLoader(DataLoader):
    """
    Load data for Tic-Tac-Toe games.

    Each sample is a flattened `width` x `length` board.
    The players are 1 and -1. The data is from the perspective of player 1; the opponent is -1.
    0 means no one has played in that position.
    """

    _logger: Logger
    _seed: int = field(default=2, init=False)
    _train_split: float = field(default=0.7, init=False)

    width: int = field(default=3, init=False)
    length: int = field(default=3, init=False)

    def classifications(self) -> List[str]:
        return list(map(str, map(self.map_pos, range(self.width * self.length))))

    def get_winner(self, board):
        def get_single_winner(line: set):
            if len(line) == 1:
                val = next(iter(line))
                if val != 0:
                    return val
            return None

        for row in range(self.width):
            result = get_single_winner(set(board[row]))
            if result is not None:
                return result
        for col in range(self.length):
            result = get_single_winner(set(board[:, col]))
            if result is not None:
                return result
        result = get_single_winner(set(board.diagonal()))
        if result is not None:
            return result
        diag_vals = set(board[i, self.length - 1 - i] for i in range(self.width))
        result = get_single_winner(diag_vals)
        if result is not None:
            return result
        return None

    def map_pos(self, pos):
        return pos // self.width, pos % self.width

    def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
        X, y = [], []
        bad_moves = set()

        players = (1, -1)

        assert self.width == self.length, "The following code assumes that the board is square."

        def fill(board, start_pos, next_player, path):
            # See if there is a winning move.
            winner = None
            for pos in range(start_pos, self.width * self.length):
                i, j = self.map_pos(pos)
                if board[i, j] != 0:
                    continue
                _board = board.copy()
                _board[i, j] = next_player
                winner = self.get_winner(_board)
                if winner is not None:
                    path.append((board, pos, next_player))
                    break

            if winner is not None:
                # Only count wins for one of the players to make setting up games simpler.
                if winner == players[0]:
                    for history_board, history_position, history_player in path:
                        history_board = history_board.flatten()
                        if history_player == winner:
                            X.append(history_board)
                            y.append(history_position)
                        else:
                            # Negating the board flips it to the mover's perspective,
                            # so this state-move pair is known to lead to a loss.
                            bad_moves.add((tuple(-history_board), history_position))
            else:
                # Recurse.
                for pos in range(start_pos, self.width * self.length):
                    i, j = self.map_pos(pos)
                    if board[i, j] != 0:
                        continue
                    _path = list(path)
                    _path.append((board, pos, next_player))
                    _board = board.copy()
                    _board[i, j] = next_player
                    fill(_board, start_pos, next_player=-1 if next_player == 1 else 1, path=_path)

        self._logger.info("Loading Tic Tac Toe data.")
        for init_pos in trange(self.width * self.length,
                               desc="Making boards",
                               unit_scale=True, mininterval=2, unit=" start positions"
                               ):
            pos = self.map_pos(init_pos)
            for player in players:
                board = np.zeros((self.width, self.length), dtype=np.int8)
                path = [(board.copy(), init_pos, player)]
                board[pos] = player
                fill(board, init_pos + 1, next_player=-1 if player == 1 else 1, path=path)

        # Remove bad moves.
        # Note this might not help much depending on the model.
X, y = zip(*[(X[i], y[i]) for i in range(len(X)) if (tuple(X[i]), y[i]) not in bad_moves]) X, y = shuffle(X, y, random_state=self._seed) split = int(self._train_split * len(X)) x_train, y_train = np.array(X[:split]), np.array(y[:split]) x_test, y_test = np.array(X[split:]), np.array(y[split:]) if train_size is not None: x_train, y_train = x_train[:train_size], y_train[:train_size] if test_size is not None: x_test, y_test = x_test[:test_size], y_test[:test_size] # Show some data. # import random # for _ in range(10): # i = random.randrange(len(X)) # print(X[i].reshape((self.width, self.length)), y[i]) self._logger.info("Done loading data.\nCreated %d boards.", len(X)) return (x_train, y_train), (x_test, y_test) class TicTacToeDataModule(Module): def configure(self, binder): binder.bind(DataLoader, TicTacToeDataLoader)
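# Editor's sketch (not in the original source): how the board encoding above
# behaves, assuming the loader can be constructed directly with a standard Logger.
if __name__ == '__main__':
    import logging

    loader = TicTacToeDataLoader(logging.getLogger(__name__))
    board = np.array([[1, 1, 1],
                      [-1, -1, 0],
                      [0, 0, 0]], dtype=np.int8)
    # Player 1 holds the whole top row, so they are the winner.
    assert loader.get_winner(board) == 1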
import os
from dataclasses import dataclass, field
from logging import Logger
from typing import List

import numpy as np
import pandas as pd
from injector import inject, Module
from sklearn.utils import shuffle

from decai.simulation.data.data_loader import DataLoader


@inject
@dataclass
class TitanicDataLoader(DataLoader):
    """
    Load data for Titanic survivors.

    https://www.kaggle.com/c/titanic/data
    """

    _logger: Logger
    _seed: int = field(default=231, init=False)
    _train_split: float = field(default=0.7, init=False)

    def classifications(self) -> List[str]:
        return ["DIED", "SURVIVED"]

    def _get_features(self, data: pd.DataFrame):
        """
        Map the data to numbers.

        Also uses some ideas from https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/

        :param data: The data without labels.
        :return: The data mapped to numbers.
        """
        # 'Name' and 'Cabin' are kept because they are still needed for feature extraction below.
        data.drop(columns=['PassengerId', 'Ticket'], inplace=True)
        title_tuples = (
            (' Mr. ', ' Sir. ', ' Don. ', ' Major. ', ' Capt. ', ' Jonkheer. ', ' Rev. ', ' Col. '),
            (' Mrs. ', ' Countess. ', ' Mme. ', ' Lady. '),
            (' Miss. ', ' Mlle. ', ' Ms. '),
            (' Master. ',),
            (' Dr. ',),
        )
        title_to_num = {
            ' Mr. ': 0,
            ' Mrs. ': 1,
            ' Miss. ': 2,
            ' Master. ': 3,
        }

        def _get_title(row):
            result = None
            name = row['Name']
            for index, titles in enumerate(title_tuples):
                for t in titles:
                    if t in name:
                        result = titles[0]

            if result == ' Dr. ':
                if row['Sex'] == 'male':
                    result = ' Mr. '
                else:
                    result = ' Mrs. '
            assert result is not None, f"No title found in {row}."
            result = title_to_num[result]
            return result

        def _get_cabin(row):
            result = -1
            cabin = row['Cabin']
            if isinstance(cabin, str):
                for c in 'ABCDEFGT':
                    if c in cabin:
                        result = ord(c) - ord('A')
                        break
            return result

        result = []
        for index, row in data.iterrows():
            if row['Sex'] == 'male':
                sex = 0
            else:
                sex = 1
            family_size = row['SibSp'] + row['Parch']
            datum = [
                row['Pclass'],
                sex,
                _get_title(row),
                family_size,

                # These features did not help:
                # _get_cabin(row),
                # row['Age'],
                # row['Parch'],
                # row['SibSp'],
                # row['Fare'],
                # row['Fare'] / (family_size + 1),
            ]
            result.append(datum)
        return result

    def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
        self._logger.info("Loading data.")
        # Normalize the path since it is built relative to this file's location.
        data_folder_path = os.path.abspath(os.path.join(__file__, '../../../../training_data/titanic'))
        if not os.path.exists(data_folder_path):
            # TODO Attempt to download the data.
            raise Exception(f"Could not find Titanic dataset at \"{data_folder_path}\"."
                            "\nYou must download it from https://www.kaggle.com/c/titanic/data.")

        x_train = pd.read_csv(os.path.join(data_folder_path, 'train.csv'))
        y_train = np.array(x_train['Survived'], np.int8)
        x_train.drop(columns=['Survived'], inplace=True)
        x_train = self._get_features(x_train)
        x_train = np.array(x_train)

        x_train, y_train = shuffle(x_train, y_train, random_state=self._seed)
        train_split = int(len(x_train) * self._train_split)
        x_test, y_test = x_train[train_split:], y_train[train_split:]
        x_train, y_train = x_train[:train_split], y_train[:train_split]

        if train_size is not None:
            x_train, y_train = x_train[:train_size], y_train[:train_size]
        if test_size is not None:
            x_test, y_test = x_test[:test_size], y_test[:test_size]

        self._logger.info("Done loading data.")
        return (x_train, y_train), (x_test, y_test)


@dataclass
class TitanicDataModule(Module):
    def configure(self, binder):
        binder.bind(DataLoader, to=TitanicDataLoader)
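# Editor's sketch (not in the original source): the feature mapping above applied
# to one synthetic row following the Kaggle Titanic column schema.
if __name__ == '__main__':
    import logging

    loader = TitanicDataLoader(logging.getLogger(__name__))
    passenger = pd.DataFrame([{
        'PassengerId': 1, 'Ticket': 'T-1', 'Name': 'Doe, Mr. John', 'Sex': 'male',
        'SibSp': 1, 'Parch': 0, 'Pclass': 3, 'Cabin': None, 'Age': 30,
        'Fare': 7.25, 'Embarked': 'S',
    }])
    # Features are [Pclass, sex, title, family size].
    assert loader._get_features(passenger) == [[3, 0, 0, 1]]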
from abc import ABC, abstractmethod from typing import List class DataLoader(ABC): """ Base class for providing simulation data. """ @abstractmethod def classifications(self) -> List[str]: """ :return: The classifications for this dataset. """ pass @abstractmethod def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple): """ :return: Training Data, Test Data: (x_train, y_train), (x_test, y_test) """ pass
from dataclasses import dataclass from logging import Logger from typing import List from injector import inject, Module from keras.datasets import boston_housing from decai.simulation.data.data_loader import DataLoader @inject @dataclass class BhpDataLoader(DataLoader): """ Load data from Boston Housing Prices. https://keras.io/datasets/#boston-housing-price-regression-dataset """ _logger: Logger def classifications(self) -> List[str]: raise NotImplementedError def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple): self._logger.info("Loading Boston housing prices data.") (x_train, y_train), (x_test, y_test) = boston_housing.load_data() if train_size is not None: x_train, y_train = x_train[:train_size], y_train[:train_size] if test_size is not None: x_test, y_test = x_test[:test_size], y_test[:test_size] self._logger.info("Done loading data.") return (x_train, y_train), (x_test, y_test) @dataclass class BhpDataModule(Module): def configure(self, binder): binder.bind(DataLoader, to=BhpDataLoader)
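# Editor's sketch (not in the original source): wiring the loader above through
# `injector`, mirroring how the other data modules in this package are bound. It
# assumes `LoggingModule` (used elsewhere in this repository) provides the Logger.
if __name__ == '__main__':
    from injector import Injector

    from decai.simulation.logging_module import LoggingModule

    inj = Injector([LoggingModule, BhpDataModule])
    loader = inj.get(DataLoader)
    (x_train, y_train), (x_test, y_test) = loader.load_data(train_size=100)
    print(x_train.shape, y_train.shape)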
import itertools
import json
import os
import random
import time
from collections import Counter
from dataclasses import dataclass
from enum import Enum
from logging import Logger
from operator import itemgetter
from pathlib import Path
from typing import Collection, List, Optional, Tuple

import numpy as np
import pandas as pd
import spacy
from injector import ClassAssistedBuilder, inject, Module, provider, singleton
from sklearn.feature_extraction.text import TfidfVectorizer
from spacy.cli import download
from tqdm import tqdm

from .data_loader import DataLoader


class Label(Enum):
    RELIABLE = 0
    UNRELIABLE = 1


@dataclass
class News:
    text: Optional[str]
    label: Label


@inject
@dataclass
class _SignalMediaDataLoader(DataLoader):
    """
    INCOMPLETE BECAUSE MAPPING THE SOURCE NAMES TO DOMAIN NAMES IS TRICKY.
    See https://github.com/aldengolab/fake-news-detection/issues/4

    Follows the logic of https://github.com/aldengolab/fake-news-detection.

    Requires the Signal Media dataset from http://research.signalmedia.co/newsir16/signal-dataset.html
    to be at simulation/training_data/news/sample-1M.jsonl
    and https://github.com/OpenSourcesGroup/opensources with sources.json in simulation/training_data/news/
    """
    _logger: Logger

    _media_types = {'News'}

    def classifications(self) -> List[str]:
        raise NotImplementedError

    def find_source_site(self, source_name: str, sources: Collection[str]) -> Optional[str]:
        """
        :param source_name: The name of the source.
        :param sources: Source domain names.
        :return: The source domain name from `sources` or `None` if no mapping can be found.
        """
        # TODO
        result = None
        return result

    def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
        # Normalize the path since it is built relative to this file's location.
        data_folder_path = os.path.abspath(os.path.join(__file__, '../../../../training_data/news'))
        signal_data_path = os.path.join(data_folder_path, 'sample-1M.jsonl')
        if not os.path.exists(signal_data_path):
            raise Exception(f"Could not find the Signal Media dataset at \"{signal_data_path}\"."
                            "\nYou must obtain it from http://research.signalmedia.co/newsir16/signal-dataset.html"
                            f" by following the instructions there. Then extract it to \"{signal_data_path}\".")
        sources_path = os.path.join(data_folder_path, 'sources.json')
        if not os.path.exists(sources_path):
            raise Exception(f"Could not find the sources dataset at \"{sources_path}\"."
                            "\nYou must obtain it from https://github.com/OpenSourcesGroup/opensources and put"
                            f" sources.json in \"{data_folder_path}\".")
        with open(sources_path) as f:
            loaded_sources = json.load(f)
        sources = dict()
        for source, info in loaded_sources.items():
            problem_types = (info['type'], info['2nd type'], info['3rd type'])
            sources[source] = set(filter(None, problem_types))
        self._logger.info("Found %d sources with labels.", len(sources))

        # Name: website name in `sources`.
        source_mapping = {}
        not_found_flag = -1
        with open(signal_data_path) as f:
            for index, line in tqdm(enumerate(f),
                                    desc="Filtering news articles",
                                    unit_scale=True, mininterval=2, unit=" articles"
                                    ):
                news = json.loads(line)
                news_id = news['id']
                title = news['title']
                text = news['content']
                source = news['source']
                # media-type is either "News" or "Blog".
                media_type = news['media-type']
                published_date = news['published']
                if media_type not in self._media_types:
                    continue

                source_site = source_mapping.get(source)
                if source_site is None:
                    source_site = self.find_source_site(source, sources)
                    if source_site is not None:
                        source_mapping[source] = source_site
                    else:
                        source_mapping[source] = not_found_flag
                        continue
                elif source_site == not_found_flag:
                    continue

                # TODO Use article and set label.

        # Save the source mapping so that it can be inspected.
        with open(os.path.join(data_folder_path, 'source_mapping.json'), 'w') as f:
            json.dump(dict(sorted(source_mapping.items(), key=itemgetter(0))), f, indent=2)

        self._logger.info("Found %d sources in the articles.", len(source_mapping))

        # TODO Set up output.
        (x_train, y_train), (x_test, y_test) = (None, None), (None, None)

        if train_size is not None:
            x_train, y_train = x_train[:train_size], y_train[:train_size]
        if test_size is not None:
            x_test, y_test = x_test[:test_size], y_test[:test_size]

        self._logger.info("Done loading news data.")
        return (x_train, y_train), (x_test, y_test)


@inject
@dataclass
class NewsDataLoader(DataLoader):
    """
    Load data from news sources.

    Requires data from https://www.kaggle.com/c/fake-news/data
    to be saved to "simulation/training_data/news/fake-news/train.csv".
    """

    _logger: Logger

    _train_split = 0.7

    _replace_entities_enabled = False
    """
    If True, entities will be replaced in text with the entity's label surrounded by angle brackets: "<LABEL>".

    Accuracy with replacement: 0.9172
    Accuracy without replacement: 0.9173

    Disabled because using spaCy is slow, it would be tricky to use spaCy in JavaScript,
    and it didn't change the evaluation metrics much.
    """

    _entity_types_to_replace = {'PERSON', 'GPE', 'ORG', 'DATE', 'TIME', 'PERCENT', 'MONEY',
                                'QUANTITY', 'ORDINAL', 'CARDINAL'}

    def classifications(self) -> List[str]:
        return ["RELIABLE", "UNRELIABLE"]

    def __post_init__(self):
        spacy_model = 'en_core_web_lg'
        download(spacy_model)
        self._nlp = spacy.load(spacy_model, disable={'tagger', 'parser', 'textcat'})

    def _load_kaggle_data(self, data_folder_path: str) -> Collection[News]:
        """
        Load data from https://www.kaggle.com/c/fake-news/data.
        """
        # Don't use the test data because it has no labels.
        fake_news_data_path = os.path.join(data_folder_path, 'fake-news', 'train.csv')
        if not os.path.exists(fake_news_data_path):
            raise Exception(f"Could not find the Fake News dataset at \"{fake_news_data_path}\"."
                            "\nYou must obtain it from https://www.kaggle.com/c/fake-news/data.")
        data = pd.read_csv(fake_news_data_path, na_values=dict(text=[]), keep_default_na=False)
        result = []
        for row in data.itertuples():
            label = Label.RELIABLE if row.label == 0 else Label.UNRELIABLE
            if len(row.text) > 0:
                result.append(News(row.text, label))
        # Consistent shuffle to aim for a mostly even distribution of labels.
        random.shuffle(result, lambda: 0.618)
        return result

    def _replace_entities(self, doc) -> str:
        # Remove names in text using spaCy.
        result = doc.text
        for ent in doc.ents[::-1]:
            if ent.label_ in self._entity_types_to_replace:
                result = result[:ent.start_char] + "<" + ent.label_ + ">" + result[ent.end_char:]
        return result

    def _pre_process_text(self, doc) -> str:
        # TODO Remove names of news sources.
        if self._replace_entities_enabled:
            result = self._replace_entities(doc)
        else:
            assert isinstance(doc, str)
            result = doc
        return result

    def _pre_process(self, news_articles: Collection[News], train_size: int, test_size: int) -> \
            Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
        self._logger.info("Getting features for %d articles.", len(news_articles))
        # Only use bigrams as features.
        ngram_range = (2, 2)
        # Don't use IDF or normalization because we need integer features.
        t = TfidfVectorizer(max_features=1000, ngram_range=ngram_range, norm=None, use_idf=False)

        test_start = len(news_articles) - test_size
        x_train = map(lambda news: news.text, itertools.islice(news_articles, train_size))
        x_test = map(lambda news: news.text, itertools.islice(news_articles, test_start, len(news_articles)))
        if self._replace_entities_enabled:
            self._logger.debug("Will replace entities.")
            x_train = self._nlp.pipe(x_train, batch_size=128)
            x_test = self._nlp.pipe(x_test, batch_size=128)
        else:
            self._logger.debug("Replacing entities is disabled.")

        x_train = map(self._pre_process_text, x_train)
        x_test = map(self._pre_process_text, x_test)

        x_train = t.fit_transform(tqdm(x_train,
                                       desc="Processing training data",
                                       total=train_size,
                                       unit_scale=True, mininterval=2, unit=" articles"
                                       )).toarray()
        x_test = t.transform(tqdm(x_test,
                                  desc="Processing testing data",
                                  total=test_size,
                                  unit_scale=True, mininterval=2, unit=" articles"
                                  )).toarray()

        y_train = np.array([news.label.value for news in itertools.islice(news_articles, train_size)], np.int8)
        y_test = np.array([news.label.value
                           for news in itertools.islice(news_articles, test_start, len(news_articles))], np.int8)

        self._logger.debug("Training labels: %s", Counter(y_train))
        self._logger.debug("Test labels: %s", Counter(y_test))

        self._logger.info("Done getting features.")
        return (x_train, y_train), (x_test, y_test)

    def load_data(self, train_size: int = None, test_size: int = None) -> \
            Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
        self._logger.info("Loading news data.")
        # Normalize the path since it is built relative to this file's location.
        data_folder_path = os.path.abspath(os.path.join(__file__, '../../../../training_data/news'))

        # Look for cached data.
        file_identifier = f'news-data-{train_size}-{test_size}-replace_ents_{self._replace_entities_enabled}.npy'
        base_path = Path(os.path.dirname(__file__)) / 'cached_data'
        os.makedirs(base_path, exist_ok=True)
        cache_paths = {
            'x_train': base_path / f'x_train-{file_identifier}',
            'y_train': base_path / f'y_train-{file_identifier}',
            'x_test': base_path / f'x_test-{file_identifier}',
            'y_test': base_path / f'y_test-{file_identifier}'
        }
        # Use the cache if it was modified in the last day.
        if all([p.exists() for p in cache_paths.values()]) and \
                all([time.time() - p.stat().st_mtime < 60 * 60 * 24 for p in cache_paths.values()]):
            self._logger.info("Loading cached News data from %s.", cache_paths)
            return (np.load(cache_paths['x_train']), np.load(cache_paths['y_train'])), \
                   (np.load(cache_paths['x_test']), np.load(cache_paths['y_test']))

        data = self._load_kaggle_data(data_folder_path)

        # Separate train and test data.
        if train_size is None:
            if test_size is None:
                train_size = int(self._train_split * len(data))
            else:
                train_size = len(data) - test_size
        if test_size is None:
            test_size = len(data) - train_size

        if train_size + test_size > len(data):
            raise Exception("There is not enough data for the requested sizes."
f"\n data size: {len(data)}" f"\n train size: {train_size}" f"\n test size: {test_size}") (x_train, y_train), (x_test, y_test) = self._pre_process(data, train_size, test_size) np.save(cache_paths['x_train'], x_train, allow_pickle=False) np.save(cache_paths['y_train'], y_train, allow_pickle=False) np.save(cache_paths['x_test'], x_test, allow_pickle=False) np.save(cache_paths['y_test'], y_test, allow_pickle=False) self._logger.info("Done loading news data.") return (x_train, y_train), (x_test, y_test) @dataclass class NewsDataModule(Module): @provider @singleton def provide_data_loader(self, builder: ClassAssistedBuilder[NewsDataLoader]) -> DataLoader: return builder.build()
from dataclasses import dataclass from logging import Logger from typing import List import numpy as np from injector import Binder, inject, Module from decai.simulation.data.data_loader import DataLoader @inject @dataclass class SimpleDataLoader(DataLoader): """ Load simple data for testing. """ _logger: Logger def classifications(self) -> List[str]: return ["0", "1"] def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple): def _ground_truth(data): if data[0] * data[2] > 0: return 1 else: return 0 x_train = np.array([ [0, 0, 0], [1, 1, 1], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [0, 0, 2], [0, 2, 0], [2, 0, 0], [2, 0, 2], [0, 0, -3], [0, 3, 0], [0, 3, -3], [0, -3, 3], [0, 0, 4], [0, 4, 4], [4, 0, 0], [-6, 0, 0], ]) x_test = np.array([ [0, 2, 2], [0, 1, -1], [-1, 0, 0], [0, -1, 0], [1, -1, 2], [0, 0, 3], [0, -2, 0], [0, 2, -2], [3, 0, 0], [-2, 0, 2], [2, -2, 0], ]) if train_size is not None: x_train = x_train[:train_size] if test_size is not None: x_test = x_test[:test_size] y_train = [_ground_truth(x) for x in x_train] y_test = [_ground_truth(x) for x in x_test] return (x_train, y_train), (x_test, y_test) class SimpleDataModule(Module): """ Set up a `DataLoader` mainly for testing. """ def configure(self, binder: Binder): binder.bind(DataLoader, to=SimpleDataLoader)
import ast
import logging
import os
import re
import time
from collections import Counter
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import List, Set, Tuple

import numpy as np
from injector import ClassAssistedBuilder, inject, Module, provider, singleton
from sklearn.utils import shuffle
from tqdm import tqdm

from .data_loader import DataLoader


@inject
@dataclass
class FitnessDataLoader(DataLoader):
    """
    Load sport activity data from Endomondo.

    Requires endomondoHR_proper.json from https://sites.google.com/eng.ucsd.edu/fitrec-project/home
    to be stored at simulation/training_data/fitness/endomondoHR_proper.json.

    The first 5K samples contain 2842 'bike' and 2158 'run' occurrences.

    Some info from the first 10K samples:
    genders: 'male', 'unknown', 'female'
    sports: 'bike', 'bike (transport)', 'run', 'kayaking', 'indoor cycling', 'mountain bike', 'orienteering',
            'core stability training', 'walk', 'cross-country skiing', 'fitness walking', 'roller skiing'
    """

    _logger: Logger
    _seed: int = field(default=2, init=False)
    _train_split: float = field(default=0.7, init=False)
    _classes: Set[str] = field(default_factory=lambda: {'bike', 'run'}, init=False)

    def classifications(self) -> List[str]:
        return ["BIKING", "RUNNING"]

    def load_data(self, train_size: int = None, test_size: int = None) -> (Tuple, Tuple):
        self._logger.info("Loading Endomondo fitness data.")

        # Look for cached data.
        file_identifier = f'fitness-data-{train_size}-{test_size}.npy'
        base_path = Path(os.path.dirname(__file__)) / 'cached_data'
        os.makedirs(base_path, exist_ok=True)
        cache_paths = {
            'x_train': base_path / f'x_train-{file_identifier}',
            'y_train': base_path / f'y_train-{file_identifier}',
            'x_test': base_path / f'x_test-{file_identifier}',
            'y_test': base_path / f'y_test-{file_identifier}'
        }
        # Use the cache if it was modified in the last day.
        if all([p.exists() for p in cache_paths.values()]) and \
                all([time.time() - p.stat().st_mtime < 60 * 60 * 24 for p in cache_paths.values()]):
            self._logger.info("Loading cached Endomondo fitness data from %s.", cache_paths)
            return (np.load(cache_paths['x_train']), np.load(cache_paths['y_train'])), \
                   (np.load(cache_paths['x_test']), np.load(cache_paths['y_test']))

        data = []
        labels = []

        data_folder_path = Path(__file__, '../../../../training_data/fitness').resolve()
        user_id_to_set = {}
        sport_to_label = {
            'bike': 0,
            'run': 1
        }
        gender_to_index = {}

        if train_size is not None and test_size is not None:
            max_num_samples = train_size + test_size
        else:
            max_num_samples = 10_000
        classes = '|'.join(self._classes)
        classes_pattern = re.compile(f' \'sport\': \'({classes})\', ')
        data_path = data_folder_path / 'endomondoHR_proper.json'
        assert data_path.exists(), \
            f"See the documentation for how to download the dataset. It must be stored at {data_path}."
        with open(data_path) as f, \
                tqdm(f,
                     desc="Loading data",
                     unit_scale=True, mininterval=2, unit=" samples",
                     total=max_num_samples,
                     ) as pbar:
            for line in f:
                # TODO Keep users in the train set mutually exclusive from users in the test set.

                # Check the line with a regular expression before the more expensive parsing.
                if not classes_pattern.search(line):
                    continue
                record = ast.literal_eval(line)
                sport = record['sport']
                if sport not in self._classes:
                    continue
                if 'speed' not in record:
                    continue
                label = sport_to_label[sport]
                labels.append(label)
                heart_rates = record['heart_rate']
                gender = gender_to_index.setdefault(record['gender'], len(gender_to_index))
                speeds = record['speed']
                # Other fields:
                # record['longitude']
                # record['altitude']
                # record['latitude']
                # record['id']
                # record['timestamp']
                # record['userId']
                data.append({
                    # Values to keep as they are:
                    'rawValues': [
                        np.mean(heart_rates) / np.min(heart_rates),
                        np.median(heart_rates) / np.min(heart_rates),
                        np.max(speeds),
                        np.min(speeds),
                        np.mean(speeds),
                        np.median(speeds),
                    ],
                    # Values that need to be converted:
                    'gender': gender,
                })
                pbar.update()
                if len(data) >= max_num_samples:
                    break

        if train_size is None:
            if test_size is None:
                train_size = int(self._train_split * len(data))
            else:
                train_size = len(data) - test_size
        if test_size is None:
            test_size = len(data) - train_size

        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug("Labels: %s", Counter(labels))

        # Shuffle before computing the thresholds so that they really only use training data.
        data, labels = shuffle(data, labels, random_state=self._seed)

        # Thresholds for making sure features can be discretized for Naive Bayes.
        # Just use training data to make the thresholds.
        thresholds = np.empty(len(data[0]['rawValues']), dtype=np.int32)
        for i in range(len(data[0]['rawValues'])):
            thresholds[i] = np.median([d['rawValues'][i] for d in data[:train_size]])

        def _featurize(datum):
            raw_values = np.array(thresholds < datum['rawValues'], dtype=np.int8)
            gender_one_hot = np.zeros(len(gender_to_index), dtype=np.int8)
            gender_one_hot[datum['gender']] = 1
            return np.concatenate([raw_values, gender_one_hot])

        x_train = np.array([_featurize(d) for d in data[:train_size]])
        y_train = np.array(labels[:train_size])
        x_test = np.array([_featurize(d) for d in data[-test_size:]])
        y_test = np.array(labels[-test_size:])

        np.save(cache_paths['x_train'], x_train, allow_pickle=False)
        np.save(cache_paths['y_train'], y_train, allow_pickle=False)
        np.save(cache_paths['x_test'], x_test, allow_pickle=False)
        np.save(cache_paths['y_test'], y_test, allow_pickle=False)
        self._logger.info("Done loading Endomondo fitness data.")
        return (x_train, y_train), (x_test, y_test)


@dataclass
class FitnessDataModule(Module):

    @provider
    @singleton
    def provide_data_loader(self, builder: ClassAssistedBuilder[FitnessDataLoader]) -> DataLoader:
        return builder.build()
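# Editor's sketch (not in the original source): the discretization above turns
# each raw value into 1 when it exceeds the training median for that feature,
# then appends the gender one-hot encoding. The threshold values here are
# hypothetical.
if __name__ == '__main__':
    thresholds = np.array([1, 1, 30, 0, 10, 10])
    raw_values = np.array([1.2, 1.1, 35.0, 0.0, 12.0, 9.0])
    binary = np.array(thresholds < raw_values, dtype=np.int8)
    assert list(binary) == [1, 1, 1, 0, 1, 0]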
import html
import itertools
import os
from collections import Counter
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import numpy as np
import pandas as pd
import requests
from injector import ClassAssistedBuilder, Module, inject, provider, singleton
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import shuffle
from tqdm import tqdm

from .data_loader import DataLoader
from .featuremapping.hashing.token_hash import TokenHash


@inject
@dataclass
class OffensiveDataLoader(DataLoader):
    """
    Load offensive data from https://github.com/t-davidson/hate-speech-and-offensive-language.
    """

    _logger: Logger
    _token_hash: TokenHash

    max_num_features: int

    _seed: int = field(default=2, init=False)
    _train_split: float = field(default=0.7, init=False)

    _class_mapping = [
        # Hate.
        0,
        # Offensive.
        0,
        # Neither (safe).
        1,
    ]

    def classifications(self) -> List[str]:
        return ["OFFENSIVE", "SAFE"]

    def load_data(self, train_size: int = None, test_size: int = None) -> (Tuple, Tuple):
        self._logger.info("Loading data.")
        data_folder_path = Path(__file__,
                                '../../../../training_data/offensive/hate-speech-and-offensive-language').resolve()
        if train_size is not None and test_size is not None:
            max_num_samples = train_size + test_size
        else:
            max_num_samples = None

        data_path = data_folder_path / 'labeled_data.csv'

        if not data_path.exists():
            data_url = 'https://github.com/t-davidson/hate-speech-and-offensive-language/raw/master/data/labeled_data.csv'
            self._logger.info("Downloading data from \"%s\" to \"%s\".", data_url, data_path)
            r = requests.get(data_url, allow_redirects=True)
            r.raise_for_status()
            os.makedirs(data_folder_path, exist_ok=True)
            with open(data_path, 'wb') as f:
                f.write(r.content)

        loaded_data = pd.read_csv(data_path)

        data = []
        labels = []
        # Add 1 because the first field in each `itertuples` row is the index.
        class_index = list(loaded_data.columns).index('class') + 1
        assert class_index > 0
        for row in tqdm(loaded_data.itertuples(),
                        desc="Loading data",
                        unit_scale=True, mininterval=2, unit=" samples",
                        total=max_num_samples or len(loaded_data),
                        ):
            if max_num_samples is not None and len(data) >= max_num_samples:
                break
            text = row.tweet
            text = self._pre_process(text)
            data.append(text)
            labels.append(self._class_mapping[row[class_index]])

        if train_size is None:
            if test_size is None:
                train_size = int(self._train_split * len(data))
            else:
                train_size = len(data) - test_size
        if test_size is None:
            test_size = len(data) - train_size

        data, labels = shuffle(data, labels, random_state=self._seed)
        x_train = itertools.islice(data, train_size)

        # Compute the top features.
        t = TfidfVectorizer(max_features=self.max_num_features, norm=None)
        t.fit(tqdm(x_train,
                   desc="Computing top token features",
                   total=train_size,
                   unit_scale=True, mininterval=2, unit=" texts"
                   ))
        top_tokens = t.get_feature_names()
        self._logger.debug("Some top feature names: %s", top_tokens[:30])

        tokenize = t.build_analyzer()
        feature_tokens = set(t.get_feature_names())

        def _featurize(text: str) -> Dict[int, int]:
            result = Counter(tokenize(text))
            return {self._token_hash.hash(token): count
                    for token, count in result.items()
                    if token in feature_tokens}

        x_train = map(_featurize, itertools.islice(data, train_size))
        x_train = self._build_sparse_matrix(x_train)
        y_train = np.array(labels[:train_size])

        x_test = map(_featurize, itertools.islice(data, len(data) - test_size, len(data)))
        # TODO Might have to make sure it has the same number of columns as x_train.
x_test = self._build_sparse_matrix(x_test) y_test = np.array(labels[-test_size:]) self._logger.info("Done loading data.") return (x_train, y_train), (x_test, y_test) def _pre_process(self, text: str) -> str: """ Handle some simple pre-processing specific to this dataset. """ return html.unescape(text) def _build_sparse_matrix(self, feature_mapped_data: Iterator[Dict[int, int]]): # Make a sparse matrix following the term-document example from: # https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html data = [] indptr = [0] indices = [] for feature_indices in feature_mapped_data: if len(feature_indices) > 0: i, d = zip(*feature_indices.items()) indices.extend(i) data.extend(d) indptr.append(len(indices)) return csr_matrix((data, indices, indptr), dtype=np.uint8) @dataclass class OffensiveDataModule(Module): max_num_features: int = field(default=1000) @provider @singleton def provide_data_loader(self, builder: ClassAssistedBuilder[OffensiveDataLoader]) -> DataLoader: return builder.build(max_num_features=self.max_num_features)
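# Editor's sketch (not in the original source): the CSR construction above for two
# hypothetical feature-mapped texts, following the term-document example it cites.
if __name__ == '__main__':
    mapped = [{0: 2, 3: 1}, {1: 4}]
    data, indices, indptr = [], [], [0]
    for feature_indices in mapped:
        if len(feature_indices) > 0:
            i, d = zip(*feature_indices.items())
            indices.extend(i)
            data.extend(d)
        indptr.append(len(indices))
    matrix = csr_matrix((data, indices, indptr), dtype=np.uint8)
    assert matrix.shape == (2, 4) and matrix[0, 0] == 2 and matrix[1, 1] == 4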