|
# File: WebShop-master/baseline_models/agent.py |
|
import os |
|
import random |
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
from transformers import AutoTokenizer |
|
from collections import defaultdict, namedtuple |
|
from models.bert import BertConfigForWebshop, BertModelForWebshop |
|
from models.rnn import RCDQN |
|
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') |
|
State = namedtuple('State', ('obs', 'goal', 'click', 'estimate', 'obs_str', 'goal_str', 'image_feat')) |
|
TransitionPG = namedtuple('TransitionPG', ('state', 'act', 'reward', 'value', 'valid_acts', 'done')) |
|
|
|
def discount_reward(transitions, last_values, gamma): |
|
(returns, advantages) = ([], []) |
|
R = last_values.detach() |
|
for t in reversed(range(len(transitions))): |
|
(_, _, rewards, values, _, dones) = transitions[t] |
|
R = torch.FloatTensor(rewards).to(device) + gamma * R * (1 - torch.FloatTensor(dones).to(device)) |
|
baseline = values |
|
adv = R - baseline |
|
returns.append(R) |
|
advantages.append(adv) |
|
return (returns[::-1], advantages[::-1]) |
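
# A minimal sketch (not part of the original file): a two-step rollout for a single
# environment with made-up rewards/values, walked backwards by discount_reward.
# returns[1] = 0.0 (the episode terminates there), returns[0] = 1.0 + 0.9 * 0.0 = 1.0,
# and each advantage is the return minus the corresponding value estimate.
def _example_discount_reward():
    transitions = [
        TransitionPG(state=None, act=None, reward=[1.0],
                     value=torch.tensor([0.5]).to(device), valid_acts=None, done=[0.0]),
        TransitionPG(state=None, act=None, reward=[0.0],
                     value=torch.tensor([0.2]).to(device), valid_acts=None, done=[1.0]),
    ]
    last_values = torch.zeros(1).to(device)
    returns, advantages = discount_reward(transitions, last_values, gamma=0.9)
    return returns, advantages
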
|
|
|
class Agent: |
|
|
|
def __init__(self, args): |
|
self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left', max_length=512) |
|
self.tokenizer.add_tokens(['[button]', '[button_]', '[clicked button]', '[clicked button_]'], special_tokens=True)
|
vocab_size = len(self.tokenizer) |
|
embedding_dim = args.embedding_dim |
|
if args.network == 'rnn': |
|
self.network = RCDQN(vocab_size, embedding_dim, args.hidden_dim, args.arch_encoder, args.grad_encoder, None, args.gru_embed, args.get_image, args.bert_path) |
|
self.network.rl_forward = self.network.forward |
|
elif args.network == 'bert': |
|
config = BertConfigForWebshop(image=args.get_image, pretrained_bert=args.bert_path != 'scratch') |
|
self.network = BertModelForWebshop(config) |
|
if args.bert_path != '' and args.bert_path != 'scratch': |
|
self.network.load_state_dict(torch.load(args.bert_path, map_location=torch.device('cpu')), strict=False) |
|
else: |
|
raise ValueError('Unknown network: {}'.format(args.network)) |
|
self.network = self.network.to(device) |
|
self.save_path = args.output_dir |
|
self.clip = args.clip |
|
self.w = {'loss_pg': args.w_pg, 'loss_td': args.w_td, 'loss_il': args.w_il, 'loss_en': args.w_en} |
|
self.optimizer = torch.optim.Adam(self.network.parameters(), lr=args.learning_rate) |
|
self.gamma = args.gamma |
|
|
|
def build_state(self, ob, info): |
|
obs_ids = self.encode(ob) |
|
goal_ids = self.encode(info['goal']) |
|
click = info['valid'][0].startswith('click[') |
|
estimate = info['estimate_score'] |
|
obs_str = ob.replace('\n', '[SEP]') |
|
goal_str = info['goal'] |
|
image_feat = info.get('image_feat') |
|
return State(obs_ids, goal_ids, click, estimate, obs_str, goal_str, image_feat) |
|
|
|
def encode(self, observation, max_length=512): |
|
observation = observation.lower().replace('"', '').replace("'", '').strip() |
|
observation = observation.replace('[sep]', '[SEP]') |
|
token_ids = self.tokenizer.encode(observation, truncation=True, max_length=max_length) |
|
return token_ids |
|
|
|
def decode(self, act): |
|
act = self.tokenizer.decode(act, skip_special_tokens=True) |
|
act = act.replace(' [ ', '[').replace(' ]', ']') |
|
return act |
|
|
|
def encode_valids(self, valids, max_length=64): |
|
return [[self.encode(act, max_length=max_length) for act in valid] for valid in valids] |
|
|
|
def act(self, states, valid_acts, method, state_strs=None, eps=0.1): |
|
act_ids = self.encode_valids(valid_acts) |
|
(act_values, act_sizes, values) = self.network.rl_forward(states, act_ids, value=True, act=True) |
|
act_values = act_values.split(act_sizes) |
|
if method == 'softmax': |
|
act_probs = [F.softmax(vals, dim=0) for vals in act_values] |
|
act_idxs = [torch.multinomial(probs, num_samples=1).item() for probs in act_probs] |
|
elif method == 'greedy': |
|
act_idxs = [vals.argmax(dim=0).item() for vals in act_values] |
|
elif method == 'eps': |
|
act_idxs = [vals.argmax(dim=0).item() if random.random() > eps else random.randint(0, len(vals) - 1) for vals in act_values] |
|
acts = [acts[idx] for (acts, idx) in zip(act_ids, act_idxs)] |
|
(act_strs, act_ids) = ([], []) |
|
for (act, idx, valids) in zip(acts, act_idxs, valid_acts): |
|
if torch.is_tensor(act): |
|
act = act.tolist() |
|
if 102 in act:  # 102 is BERT's [SEP] token id; keep everything up to the first [SEP]
|
act = act[:act.index(102) + 1] |
|
act_ids.append(act) |
|
if idx is None: |
|
act_str = self.decode(act) |
|
else: |
|
act_str = valids[idx] |
|
act_strs.append(act_str) |
|
return (act_strs, act_ids, values) |
|
|
|
def update(self, transitions, last_values, step=None, rewards_invdy=None): |
|
(returns, advs) = discount_reward(transitions, last_values, self.gamma) |
|
stats_global = defaultdict(float) |
|
for (transition, adv) in zip(transitions, advs): |
|
stats = {} |
|
(log_valid, valid_sizes) = self.network.rl_forward(transition.state, transition.valid_acts) |
|
act_values = log_valid.split(valid_sizes) |
|
log_a = torch.stack([values[acts.index(act)] for (values, acts, act) in zip(act_values, transition.valid_acts, transition.act)]) |
|
stats['loss_pg'] = -(log_a * adv.detach()).mean() |
|
stats['loss_td'] = adv.pow(2).mean() |
|
stats['loss_il'] = -log_valid.mean() |
|
stats['loss_en'] = (log_valid * log_valid.exp()).mean() |
|
for k in stats: |
|
stats[k] = self.w[k] * stats[k] / len(transitions) |
|
stats['loss'] = sum((stats[k] for k in stats)) |
|
stats['returns'] = torch.stack(returns).mean() / len(transitions) |
|
stats['advs'] = torch.stack(advs).mean() / len(transitions) |
|
stats['loss'].backward() |
|
stats['gradnorm_unclipped'] = sum((p.grad.norm(2).item() for p in self.network.parameters() if p.grad is not None)) |
|
nn.utils.clip_grad_norm_(self.network.parameters(), self.clip) |
|
stats['gradnorm_clipped'] = sum((p.grad.norm(2).item() for p in self.network.parameters() if p.grad is not None)) |
|
for (k, v) in stats.items(): |
|
stats_global[k] += v.item() if torch.is_tensor(v) else v |
|
del stats |
|
self.optimizer.step() |
|
self.optimizer.zero_grad() |
|
return stats_global |
|
|
|
def load(self): |
|
try: |
|
self.network = torch.load(os.path.join(self.save_path, 'model.pt')) |
|
except Exception as e: |
|
print('Error loading model.', e)
|
|
|
def save(self): |
|
try: |
|
torch.save(self.network, os.path.join(self.save_path, 'model.pt')) |
|
except Exception as e: |
|
print('Error saving model.', e) |
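
# A hedged usage sketch (not in the original file): building the BERT-based agent from a
# hand-rolled namespace that carries only the fields read in Agent.__init__. The field
# values below are illustrative defaults, not tuned hyperparameters, and the call assumes
# bert-base-uncased can be downloaded.
def _example_build_agent():
    from types import SimpleNamespace
    args = SimpleNamespace(
        network='bert', embedding_dim=128, bert_path='', get_image=0,
        output_dir='./ckpts/demo', clip=10.0,
        w_pg=1.0, w_td=1.0, w_il=0.0, w_en=0.01,
        learning_rate=1e-5, gamma=0.9)
    return Agent(args)
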
|
|
|
# File: WebShop-master/baseline_models/env.py |
|
import sys |
|
import json |
|
import random |
|
from os.path import join, dirname, abspath |
|
from collections import defaultdict |
|
MODEL_PATH = dirname(abspath(__file__)) |
|
SITE_PATH = join(MODEL_PATH, '../') |
|
sys.path.insert(0, SITE_PATH) |
|
from web_agent_site.envs import WebAgentTextEnv |
|
from web_agent_site.utils import * |
|
from web_agent_site.engine.goal import get_reward |
|
|
|
class WebEnv: |
|
|
|
def __init__(self, args, split, server=None, id=None): |
|
self.env = WebAgentTextEnv(observation_mode=args.state_format, server=server, filter_goals=None, limit_goals=-1, num_products=args.num, human_goals=args.human_goals, get_image=args.get_image, num_prev_obs=args.num_prev_obs, num_prev_actions=args.num_prev_actions, session_prefix=id) |
|
if args.num is None: |
|
if split == 'test': |
|
self.goal_idxs = range(500) |
|
elif split == 'eval': |
|
self.goal_idxs = range(500, 1500) |
|
elif split == 'train': |
|
self.goal_idxs = range(1500, len(self.env.server.goals)) |
|
else: |
|
self.goal_idxs = range(len(self.env.server.goals)) |
|
print(self.goal_idxs) |
|
self.steps = 0 |
|
self.step_limit = args.step_limit |
|
self.stats = defaultdict(int) |
|
self.session = None |
|
self.click_item_name = args.click_item_name |
|
self.asin2name = {k.lower(): v['Title'].lower() for (k, v) in self.env.server.product_item_dict.items()} |
|
self.name2asin = {v: k for (k, v) in self.asin2name.items()} |
|
self.attributes_fail = defaultdict(int) |
|
self.attributes_success = defaultdict(int) |
|
self.items_clicked = defaultdict(int) |
|
self.harsh_reward = args.harsh_reward |
|
self.go_to_item = args.go_to_item |
|
self.go_to_search = args.go_to_search |
|
self.ban_buy = args.ban_buy |
|
self.prev_ob = self.cur_ob = None |
|
self.get_image = args.get_image |
|
self.item_rank = -1 |
|
self.reduce_click = 1 |
|
if args.extra_search_path != '': |
|
self.extra_search = json.load(open(args.extra_search_path)) |
|
self.extra_search = {k.strip('.'): v for (k, v) in self.extra_search.items()} |
|
else: |
|
self.extra_search = None |
|
|
|
def get_search_texts(self, atts, query, inst): |
|
if self.extra_search is not None: |
|
if ', and price lower than' in inst: |
|
idx = inst.find(', and price lower than') |
|
inst_ = inst[:idx] |
|
else: |
|
inst_ = inst |
|
texts = self.extra_search.get(inst_, []) + [inst.lower()] |
|
else: |
|
texts = [query] + [f'{att} {query}' for att in atts] + [inst.lower()] |
|
return texts |
|
|
|
def get_valid_actions(self): |
|
valid_info = self.env.get_available_actions() |
|
if valid_info['has_search_bar']: |
|
atts = self.session['goal']['attributes'] |
|
query = self.session['goal']['query'] |
|
inst = self.session['goal']['instruction_text'] |
|
texts = self.get_search_texts(atts, query, inst) |
|
valids = [f'search[{text}]' for text in texts] |
|
else: |
|
valids = [] |
|
for text in valid_info['clickables']: |
|
if text == 'buy now' and self.ban_buy: |
|
cur_options = len(self.session['options']) |
|
all_options = len(self.env.server.product_item_dict[self.session['asin']]['customization_options']) |
|
if cur_options != all_options: |
|
continue |
|
if text != 'search': |
|
if self.click_item_name and text in self.asin2name: |
|
text = 'item - ' + self.asin2name[text] |
|
valids.append(f'click[{text}]') |
|
if self.reduce_click and len(valids) > 20: |
|
valids = valids[:6] + random.sample(valids[6:], 10) |
|
if len(valids) == 0: |
|
valids = ['finish'] |
|
return valids |
|
|
|
def score(self): |
|
valid_acts = self.get_valid_actions() |
|
if 'click[description]' not in valid_acts: |
|
return 0.0 |
|
product = self.env.server.product_item_dict[self.session['asin']] |
|
goal = self.session['goal'] |
|
price = self.env.server.product_prices.get(self.session['asin']) |
|
options = self.session['options'] |
|
return get_reward(product, goal, price, options) |
|
|
|
def estimate_score(self, atts, opts, verify=False): |
|
valid_acts = self.get_valid_actions() |
|
assert 'click[description]' in valid_acts |
|
desc = self.step('click[description]')[0].lower() |
|
self.step('click[< prev]') |
|
feat = self.step('click[features]')[0].lower() |
|
ob = self.step('click[< prev]')[0].lower() |
|
n_att = 0 |
|
for att in atts: |
|
if att in desc or att in feat or att in ob: |
|
n_att += 1 |
|
r_att = n_att / len(atts) |
|
n_opt = 0 |
|
for opt in opts: |
|
for act in valid_acts: |
|
if opt in act: |
|
n_opt += 1 |
|
break |
|
r_opt = n_opt / len(opts) |
|
r = (n_att + n_opt + 1) / (len(atts) + len(opts) + 1) |
|
return (r, r_att, r_opt) |
|
|
|
def step(self, action): |
|
if self.click_item_name and action.startswith('click[item - ') and (action[13:-1] in self.name2asin): |
|
valid_items = [_ for _ in self.get_valid_actions() if _.startswith('click[item - ')] |
|
if action in valid_items: |
|
self.item_rank = valid_items.index(action) + 1 |
|
else: |
|
self.item_rank = -1 |
|
action = f'click[{self.name2asin[action[13:-1]]}]' |
|
(ob, reward, done, info) = self.env.step(action) |
|
if action.startswith('click[') and action[6:-1] in self.asin2name: |
|
self.items_clicked[action[6:-1]] += 1 |
|
desc = self.env.step('click[description]')[0].lower() |
|
self.env.step('click[< prev]') |
|
feat = self.env.step('click[features]')[0].lower() |
|
self.env.step('click[< prev]') |
|
else: |
|
desc = feat = '' |
|
r_visit = 0.0 |
|
(self.cur_ob, self.prev_ob) = (ob, self.cur_ob) |
|
if info is None: |
|
info = {} |
|
self.steps += 1 |
|
if self.step_limit and self.steps >= self.step_limit: |
|
done = True |
|
if done: |
|
info['verbose'] = self.session.get('verbose_info', {'r_att': 0.0, 'r_option': 0.0, 'r_price': 0.0, 'r_type': 0.0, 'w_att': 0.0, 'w_option': 0.0, 'w_price': 0.0}) |
|
verbose = info['verbose'] |
|
verbose['r_harsh'] = reward == 1 |
|
verbose['r_exact'] = reward == 1 and self.session['goal']['asin'] == self.session['asin'] |
|
verbose['r_norm'] = reward / self.steps |
|
verbose['r_visit'] = r_visit |
|
verbose['rank_item'] = self.item_rank |
|
if self.harsh_reward: |
|
reward = verbose['r_harsh'] |
|
for (k, v) in self.session['actions'].items(): |
|
self.stats[f'action_{k}'] += v |
|
cat = self.session['goal']['category'] |
|
self.stats[f'cat_{cat}'] += 1 |
|
for att in self.session['goal']['attributes']: |
|
if att in info['verbose'].get('purchased_attrs', []): |
|
self.attributes_success[att] += 1 |
|
else: |
|
self.attributes_fail[att] += 1 |
|
info.update({'valid': self.get_valid_actions(), 'goal': self.env.instruction_text, 'score': reward * 10, 'estimate_score': self.score(), 'prev_ob': self.prev_ob, 'desc': desc, 'feat': feat}) |
|
if self.get_image: |
|
image_feat = self.env.get_image() |
|
info['image_feat'] = image_feat |
|
return (ob, (reward + r_visit) * 10, done, info) |
|
|
|
def reset(self, idx=None): |
|
if idx is None: |
|
idx = random.sample(self.goal_idxs, k=1)[0] |
|
(ob, info) = self.env.reset(idx) |
|
self.session = self.env.server.user_sessions[self.env.session] |
|
if info is None: |
|
info = {} |
|
(self.cur_ob, self.prev_ob) = (ob, None) |
|
info.update({'valid': self.get_valid_actions(), 'goal': self.env.instruction_text, 'score': 0, 'estimate_score': self.score(), 'prev_ob': self.prev_ob, 'desc': '', 'feat': ''}) |
|
self.steps = 0 |
|
if self.go_to_search or self.go_to_item: |
|
name = self.session['goal']['name'].lower() |
|
(ob, _, _, info) = self.step(f'search[{name}]') |
|
self.stats['action_go_to_search'] += 1 |
|
if self.go_to_item: |
|
asin = self.session['goal']['asin'].lower() |
|
if asin in self.env.get_available_actions()['clickables']: |
|
(ob, _, _, info) = self.step(f'click[{asin}]') |
|
self.stats['action_go_to_item'] += 1 |
|
self.item_rank = -1 |
|
return (ob, info) |
|
|
|
def close(self): |
|
self.env.close() |
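
# A hedged usage sketch (not in the original file): a random-policy rollout. It assumes the
# WebShop site data is installed so WebAgentTextEnv can construct its own server, and that
# `args` carries the flags read in WebEnv.__init__ (state_format, num, step_limit, ...).
def _example_random_rollout(args):
    env = WebEnv(args, split='eval', id='demo_')
    (ob, info) = env.reset()
    (reward, done) = (0.0, False)
    while not done:
        action = random.choice(info['valid'])
        (ob, reward, done, info) = env.step(action)
    env.close()
    return reward
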
|
|
|
# File: WebShop-master/baseline_models/generate_search.py |
|
import json |
|
import time |
|
import torch |
|
from tqdm import tqdm |
|
from transformers import BartForConditionalGeneration |
|
from train_search import get_data, get_dataset, tokenizer |
|
if __name__ == '__main__': |
|
model = BartForConditionalGeneration.from_pretrained('./ckpts/web_search/checkpoint-800') |
|
model.eval() |
|
model = model.to('cuda') |
|
dataset = get_dataset('web_search') |
|
dataloader = torch.utils.data.DataLoader(dataset['all'], batch_size=32) |
|
(_, all_goals) = get_data('all') |
|
all_dec = [] |
|
for batch in tqdm(dataloader): |
|
output = model.generate(input_ids=batch['input_ids'].to('cuda'), attention_mask=batch['attention_mask'].to('cuda'), num_beams=10, num_return_sequences=10, max_length=512, early_stopping=True) |
|
dec = tokenizer.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False) |
|
assert len(dec) % 10 == 0 |
|
for i in range(len(dec) // 10): |
|
all_dec.append(dec[i * 10:(i + 1) * 10]) |
|
assert len(all_goals) == len(all_dec) |
|
d = {goal: dec for (goal, dec) in zip(all_goals, all_dec)} |
|
with open('./data/goal_query_predict.json', 'w') as f: |
|
json.dump(d, f) |
|
|
|
# File: WebShop-master/baseline_models/logger.py |
|
import os |
|
import sys |
|
import shutil |
|
import os.path as osp |
|
import json |
|
import time |
|
import datetime |
|
import tempfile |
|
from collections import defaultdict |
|
import wandb |
|
DEBUG = 10 |
|
INFO = 20 |
|
WARN = 30 |
|
ERROR = 40 |
|
DISABLED = 50 |
|
|
|
class KVWriter(object): |
|
|
|
def writekvs(self, kvs): |
|
raise NotImplementedError |
|
|
|
class SeqWriter(object): |
|
|
|
def writeseq(self, seq): |
|
raise NotImplementedError |
|
|
|
class HumanOutputFormat(KVWriter, SeqWriter): |
|
|
|
def __init__(self, filename_or_file): |
|
if isinstance(filename_or_file, str): |
|
self.file = open(filename_or_file, 'wt') |
|
self.own_file = True |
|
else: |
|
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s' % filename_or_file |
|
self.file = filename_or_file |
|
self.own_file = False |
|
|
|
def writekvs(self, kvs): |
|
key2str = {} |
|
for (key, val) in sorted(kvs.items()): |
|
if isinstance(val, float): |
|
valstr = '%-8.3g' % (val,) |
|
else: |
|
valstr = str(val) |
|
key2str[self._truncate(key)] = self._truncate(valstr) |
|
if len(key2str) == 0: |
|
print('WARNING: tried to write empty key-value dict') |
|
return |
|
else: |
|
keywidth = max(map(len, key2str.keys())) |
|
valwidth = max(map(len, key2str.values())) |
|
dashes = '-' * (keywidth + valwidth + 7) |
|
lines = [dashes] |
|
for (key, val) in sorted(key2str.items()): |
|
lines.append('| %s%s | %s%s |' % (key, ' ' * (keywidth - len(key)), val, ' ' * (valwidth - len(val)))) |
|
lines.append(dashes) |
|
self.file.write('\n'.join(lines) + '\n') |
|
self.file.flush() |
|
|
|
def _truncate(self, s): |
|
return s[:20] + '...' if len(s) > 23 else s |
|
|
|
def writeseq(self, seq): |
|
seq = list(seq) |
|
for (i, elem) in enumerate(seq): |
|
self.file.write(elem) |
|
if i < len(seq) - 1: |
|
self.file.write(' ') |
|
self.file.write('\n') |
|
self.file.flush() |
|
|
|
def close(self): |
|
if self.own_file: |
|
self.file.close() |
|
|
|
class JSONOutputFormat(KVWriter): |
|
|
|
def __init__(self, filename): |
|
self.file = open(filename, 'wt') |
|
|
|
def writekvs(self, kvs): |
|
for (k, v) in sorted(kvs.items()): |
|
if hasattr(v, 'dtype'): |
|
v = v.tolist() |
|
kvs[k] = float(v) |
|
self.file.write(json.dumps(kvs) + '\n') |
|
self.file.flush() |
|
|
|
def close(self): |
|
self.file.close() |
|
|
|
class WandBOutputFormat(KVWriter): |
|
|
|
def __init__(self, filename): |
|
group = None |
|
if filename.endswith('trial'): |
|
group = filename[:-6] |
|
wandb.init(project='web_drrn', name=filename, group=group) |
|
|
|
def writekvs(self, kvs): |
|
wandb.log(kvs) |
|
|
|
def close(self): |
|
pass |
|
|
|
class CSVOutputFormat(KVWriter): |
|
|
|
def __init__(self, filename): |
|
self.file = open(filename, 'w+t') |
|
self.keys = [] |
|
self.sep = ',' |
|
|
|
def writekvs(self, kvs): |
|
extra_keys = kvs.keys() - self.keys |
|
if extra_keys: |
|
self.keys.extend(extra_keys) |
|
self.file.seek(0) |
|
lines = self.file.readlines() |
|
self.file.seek(0) |
|
for (i, k) in enumerate(self.keys): |
|
if i > 0: |
|
self.file.write(',') |
|
self.file.write(k) |
|
self.file.write('\n') |
|
for line in lines[1:]: |
|
self.file.write(line[:-1]) |
|
self.file.write(self.sep * len(extra_keys)) |
|
self.file.write('\n') |
|
for (i, k) in enumerate(self.keys): |
|
if i > 0: |
|
self.file.write(',') |
|
v = kvs.get(k) |
|
if v is not None: |
|
self.file.write(str(v)) |
|
self.file.write('\n') |
|
self.file.flush() |
|
|
|
def close(self): |
|
self.file.close() |
|
|
|
class TensorBoardOutputFormat(KVWriter): |
|
|
|
def __init__(self, dir): |
|
os.makedirs(dir, exist_ok=True) |
|
self.dir = dir |
|
self.step = 1 |
|
prefix = 'events' |
|
path = osp.join(osp.abspath(dir), prefix) |
|
import tensorflow as tf |
|
from tensorflow.python import pywrap_tensorflow |
|
from tensorflow.core.util import event_pb2 |
|
from tensorflow.python.util import compat |
|
self.tf = tf |
|
self.event_pb2 = event_pb2 |
|
self.pywrap_tensorflow = pywrap_tensorflow |
|
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path)) |
|
|
|
def writekvs(self, kvs): |
|
|
|
def summary_val(k, v): |
|
kwargs = {'tag': k, 'simple_value': float(v)} |
|
return self.tf.Summary.Value(**kwargs) |
|
summary = self.tf.Summary(value=[summary_val(k, v) for (k, v) in kvs.items()]) |
|
event = self.event_pb2.Event(wall_time=time.time(), summary=summary) |
|
event.step = self.step |
|
self.writer.WriteEvent(event) |
|
self.writer.Flush() |
|
self.step += 1 |
|
|
|
def close(self): |
|
if self.writer: |
|
self.writer.Close() |
|
self.writer = None |
|
|
|
def make_output_format(format, ev_dir, log_suffix='', args=None): |
|
os.makedirs(ev_dir, exist_ok=True) |
|
if format == 'stdout': |
|
return HumanOutputFormat(sys.stdout) |
|
elif format == 'log': |
|
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix)) |
|
elif format == 'json': |
|
return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix)) |
|
elif format == 'csv': |
|
return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix)) |
|
elif format == 'tensorboard': |
|
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix)) |
|
elif format == 'wandb': |
|
return WandBOutputFormat(ev_dir) |
|
else: |
|
raise ValueError('Unknown format specified: %s' % (format,)) |
|
|
|
def logkv(key, val): |
|
Logger.CURRENT.logkv(key, val) |
|
|
|
def logkv_mean(key, val): |
|
Logger.CURRENT.logkv_mean(key, val) |
|
|
|
def logkvs(d): |
|
for (k, v) in d.items(): |
|
logkv(k, v) |
|
|
|
def dumpkvs(): |
|
Logger.CURRENT.dumpkvs() |
|
|
|
def getkvs(): |
|
return Logger.CURRENT.name2val |
|
|
|
def log(*args, level=INFO): |
|
Logger.CURRENT.log(*args, level=level) |
|
|
|
def debug(*args): |
|
log(*args, level=DEBUG) |
|
|
|
def info(*args): |
|
log(*args, level=INFO) |
|
|
|
def warn(*args): |
|
log(*args, level=WARN) |
|
|
|
def error(*args): |
|
log(*args, level=ERROR) |
|
|
|
def set_level(level): |
|
Logger.CURRENT.set_level(level) |
|
|
|
def get_dir(): |
|
return Logger.CURRENT.get_dir() |
|
record_tabular = logkv |
|
dump_tabular = dumpkvs |
|
|
|
class ProfileKV: |
|
|
|
def __init__(self, n): |
|
self.n = 'wait_' + n |
|
|
|
def __enter__(self): |
|
self.t1 = time.time() |
|
|
|
def __exit__(self, type, value, traceback): |
|
Logger.CURRENT.name2val[self.n] += time.time() - self.t1 |
|
|
|
def profile(n): |
|
|
|
def decorator_with_name(func): |
|
|
|
def func_wrapper(*args, **kwargs): |
|
with ProfileKV(n): |
|
return func(*args, **kwargs) |
|
return func_wrapper |
|
return decorator_with_name |
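
# A small sketch (not in the original file) of ProfileKV / @profile: after configure(), every
# call to a decorated function accumulates its wall-clock time under the key 'wait_<name>',
# which then shows up in the next dumpkvs().
def _example_profile():
    configure(format_strs=['stdout'])

    @profile('sleepy')
    def sleepy():
        time.sleep(0.01)

    sleepy()
    logkv('demo', 1)
    dumpkvs()
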
|
|
|
class Logger(object): |
|
DEFAULT = None |
|
CURRENT = None |
|
|
|
def __init__(self, dir, output_formats): |
|
self.name2val = defaultdict(float) |
|
self.name2cnt = defaultdict(int) |
|
self.level = INFO |
|
self.dir = dir |
|
self.output_formats = output_formats |
|
|
|
def logkv(self, key, val): |
|
self.name2val[key] = val |
|
|
|
def logkv_mean(self, key, val): |
|
if val is None: |
|
self.name2val[key] = None |
|
return |
|
(oldval, cnt) = (self.name2val[key], self.name2cnt[key]) |
|
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1) |
|
self.name2cnt[key] = cnt + 1 |
|
|
|
def dumpkvs(self): |
|
if self.level == DISABLED: |
|
return |
|
for fmt in self.output_formats: |
|
if isinstance(fmt, KVWriter): |
|
fmt.writekvs(self.name2val) |
|
self.name2val.clear() |
|
self.name2cnt.clear() |
|
|
|
def log(self, *args, level=INFO): |
|
if self.level <= level: |
|
self._do_log(args) |
|
|
|
def set_level(self, level): |
|
self.level = level |
|
|
|
def get_dir(self): |
|
return self.dir |
|
|
|
def close(self): |
|
for fmt in self.output_formats: |
|
fmt.close() |
|
|
|
def _do_log(self, args): |
|
for fmt in self.output_formats: |
|
if isinstance(fmt, SeqWriter): |
|
fmt.writeseq(map(str, args)) |
|
|
|
def configure(dir=None, format_strs=None): |
|
if dir is None: |
|
dir = os.getenv('OPENAI_LOGDIR') |
|
if dir is None: |
|
dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f')) |
|
assert isinstance(dir, str) |
|
os.makedirs(dir, exist_ok=True) |
|
log_suffix = '' |
|
rank = 0 |
|
for varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']: |
|
if varname in os.environ: |
|
rank = int(os.environ[varname]) |
|
if rank > 0: |
|
log_suffix = '-rank%03i' % rank |
|
if format_strs is None: |
|
if rank == 0: |
|
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',') |
|
else: |
|
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',') |
|
format_strs = filter(None, format_strs) |
|
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs] |
|
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats) |
|
log('Logging to %s' % dir) |
|
|
|
def _configure_default_logger(): |
|
format_strs = None |
|
if 'OPENAI_LOG_FORMAT' not in os.environ: |
|
format_strs = ['stdout'] |
|
configure(format_strs=format_strs) |
|
Logger.DEFAULT = Logger.CURRENT |
|
|
|
def reset(): |
|
if Logger.CURRENT is not Logger.DEFAULT: |
|
Logger.CURRENT.close() |
|
Logger.CURRENT = Logger.DEFAULT |
|
log('Reset logger') |
|
|
|
class scoped_configure(object): |
|
|
|
def __init__(self, dir=None, format_strs=None): |
|
self.dir = dir |
|
self.format_strs = format_strs |
|
self.prevlogger = None |
|
|
|
def __enter__(self): |
|
self.prevlogger = Logger.CURRENT |
|
configure(dir=self.dir, format_strs=self.format_strs) |
|
|
|
def __exit__(self, *args): |
|
Logger.CURRENT.close() |
|
Logger.CURRENT = self.prevlogger |
|
|
|
def _demo(): |
|
info('hi') |
|
debug("shouldn't appear") |
|
set_level(DEBUG) |
|
debug('should appear') |
|
dir = '/tmp/testlogging' |
|
if os.path.exists(dir): |
|
shutil.rmtree(dir) |
|
configure(dir=dir) |
|
logkv('a', 3) |
|
logkv('b', 2.5) |
|
dumpkvs() |
|
logkv('b', -2.5) |
|
logkv('a', 5.5) |
|
dumpkvs() |
|
info('^^^ should see a = 5.5') |
|
logkv_mean('b', -22.5) |
|
logkv_mean('b', -44.4) |
|
logkv('a', 5.5) |
|
dumpkvs() |
|
info('^^^ should see b = 33.3') |
|
logkv('b', -2.5) |
|
dumpkvs() |
|
logkv('a', 'longasslongasslongasslongasslongasslongassvalue') |
|
dumpkvs() |
|
|
|
def read_json(fname): |
|
import pandas |
|
ds = [] |
|
with open(fname, 'rt') as fh: |
|
for line in fh: |
|
ds.append(json.loads(line)) |
|
return pandas.DataFrame(ds) |
|
|
|
def read_csv(fname): |
|
import pandas |
|
return pandas.read_csv(fname, index_col=None, comment='#') |
|
|
|
def read_tb(path): |
|
import pandas |
|
import numpy as np |
|
from glob import glob |
|
from collections import defaultdict |
|
import tensorflow as tf |
|
if osp.isdir(path): |
|
fnames = glob(osp.join(path, 'events.*')) |
|
elif osp.basename(path).startswith('events.'): |
|
fnames = [path] |
|
else: |
|
raise NotImplementedError('Expected tensorboard file or directory containing them. Got %s' % path) |
|
tag2pairs = defaultdict(list) |
|
maxstep = 0 |
|
for fname in fnames: |
|
for summary in tf.train.summary_iterator(fname): |
|
if summary.step > 0: |
|
for v in summary.summary.value: |
|
pair = (summary.step, v.simple_value) |
|
tag2pairs[v.tag].append(pair) |
|
maxstep = max(summary.step, maxstep) |
|
data = np.empty((maxstep, len(tag2pairs))) |
|
data[:] = np.nan |
|
tags = sorted(tag2pairs.keys()) |
|
for (colidx, tag) in enumerate(tags): |
|
pairs = tag2pairs[tag] |
|
for (step, value) in pairs: |
|
data[step - 1, colidx] = value |
|
return pandas.DataFrame(data, columns=tags) |
|
if __name__ == '__main__': |
|
_demo() |
|
|
|
# File: WebShop-master/baseline_models/models/bert.py |
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
from transformers import BertModel, BertConfig, PretrainedConfig, PreTrainedModel |
|
from transformers.modeling_outputs import SequenceClassifierOutput |
|
from .modules import EncoderRNN, BiAttention, get_aggregated |
|
|
|
class BertConfigForWebshop(PretrainedConfig): |
|
model_type = 'bert' |
|
|
|
def __init__(self, pretrained_bert=True, image=False, **kwargs): |
|
self.pretrained_bert = pretrained_bert |
|
self.image = image |
|
super().__init__(**kwargs) |
|
|
|
class BertModelForWebshop(PreTrainedModel): |
|
config_class = BertConfigForWebshop |
|
|
|
def __init__(self, config): |
|
super().__init__(config) |
|
bert_config = BertConfig.from_pretrained('bert-base-uncased') |
|
if config.pretrained_bert: |
|
self.bert = BertModel.from_pretrained('bert-base-uncased') |
|
else: |
|
self.bert = BertModel(bert_config)  # train from scratch with the standard bert-base-uncased architecture
|
self.bert.resize_token_embeddings(30526)  # 30522 base BERT tokens + the 4 added button tokens
|
self.attn = BiAttention(768, 0.0) |
|
self.linear_1 = nn.Linear(768 * 4, 768) |
|
self.relu = nn.ReLU() |
|
self.linear_2 = nn.Linear(768, 1) |
|
if config.image: |
|
self.image_linear = nn.Linear(512, 768) |
|
else: |
|
self.image_linear = None |
|
self.linear_3 = nn.Sequential(nn.Linear(768, 128), nn.LeakyReLU(), nn.Linear(128, 1)) |
|
|
|
def forward(self, state_input_ids, state_attention_mask, action_input_ids, action_attention_mask, sizes, images=None, labels=None): |
|
sizes = sizes.tolist() |
|
state_rep = self.bert(state_input_ids, attention_mask=state_attention_mask)[0] |
|
if images is not None and self.image_linear is not None: |
|
images = self.image_linear(images) |
|
state_rep = torch.cat([images.unsqueeze(1), state_rep], dim=1) |
|
state_attention_mask = torch.cat([state_attention_mask[:, :1], state_attention_mask], dim=1) |
|
action_rep = self.bert(action_input_ids, attention_mask=action_attention_mask)[0] |
|
state_rep = torch.cat([state_rep[i:i + 1].repeat(j, 1, 1) for (i, j) in enumerate(sizes)], dim=0) |
|
state_attention_mask = torch.cat([state_attention_mask[i:i + 1].repeat(j, 1) for (i, j) in enumerate(sizes)], dim=0) |
|
act_lens = action_attention_mask.sum(1).tolist() |
|
state_action_rep = self.attn(action_rep, state_rep, state_attention_mask) |
|
state_action_rep = self.relu(self.linear_1(state_action_rep)) |
|
act_values = get_aggregated(state_action_rep, act_lens, 'mean') |
|
act_values = self.linear_2(act_values).squeeze(1) |
|
logits = [F.log_softmax(_, dim=0) for _ in act_values.split(sizes)] |
|
loss = None |
|
if labels is not None: |
|
loss = -sum([logit[label] for (logit, label) in zip(logits, labels)]) / len(logits) |
|
return SequenceClassifierOutput(loss=loss, logits=logits) |
|
|
|
def rl_forward(self, state_batch, act_batch, value=False, q=False, act=False): |
|
act_values = [] |
|
act_sizes = [] |
|
values = [] |
|
for (state, valid_acts) in zip(state_batch, act_batch): |
|
with torch.set_grad_enabled(not act): |
|
state_ids = torch.tensor([state.obs]).cuda() |
|
state_mask = (state_ids > 0).int() |
|
act_lens = [len(_) for _ in valid_acts] |
|
act_ids = [torch.tensor(_) for _ in valid_acts] |
|
act_ids = nn.utils.rnn.pad_sequence(act_ids, batch_first=True).cuda() |
|
act_mask = (act_ids > 0).int() |
|
act_size = torch.tensor([len(valid_acts)]).cuda() |
|
if self.image_linear is not None: |
|
images = [state.image_feat] |
|
images = [torch.zeros(512) if _ is None else _ for _ in images] |
|
images = torch.stack(images).cuda() |
|
else: |
|
images = None |
|
logits = self.forward(state_ids, state_mask, act_ids, act_mask, act_size, images=images).logits[0] |
|
act_values.append(logits) |
|
act_sizes.append(len(valid_acts)) |
|
if value: |
|
v = self.bert(state_ids, state_mask)[0] |
|
values.append(self.linear_3(v[0][0])) |
|
act_values = torch.cat(act_values, dim=0) |
|
act_values = torch.cat([F.log_softmax(_, dim=0) for _ in act_values.split(act_sizes)], dim=0) |
|
if value: |
|
values = torch.cat(values, dim=0) |
|
return (act_values, act_sizes, values) |
|
else: |
|
return (act_values, act_sizes) |
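
# A torch-only sketch (not part of the original file) of how the flat per-action scores above
# are regrouped with `sizes` and normalized per state, mirroring the log_softmax splits in
# forward() and rl_forward(). The numbers are made up.
def _example_per_state_log_softmax():
    act_values = torch.tensor([0.2, 1.1, -0.3, 0.7, 0.7])  # 5 candidate actions in total
    sizes = [3, 2]  # the first state has 3 valid actions, the second has 2
    logits = [F.log_softmax(chunk, dim=0) for chunk in act_values.split(sizes)]
    return logits  # two tensors of shapes (3,) and (2,); each exponentiates to a distribution
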
|
|
|
# File: WebShop-master/baseline_models/models/modules.py |
|
import itertools |
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
from torch.nn.utils import rnn |
|
|
|
def duplicate(output, mask, lens, act_sizes): |
|
output = torch.cat([output[i:i + 1].repeat(j, 1, 1) for (i, j) in enumerate(act_sizes)], dim=0) |
|
mask = torch.cat([mask[i:i + 1].repeat(j, 1) for (i, j) in enumerate(act_sizes)], dim=0) |
|
lens = list(itertools.chain.from_iterable([lens[i:i + 1] * j for (i, j) in enumerate(act_sizes)])) |
|
return (output, mask, lens) |
|
|
|
def get_aggregated(output, lens, method): |
|
if method == 'mean': |
|
return torch.stack([output[i, :j, :].mean(0) for (i, j) in enumerate(lens)], dim=0) |
|
elif method == 'last': |
|
return torch.stack([output[i, j - 1, :] for (i, j) in enumerate(lens)], dim=0) |
|
elif method == 'first': |
|
return output[:, 0, :] |
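
# A minimal sketch (not in the original file) of get_aggregated on a padded batch: `lens` holds
# the true sequence lengths, so 'mean' averages only the real timesteps and 'last' picks the
# hidden state at position len-1.
def _example_get_aggregated():
    output = torch.arange(24, dtype=torch.float).view(2, 4, 3)  # (batch, seq, hidden)
    lens = [4, 2]
    mean_rep = get_aggregated(output, lens, 'mean')  # shape (2, 3)
    last_rep = get_aggregated(output, lens, 'last')  # shape (2, 3)
    return (mean_rep, last_rep)
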
|
|
|
class EncoderRNN(nn.Module): |
|
|
|
def __init__(self, input_size, num_units, nlayers, concat, bidir, layernorm, return_last): |
|
super().__init__() |
|
self.layernorm = layernorm == 'layer' |
|
if layernorm: |
|
self.norm = nn.LayerNorm(input_size) |
|
self.rnns = [] |
|
for i in range(nlayers): |
|
if i == 0: |
|
input_size_ = input_size |
|
output_size_ = num_units |
|
else: |
|
input_size_ = num_units if not bidir else num_units * 2 |
|
output_size_ = num_units |
|
self.rnns.append(nn.GRU(input_size_, output_size_, 1, bidirectional=bidir, batch_first=True)) |
|
self.rnns = nn.ModuleList(self.rnns) |
|
self.init_hidden = nn.ParameterList([nn.Parameter(torch.zeros(size=(2 if bidir else 1, 1, num_units)), requires_grad=True) for _ in range(nlayers)]) |
|
self.concat = concat |
|
self.nlayers = nlayers |
|
self.return_last = return_last |
|
self.reset_parameters() |
|
|
|
def reset_parameters(self): |
|
with torch.no_grad(): |
|
for rnn_layer in self.rnns: |
|
for (name, p) in rnn_layer.named_parameters(): |
|
if 'weight_ih' in name: |
|
torch.nn.init.xavier_uniform_(p.data) |
|
elif 'weight_hh' in name: |
|
torch.nn.init.orthogonal_(p.data) |
|
elif 'bias' in name: |
|
p.data.fill_(0.0) |
|
else: |
|
p.data.normal_(std=0.1) |
|
|
|
def get_init(self, bsz, i): |
|
return self.init_hidden[i].expand(-1, bsz, -1).contiguous() |
|
|
|
def forward(self, inputs, input_lengths=None): |
|
(bsz, slen) = (inputs.size(0), inputs.size(1)) |
|
if self.layernorm: |
|
inputs = self.norm(inputs) |
|
output = inputs |
|
outputs = [] |
|
lens = 0 |
|
if input_lengths is not None: |
|
lens = input_lengths |
|
for i in range(self.nlayers): |
|
hidden = self.get_init(bsz, i) |
|
if input_lengths is not None: |
|
output = rnn.pack_padded_sequence(output, lens, batch_first=True, enforce_sorted=False) |
|
(output, hidden) = self.rnns[i](output, hidden) |
|
if input_lengths is not None: |
|
(output, _) = rnn.pad_packed_sequence(output, batch_first=True) |
|
if output.size(1) < slen: |
|
padding = torch.zeros(size=(1, 1, 1), dtype=output.dtype, device=output.device)
|
output = torch.cat([output, padding.expand(output.size(0), slen - output.size(1), output.size(2))], dim=1) |
|
if self.return_last: |
|
outputs.append(hidden.permute(1, 0, 2).contiguous().view(bsz, -1)) |
|
else: |
|
outputs.append(output) |
|
if self.concat: |
|
return torch.cat(outputs, dim=2) |
|
return outputs[-1] |
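
# A CPU-runnable sketch (not in the original file) of EncoderRNN on a toy batch. With
# bidir=True and one layer, the output keeps the input sequence length and doubles the
# hidden size; input_lengths lets shorter sequences be packed before the GRU.
def _example_encoder_rnn():
    enc = EncoderRNN(input_size=16, num_units=32, nlayers=1, concat=True,
                     bidir=True, layernorm='None', return_last=False)
    inputs = torch.randn(2, 5, 16)  # (batch, seq, embedding)
    output = enc(inputs, input_lengths=[5, 3])
    return output.shape  # torch.Size([2, 5, 64])
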
|
|
|
class BiAttention(nn.Module): |
|
|
|
def __init__(self, input_size, dropout): |
|
super().__init__() |
|
self.dropout = nn.Dropout(dropout) |
|
self.input_linear = nn.Linear(input_size, 1, bias=False) |
|
self.memory_linear = nn.Linear(input_size, 1, bias=False) |
|
self.dot_scale = nn.Parameter(torch.zeros(size=(input_size,)).uniform_(1.0 / input_size ** 0.5), requires_grad=True) |
|
self.init_parameters() |
|
|
|
def init_parameters(self): |
|
return |
|
|
|
def forward(self, context, memory, mask): |
|
(bsz, input_len) = (context.size(0), context.size(1)) |
|
memory_len = memory.size(1) |
|
context = self.dropout(context) |
|
memory = self.dropout(memory) |
|
input_dot = self.input_linear(context) |
|
memory_dot = self.memory_linear(memory).view(bsz, 1, memory_len) |
|
cross_dot = torch.bmm(context * self.dot_scale, memory.permute(0, 2, 1).contiguous()) |
|
att = input_dot + memory_dot + cross_dot |
|
att = att - 1e+30 * (1 - mask[:, None]) |
|
weight_one = F.softmax(att, dim=-1) |
|
output_one = torch.bmm(weight_one, memory) |
|
weight_two = F.softmax(att.max(dim=-1)[0], dim=-1).view(bsz, 1, input_len) |
|
output_two = torch.bmm(weight_two, context) |
|
return torch.cat([context, output_one, context * output_one, output_two * output_one], dim=-1) |
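
# A CPU-runnable sketch (not in the original file) of BiAttention with random tensors: the
# context (e.g. action tokens) attends over the memory (e.g. state tokens), and the fused
# output concatenates four views of size input_size along the last dimension.
def _example_bi_attention():
    attn = BiAttention(input_size=8, dropout=0.0)
    context = torch.randn(2, 4, 8)
    memory = torch.randn(2, 6, 8)
    mask = torch.ones(2, 6)  # 1 for real memory positions, 0 for padding
    fused = attn(context, memory, mask)
    return fused.shape  # torch.Size([2, 4, 32])
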
|
|
|
# File: WebShop-master/baseline_models/models/rnn.py |
|
import itertools

import torch
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
from .modules import EncoderRNN, BiAttention, get_aggregated, duplicate |
|
|
|
class RCDQN(nn.Module): |
|
|
|
def __init__(self, vocab_size, embedding_dim, hidden_dim, arch, grad, embs=None, gru_embed='embedding', get_image=0, bert_path=''): |
|
super().__init__() |
|
self.arch = arch

self.word_dim = embedding_dim
|
self.word_emb = nn.Embedding(vocab_size, embedding_dim) |
|
if embs is not None: |
|
print('Loading embeddings of shape {}'.format(embs.shape)) |
|
self.word_emb.weight.data.copy_(torch.from_numpy(embs)) |
|
self.hidden_dim = hidden_dim |
|
self.keep_prob = 1.0 |
|
self.rnn = EncoderRNN(self.word_dim, self.hidden_dim, 1, concat=True, bidir=True, layernorm='None', return_last=False) |
|
self.att_1 = BiAttention(self.hidden_dim * 2, 1 - self.keep_prob) |
|
self.att_2 = BiAttention(self.hidden_dim * 2, 1 - self.keep_prob) |
|
self.att_3 = BiAttention(embedding_dim, 1 - self.keep_prob) |
|
self.linear_1 = nn.Sequential(nn.Linear(self.hidden_dim * 8, self.hidden_dim), nn.LeakyReLU()) |
|
self.rnn_2 = EncoderRNN(self.hidden_dim, self.hidden_dim, 1, concat=True, bidir=True, layernorm='layer', return_last=False) |
|
self.linear_2 = nn.Sequential(nn.Linear(self.hidden_dim * 12, self.hidden_dim * 2), nn.LeakyReLU()) |
|
self.linear_3 = nn.Sequential(nn.Linear(self.hidden_dim * 2, self.hidden_dim), nn.LeakyReLU(), nn.Linear(self.hidden_dim, 1)) |
|
self.get_image = get_image |
|
if self.get_image: |
|
self.linear_image = nn.Linear(512, self.hidden_dim) |
|
|
|
def prepare(self, ids): |
|
lens = [len(_) for _ in ids] |
|
ids = [torch.tensor(_) for _ in ids] |
|
ids = nn.utils.rnn.pad_sequence(ids, batch_first=True).cuda() |
|
mask = (ids > 0).float() |
|
embed = self.word_emb(ids) |
|
output = self.rnn(embed, lens) |
|
return (ids, lens, mask, embed, output) |
|
|
|
def forward(self, state_batch, act_batch, value=False, q=False, act=False): |
|
if self.arch == 'bert': |
|
return self.bert_forward(state_batch, act_batch, value, q, act) |
|
(obs_ids, obs_lens, obs_mask, obs_embed, obs_output) = self.prepare([state.obs for state in state_batch]) |
|
(goal_ids, goal_lens, goal_mask, goal_embed, goal_output) = self.prepare([state.goal for state in state_batch]) |
|
state_output = self.att_1(obs_output, goal_output, goal_mask) |
|
state_output = self.linear_1(state_output) |
|
if self.get_image: |
|
images = [state.image_feat for state in state_batch] |
|
images = [torch.zeros(512) if _ is None else _ for _ in images] |
|
images = torch.stack([_ for _ in images]).cuda() |
|
images = self.linear_image(images) |
|
state_output = torch.cat([images.unsqueeze(1), state_output], dim=1) |
|
obs_lens = [_ + 1 for _ in obs_lens] |
|
obs_mask = torch.cat([obs_mask[:, :1], obs_mask], dim=1) |
|
state_output = self.rnn_2(state_output, obs_lens) |
|
if value: |
|
values = get_aggregated(state_output, obs_lens, 'mean') |
|
values = self.linear_3(values).squeeze(1) |
|
act_sizes = [len(_) for _ in act_batch] |
|
act_batch = list(itertools.chain.from_iterable(act_batch)) |
|
(act_ids, act_lens, act_mask, act_embed, act_output) = self.prepare(act_batch) |
|
(state_output, state_mask, state_lens) = duplicate(state_output, obs_mask, obs_lens, act_sizes) |
|
(goal_embed, goal_mask, goal_lens) = duplicate(goal_embed, goal_mask, goal_lens, act_sizes) |
|
state_act_output = self.att_2(act_output, state_output, state_mask) |
|
goal_act_output = self.att_3(act_embed, goal_embed, goal_mask) |
|
output = torch.cat([state_act_output, goal_act_output], dim=-1) |
|
output = get_aggregated(output, act_lens, 'mean') |
|
output = self.linear_2(output) |
|
act_values = self.linear_3(output).squeeze(1) |
|
if not q: |
|
act_values = torch.cat([F.log_softmax(_, dim=0) for _ in act_values.split(act_sizes)], dim=0) |
|
if value: |
|
return (act_values, act_sizes, values) |
|
else: |
|
return (act_values, act_sizes) |
|
|
|
# File: WebShop-master/baseline_models/train_choice_il.py |
|
"""""" |
|
import argparse |
|
import json |
|
import logging |
|
import math |
|
import os |
|
import random |
|
from pathlib import Path |
|
import datasets |
|
import torch |
|
from datasets import load_dataset, load_metric |
|
from torch.utils.data import DataLoader |
|
from tqdm.auto import tqdm |
|
import transformers |
|
from accelerate import Accelerator |
|
from accelerate.logging import get_logger |
|
from accelerate.utils import set_seed |
|
from huggingface_hub import Repository |
|
from transformers import AdamW, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, BertModel, BertConfig, DataCollatorWithPadding, PretrainedConfig, PreTrainedModel, SchedulerType, default_data_collator, get_scheduler |
|
from transformers.utils.versions import require_version |
|
from datasets import Dataset |
|
from transformers.modeling_outputs import SequenceClassifierOutput |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
import wandb |
|
from models.bert import BertModelForWebshop, BertConfigForWebshop |
|
logger = get_logger(__name__) |
|
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') |
|
task_to_keys = {'cola': ('sentence', None), 'mnli': ('premise', 'hypothesis'), 'mrpc': ('sentence1', 'sentence2'), 'qnli': ('question', 'sentence'), 'qqp': ('question1', 'question2'), 'rte': ('sentence1', 'sentence2'), 'sst2': ('sentence', None), 'stsb': ('sentence1', 'sentence2'), 'wnli': ('sentence1', 'sentence2')} |
|
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left') |
|
print(len(tokenizer)) |
|
tokenizer.add_tokens(['[button]', '[button_]', '[clicked button]', '[clicked button_]'], special_tokens=True) |
|
print(len(tokenizer)) |
|
PATH = './data/il_trajs_finalized_images.jsonl' |
|
MEM_PATH = './data/il_trajs_mem_finalized_images.jsonl' |
|
HUMAN_GOAL_PATH = './data/human_goals.json' |
|
|
|
def process(s): |
|
s = s.lower().replace('"', '').replace("'", '').strip() |
|
s = s.replace('[sep]', '[SEP]') |
|
return s |
|
|
|
def process_goal(state): |
|
state = state.lower().replace('"', '').replace("'", '') |
|
state = state.replace('amazon shopping game\ninstruction:', '').replace('webshop\ninstruction:', '') |
|
state = state.replace('\n[button] search [button_]', '').strip() |
|
if ', and price lower than' in state: |
|
state = state.split(', and price lower than')[0] |
|
return state |
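
# A small sketch (not in the original file) showing what process_goal strips from a raw WebShop
# state: the site header, the search-button markup, and the trailing price constraint.
def _example_process_goal():
    state = ('WebShop\nInstruction:\ni need a long clip-in hair extension, '
             'and price lower than 20.00 dollars\n[button] search [button_]')
    return process_goal(state)  # -> 'i need a long clip-in hair extension'
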
|
|
|
def get_data(split, mem=False, filter_search=True): |
|
path = MEM_PATH if mem else PATH |
|
print('Loading data from {}'.format(path)) |
|
with open(path, 'r') as json_file: |
|
json_list = list(json_file) |
|
human_goals = json.load(open(HUMAN_GOAL_PATH, 'r')) |
|
random.seed(233) |
|
random.shuffle(json_list) |
|
goal_range = range(len(human_goals)) |
|
if split == 'train': |
|
goal_range = range(1500, len(human_goals)) |
|
elif split == 'eval': |
|
goal_range = range(500, 1500) |
|
elif split == 'test': |
|
goal_range = range(0, 500) |
|
bad = cnt = 0 |
|
(state_list, action_list, idx_list, size_list) = ([], [], [], []) |
|
image_list = [] |
|
num_trajs = 0 |
|
for json_str in json_list: |
|
result = json.loads(json_str) |
|
s = process_goal(result['states'][0]) |
|
assert s in human_goals, s |
|
goal_idx = human_goals.index(s) |
|
if goal_idx not in goal_range: |
|
continue |
|
num_trajs += 1 |
|
if 'images' not in result: |
|
result['images'] = [0] * len(result['states']) |
|
for (state, valid_acts, idx, image) in zip(result['states'], result['available_actions'], result['action_idxs'], result['images']): |
|
cnt += 1 |
|
if filter_search and idx == -1: |
|
continue |
|
state_list.append(state) |
|
image_list.append([0.0] * 512 if image == 0 else image) |
|
if len(valid_acts) > 20: |
|
bad += 1 |
|
new_idxs = list(range(6)) + random.sample(range(6, len(valid_acts)), 10) |
|
if idx not in new_idxs: |
|
new_idxs += [idx] |
|
new_idxs = sorted(new_idxs) |
|
valid_acts = [valid_acts[i] for i in new_idxs] |
|
idx = new_idxs.index(idx) |
|
action_list.extend(valid_acts) |
|
idx_list.append(idx) |
|
size_list.append(len(valid_acts)) |
|
print('num of {} trajs: {}'.format(split, num_trajs)) |
|
print('total transitions and bad transitions: {} {}'.format(cnt, bad)) |
|
(state_list, action_list) = (list(map(process, state_list)), list(map(process, action_list))) |
|
return (state_list, action_list, idx_list, size_list, image_list) |
|
|
|
def get_dataset(split, mem=False): |
|
(states, actions, idxs, sizes, images) = get_data(split, mem) |
|
state_encodings = tokenizer(states, padding='max_length', max_length=512, truncation=True, return_tensors='pt') |
|
action_encodings = tokenizer(actions, padding='max_length', max_length=128, truncation=True, return_tensors='pt') |
|
dataset = {'state_input_ids': state_encodings['input_ids'], 'state_attention_mask': state_encodings['attention_mask'], 'action_input_ids': action_encodings['input_ids'].split(sizes), 'action_attention_mask': action_encodings['attention_mask'].split(sizes), 'sizes': sizes, 'images': torch.tensor(images), 'labels': idxs} |
|
return Dataset.from_dict(dataset) |
|
|
|
def data_collator(batch): |
|
(state_input_ids, state_attention_mask, action_input_ids, action_attention_mask, sizes, labels, images) = ([], [], [], [], [], [], []) |
|
for sample in batch: |
|
state_input_ids.append(sample['state_input_ids']) |
|
state_attention_mask.append(sample['state_attention_mask']) |
|
action_input_ids.extend(sample['action_input_ids']) |
|
action_attention_mask.extend(sample['action_attention_mask']) |
|
sizes.append(sample['sizes']) |
|
labels.append(sample['labels']) |
|
images.append(sample['images']) |
|
max_state_len = max((sum(x) for x in state_attention_mask)) |
|
max_action_len = max((sum(x) for x in action_attention_mask)) |
|
return {'state_input_ids': torch.tensor(state_input_ids)[:, :max_state_len], 'state_attention_mask': torch.tensor(state_attention_mask)[:, :max_state_len], 'action_input_ids': torch.tensor(action_input_ids)[:, :max_action_len], 'action_attention_mask': torch.tensor(action_attention_mask)[:, :max_action_len], 'sizes': torch.tensor(sizes), 'images': torch.tensor(images), 'labels': torch.tensor(labels)} |
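
# A torch-only sketch (not in the original file) of data_collator on a single fake sample with
# two candidate actions: state/action tensors are truncated to the longest real (unpadded)
# length in the batch, and per-sample action lists are flattened with `sizes` recording the grouping.
def _example_data_collator():
    sample = {
        'state_input_ids': [101, 2009, 102, 0],
        'state_attention_mask': [1, 1, 1, 0],
        'action_input_ids': [[101, 102, 0], [101, 2828, 102]],
        'action_attention_mask': [[1, 1, 0], [1, 1, 1]],
        'sizes': 2,
        'labels': 1,
        'images': [0.0] * 512,
    }
    batch = data_collator([sample])
    return {k: v.shape for k, v in batch.items()}
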
|
|
|
def parse_args(): |
|
parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task') |
|
parser.add_argument('--task_name', type=str, default='mrpc', help='The name of the glue task to train on.', choices=list(task_to_keys.keys()))
|
parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.') |
|
parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.') |
|
parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed.')
|
parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.') |
|
parser.add_argument('--model_name_or_path', default='bert-base-uncased', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.') |
|
parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).') |
|
parser.add_argument('--per_device_train_batch_size', type=int, default=1, help='Batch size (per device) for the training dataloader.') |
|
parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.') |
|
parser.add_argument('--learning_rate', type=float, default=2e-05, help='Initial learning rate (after the potential warmup period) to use.') |
|
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.') |
|
parser.add_argument('--num_train_epochs', type=int, default=10, help='Total number of training epochs to perform.') |
|
parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.') |
|
parser.add_argument('--gradient_accumulation_steps', type=int, default=32, help='Number of update steps to accumulate before performing a backward/update pass.')
|
parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup']) |
|
parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.') |
|
parser.add_argument('--output_dir', type=str, default='./ckpts/web_click', help='Where to store the final model.') |
|
parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.') |
|
parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.') |
|
parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.') |
|
parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.') |
|
parser.add_argument('--checkpointing_steps', type=str, default='epoch', help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.") |
|
parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.') |
|
parser.add_argument('--with_tracking', type=int, default=1, help='Whether to load in all available experiment trackers from the environment and use them for logging.') |
|
parser.add_argument('--mem', type=int, default=0, help='State with memory') |
|
parser.add_argument('--image', type=int, default=1, help='State with image') |
|
parser.add_argument('--pretrain', type=int, default=1, help='Pretrained BERT or not') |
|
parser.add_argument('--logging_steps', type=int, default=10, help='Logging in training') |
|
args = parser.parse_args() |
|
if args.task_name is None and args.train_file is None and (args.validation_file is None): |
|
raise ValueError('Need either a task name or a training/validation file.') |
|
else: |
|
if args.train_file is not None: |
|
extension = args.train_file.split('.')[-1] |
|
assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.' |
|
if args.validation_file is not None: |
|
extension = args.validation_file.split('.')[-1] |
|
assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.' |
|
if args.push_to_hub: |
|
assert args.output_dir is not None, 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.' |
|
return args |
|
|
|
def main(): |
|
args = parse_args() |
|
accelerator = Accelerator() |
|
wandb.init(project='bert_il', config=args, name=args.output_dir) |
|
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) |
|
logger.info(accelerator.state, main_process_only=False) |
|
if accelerator.is_local_main_process: |
|
datasets.utils.logging.set_verbosity_warning() |
|
transformers.utils.logging.set_verbosity_info() |
|
else: |
|
datasets.utils.logging.set_verbosity_error() |
|
transformers.utils.logging.set_verbosity_error() |
|
if args.seed is not None: |
|
set_seed(args.seed) |
|
config = BertConfigForWebshop(image=args.image, pretrained_bert=args.pretrain)
|
model = BertModelForWebshop(config) |
|
train_dataset = get_dataset('train', mem=args.mem) |
|
eval_dataset = get_dataset('eval', mem=args.mem) |
|
for index in random.sample(range(len(train_dataset)), 3): |
|
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.') |
|
train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size) |
|
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) |
|
no_decay = ['bias', 'LayerNorm.weight'] |
|
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}] |
|
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) |
|
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
|
if args.max_train_steps is None: |
|
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
|
else: |
|
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) |
|
lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps) |
|
(model, optimizer, train_dataloader, eval_dataloader, lr_scheduler) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader, lr_scheduler) |
|
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) |
|
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch |
|
if hasattr(args.checkpointing_steps, 'isdigit'): |
|
checkpointing_steps = args.checkpointing_steps |
|
if args.checkpointing_steps.isdigit(): |
|
checkpointing_steps = int(args.checkpointing_steps) |
|
else: |
|
checkpointing_steps = None |
|
if args.with_tracking: |
|
experiment_config = vars(args) |
|
experiment_config['lr_scheduler_type'] = experiment_config['lr_scheduler_type'].value |
|
accelerator.init_trackers('glue_no_trainer', experiment_config) |
|
metric = load_metric('accuracy') |
|
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps |
|
logger.info('***** Running training *****') |
|
logger.info(f' Num examples = {len(train_dataset)}') |
|
logger.info(f' Num Epochs = {args.num_train_epochs}') |
|
logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}') |
|
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}') |
|
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}') |
|
logger.info(f' Total optimization steps = {args.max_train_steps}') |
|
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) |
|
completed_steps = 0 |
|
starting_epoch = 0 |
|
if args.resume_from_checkpoint: |
|
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != '': |
|
accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}') |
|
accelerator.load_state(args.resume_from_checkpoint) |
|
path = os.path.basename(args.resume_from_checkpoint) |
|
else: |
|
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] |
|
dirs.sort(key=os.path.getctime) |
|
path = dirs[-1] |
|
training_difference = os.path.splitext(path)[0] |
|
if 'epoch' in training_difference: |
|
starting_epoch = int(training_difference.replace('epoch_', '')) + 1 |
|
resume_step = None |
|
else: |
|
resume_step = int(training_difference.replace('step_', '')) |
|
starting_epoch = resume_step // len(train_dataloader) |
|
resume_step -= starting_epoch * len(train_dataloader) |
|
for epoch in range(starting_epoch, args.num_train_epochs): |
|
model.train() |
|
if args.with_tracking: |
|
total_loss = total_step = 0 |
|
for (step, batch) in enumerate(train_dataloader): |
|
if args.resume_from_checkpoint and epoch == starting_epoch: |
|
if resume_step is not None and step < resume_step: |
|
completed_steps += 1 |
|
continue |
|
outputs = model(**batch) |
|
loss = outputs.loss |
|
if args.with_tracking: |
|
total_loss += loss.detach().float() |
|
total_step += 1 |
|
loss = loss / args.gradient_accumulation_steps |
|
accelerator.backward(loss) |
|
metric.add_batch(predictions=torch.stack([logit.argmax(dim=0) for logit in outputs.logits]), references=batch['labels']) |
|
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: |
|
optimizer.step() |
|
lr_scheduler.step() |
|
optimizer.zero_grad() |
|
progress_bar.update(1) |
|
completed_steps += 1 |
|
if args.with_tracking and args.logging_steps > 0 and (completed_steps % args.logging_steps == 0): |
|
train_metric = metric.compute() |
|
wandb.log({'train_accuracy': train_metric, 'train_loss': total_loss / total_step, 'train_step': completed_steps}) |
|
total_loss = total_step = 0 |
|
if isinstance(checkpointing_steps, int): |
|
if completed_steps % checkpointing_steps == 0: |
|
output_dir = f'step_{completed_steps}' |
|
if args.output_dir is not None: |
|
output_dir = os.path.join(args.output_dir, output_dir) |
|
accelerator.save_state(output_dir) |
|
if completed_steps >= args.max_train_steps: |
|
break |
|
model.eval() |
|
samples_seen = 0 |
|
total_loss = total_step = 0 |
|
if len(metric) > 0: |
|
metric.compute() |
|
for (step, batch) in enumerate(eval_dataloader): |
|
with torch.no_grad(): |
|
outputs = model(**batch) |
|
predictions = torch.stack([logit.argmax(dim=0) for logit in outputs.logits]) |
|
(predictions, references) = accelerator.gather((predictions, batch['labels'])) |
|
if accelerator.num_processes > 1: |
|
if step == len(eval_dataloader) - 1:
|
predictions = predictions[:len(eval_dataloader.dataset) - samples_seen] |
|
references = references[:len(eval_dataloader.dataset) - samples_seen] |
|
else: |
|
samples_seen += references.shape[0] |
|
metric.add_batch(predictions=predictions, references=references) |
|
total_loss += outputs.loss.detach().float() |
|
total_step += 1 |
|
eval_metric = metric.compute() |
|
logger.info(f'epoch {epoch}: {eval_metric}') |
|
if args.with_tracking: |
|
wandb.log({'eval_accuracy': eval_metric, 'eval_loss': total_loss / total_step, 'epoch': epoch, 'epoch_step': completed_steps}) |
|
if args.checkpointing_steps == 'epoch': |
|
output_dir = f'epoch_{epoch}' |
|
if args.output_dir is not None: |
|
output_dir = os.path.join(args.output_dir, output_dir) |
|
os.makedirs(output_dir, exist_ok=True) |
|
unwrapped_model = accelerator.unwrap_model(model) |
|
torch.save(unwrapped_model.state_dict(), os.path.join(output_dir, 'model.pth')) |
|
if args.output_dir is not None: |
|
with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f: |
|
json.dump({'eval_accuracy': eval_metric['accuracy']}, f) |
|
if __name__ == '__main__': |
|
main() |
|
|
|
# File: WebShop-master/baseline_models/train_rl.py |
|
import argparse |
|
import logging |
|
import time |
|
import torch |
|
from collections import defaultdict |
|
import logger |
|
from agent import Agent, TransitionPG |
|
from env import WebEnv |
|
logging.getLogger().setLevel(logging.CRITICAL) |
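
# configure_logger() wires up the project-local `logger` module: file logs plus a
# key/value tracker `tb` (json/stdout outputs, and wandb when enabled) used throughout training.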
|
|
|
def configure_logger(log_dir, wandb): |
|
logger.configure(log_dir, format_strs=['log']) |
|
global tb |
|
type_strs = ['json', 'stdout'] |
|
if wandb: |
|
type_strs += ['wandb'] |
|
tb = logger.Logger(log_dir, [logger.make_output_format(type_str, log_dir) for type_str in type_strs]) |
|
global log |
|
log = logger.log |
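
# evaluate()/evaluate_episode(): run `nb_episodes` greedy rollouts on the eval or test
# split, log every observation/action/reward, and return the mean episode score.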
|
|
|
def evaluate(agent, env, split, nb_episodes=10): |
|
with torch.no_grad(): |
|
total_score = 0 |
|
for method in ['greedy']: |
|
for ep in range(nb_episodes): |
|
log('Starting {} episode {}'.format(split, ep)) |
|
if split == 'eval': |
|
score = evaluate_episode(agent, env, split, method) |
|
elif split == 'test': |
|
score = evaluate_episode(agent, env, split, method, idx=ep) |
|
log('{} episode {} ended with score {}\n\n'.format(split, ep, score)) |
|
total_score += score |
|
avg_score = total_score / nb_episodes |
|
return avg_score |
|
|
|
def evaluate_episode(agent, env, split, method='greedy', idx=None): |
|
step = 0 |
|
done = False |
|
(ob, info) = env.reset(idx) |
|
state = agent.build_state(ob, info) |
|
log('Obs{}: {}'.format(step, ob.encode('utf-8'))) |
|
while not done: |
|
valid_acts = info['valid'] |
|
with torch.no_grad(): |
|
action_str = agent.act([state], [valid_acts], method=method)[0][0] |
|
log('Action{}: {}'.format(step, action_str)) |
|
(ob, rew, done, info) = env.step(action_str) |
|
log('Reward{}: {}, Score {}, Done {}'.format(step, rew, info['score'], done)) |
|
step += 1 |
|
log('Obs{}: {}'.format(step, ob.encode('utf-8'))) |
|
state = agent.build_state(ob, info) |
|
tb.logkv_mean(f'{split}Score', info['score']) |
|
if 'verbose' in info: |
|
for (k, v) in info['verbose'].items(): |
|
if k.startswith('r'): |
|
tb.logkv_mean(f'{split}_' + k, v) |
|
return info['score'] |
|
|
|
def agg(envs, attr): |
|
res = defaultdict(int) |
|
for env in envs: |
|
for (k, v) in getattr(env, attr).items(): |
|
res[k] += v |
|
return res |
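
# train(): policy-gradient training over `num_envs` parallel WebEnv instances. Transitions
# are buffered for `bptt` steps, then agent.update() consumes them (bootstrapping from the
# value estimates of the next states); finished environments are reset in place, and the
# loop periodically evaluates, logs aggregated env stats, and checkpoints the agent.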
|
|
|
def train(agent, eval_env, test_env, envs, args): |
|
start = time.time() |
|
(states, valids, transitions) = ([], [], []) |
|
state0 = None |
|
for env in envs: |
|
(ob, info) = env.reset() |
|
if state0 is None: |
|
state0 = (ob, info) |
|
states.append(agent.build_state(ob, info)) |
|
valids.append(info['valid']) |
|
for step in range(1, args.max_steps + 1): |
|
(action_strs, action_ids, values) = agent.act(states, valids, method=args.exploration_method) |
|
with torch.no_grad(): |
|
(action_values, _) = agent.network.rl_forward(states[:1], agent.encode_valids(valids[:1])) |
|
actions = sorted(zip(state0[1]['valid'], action_values.tolist()), key=lambda x: -x[1]) |
|
log('State {}: {}'.format(step, state0[0].lower().encode('utf-8'))) |
|
log('Goal {}: {}'.format(step, state0[1]['goal'].lower().encode('utf-8'))) |
|
log('Actions{}: {}'.format(step, actions)) |
|
log('>> Values{}: {}'.format(step, float(values[0]))) |
|
log('>> Action{}: {}'.format(step, action_strs[0])) |
|
state0 = None |
|
(next_states, next_valids, rewards, dones) = ([], [], [], []) |
|
for (env, action_str, action_id, state) in zip(envs, action_strs, action_ids, states): |
|
(ob, reward, done, info) = env.step(action_str) |
|
if state0 is None: |
|
state0 = (ob, info) |
|
r_att = r_opt = 0 |
|
if 'verbose' in info: |
|
r_att = info['verbose'].get('r_att', 0) |
|
r_option = info['verbose'].get('r_option', 0)
|
r_price = info['verbose'].get('r_price', 0) |
|
r_type = info['verbose'].get('r_type', 0) |
|
w_att = info['verbose'].get('w_att', 0) |
|
w_option = info['verbose'].get('w_option', 0) |
|
w_price = info['verbose'].get('w_price', 0) |
|
reward_str = f'{reward / 10:.2f} = ({r_att:.2f} * {w_att:.2f} + {r_option:.2f} * {w_option:.2f} + {r_price:.2f} * {w_price:.2f}) * {r_type:.2f}' |
|
else: |
|
reward_str = str(reward) |
|
log('Reward{}: {}, Done {}\n'.format(step, reward_str, done)) |
|
next_state = agent.build_state(ob, info) |
|
next_valid = info['valid'] |
|
(next_states, next_valids, rewards, dones) = (next_states + [next_state], next_valids + [next_valid], rewards + [reward], dones + [done]) |
|
if done: |
|
tb.logkv_mean('EpisodeScore', info['score']) |
|
category = env.session['goal']['category'] |
|
tb.logkv_mean(f'EpisodeScore_{category}', info['score']) |
|
if 'verbose' in info: |
|
for (k, v) in info['verbose'].items(): |
|
if k.startswith('r'): |
|
tb.logkv_mean(k, v) |
|
transitions.append(TransitionPG(states, action_ids, rewards, values, agent.encode_valids(valids), dones)) |
|
if len(transitions) >= args.bptt: |
|
(_, _, last_values) = agent.act(next_states, next_valids, method='softmax') |
|
stats = agent.update(transitions, last_values, step=step) |
|
for (k, v) in stats.items(): |
|
tb.logkv_mean(k, v) |
|
del transitions[:] |
|
torch.cuda.empty_cache() |
|
for (i, env) in enumerate(envs): |
|
if dones[i]: |
|
(ob, info) = env.reset() |
|
if i == 0: |
|
state0 = (ob, info) |
|
next_states[i] = agent.build_state(ob, info) |
|
next_valids[i] = info['valid'] |
|
(states, valids) = (next_states, next_valids) |
|
if step % args.eval_freq == 0: |
|
evaluate(agent, eval_env, 'eval') |
|
if step % args.test_freq == 0: |
|
evaluate(agent, test_env, 'test', 500) |
|
if step % args.log_freq == 0: |
|
tb.logkv('Step', step) |
|
tb.logkv('FPS', int(step * len(envs) / (time.time() - start))) |
|
for (k, v) in agg(envs, 'stats').items(): |
|
tb.logkv(k, v) |
|
items_clicked = agg(envs, 'items_clicked') |
|
tb.logkv('ItemsClicked', len(items_clicked)) |
|
tb.dumpkvs() |
|
if step % args.ckpt_freq == 0: |
|
agent.save() |
|
|
|
def parse_args(): |
|
parser = argparse.ArgumentParser() |
|
parser.add_argument('--seed', default=0, type=int) |
|
parser.add_argument('--output_dir', default='logs') |
|
parser.add_argument('--ckpt_freq', default=10000, type=int) |
|
parser.add_argument('--eval_freq', default=500, type=int) |
|
parser.add_argument('--test_freq', default=5000, type=int) |
|
parser.add_argument('--log_freq', default=100, type=int) |
|
parser.add_argument('--wandb', default=1, type=int) |
|
parser.add_argument('--num_envs', default=4, type=int) |
|
parser.add_argument('--step_limit', default=100, type=int) |
|
parser.add_argument('--max_steps', default=300000, type=int) |
|
parser.add_argument('--learning_rate', default=1e-05, type=float) |
|
parser.add_argument('--gamma', default=0.9, type=float) |
|
parser.add_argument('--clip', default=10, type=float) |
|
parser.add_argument('--bptt', default=8, type=int) |
|
parser.add_argument('--exploration_method', default='softmax', type=str, choices=['eps', 'softmax']) |
|
parser.add_argument('--w_pg', default=1, type=float) |
|
parser.add_argument('--w_td', default=1, type=float) |
|
parser.add_argument('--w_il', default=0, type=float) |
|
parser.add_argument('--w_en', default=1, type=float) |
|
parser.add_argument('--network', default='bert', type=str, choices=['bert', 'rnn']) |
|
parser.add_argument('--bert_path', default='', type=str, help='which bert to load') |
|
parser.add_argument('--embedding_dim', default=128, type=int) |
|
parser.add_argument('--hidden_dim', default=128, type=int) |
|
parser.add_argument('--grad_encoder', default=1, type=int) |
|
parser.add_argument('--get_image', default=1, type=int, help='use image in models') |
|
parser.add_argument('--num', default=None, type=int) |
|
parser.add_argument('--click_item_name', default=1, type=int) |
|
parser.add_argument('--state_format', default='text_rich', type=str) |
|
parser.add_argument('--human_goals', default=1, type=int, help='use human goals') |
|
parser.add_argument('--num_prev_obs', default=0, type=int, help='number of previous observations') |
|
parser.add_argument('--num_prev_actions', default=0, type=int, help='number of previous actions') |
|
parser.add_argument('--extra_search_path', default='./data/goal_query_predict.json', type=str, help='path for extra search queries') |
|
parser.add_argument('--ban_buy', default=0, type=int, help='ban buy action before selecting options') |
|
parser.add_argument('--score_handicap', default=0, type=int, help='provide score in state') |
|
parser.add_argument('--go_to_item', default=0, type=int) |
|
parser.add_argument('--go_to_search', default=0, type=int) |
|
parser.add_argument('--harsh_reward', default=0, type=int) |
|
parser.add_argument('--debug', default=0, type=int, help='debug mode') |
|
parser.add_argument('--f', help='a dummy argument to fool ipython', default='1') |
|
return parser.parse_known_args() |
|
|
|
def main(): |
|
(args, unknown) = parse_args() |
|
if args.debug: |
|
args.num_envs = 2 |
|
args.wandb = 0 |
|
args.human_goals = 0 |
|
args.num = 100 |
|
print(unknown) |
|
print(args) |
|
configure_logger(args.output_dir, args.wandb) |
|
agent = Agent(args) |
|
train_env = WebEnv(args, split='train', id='train_') |
|
server = train_env.env.server |
|
eval_env = WebEnv(args, split='eval', id='eval_', server=server) |
|
test_env = WebEnv(args, split='test', id='test_', server=server) |
|
envs = [WebEnv(args, split='train', server=server, id=f'train{i}_') for i in range(args.num_envs)] |
|
print('loaded') |
|
train(agent, eval_env, test_env, envs, args) |
|
if __name__ == '__main__': |
|
main() |
|
|
|
# File: WebShop-master/baseline_models/train_search_il.py |
|
import json |
|
import os |
|
import random |
|
from datasets import Dataset, DatasetDict, load_from_disk |
|
from transformers import BartForConditionalGeneration, BartTokenizer, Trainer, TrainingArguments |
|
from transformers.models.bart.modeling_bart import shift_tokens_right |
|
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') |
|
BOS_TOKEN_ID = 0 |
|
PAD_TOKEN_ID = 1 |
|
EOS_TOKEN_ID = 2 |
|
UNK_TOKEN_ID = 3 |
|
PATH = './data/goal_query_map.json' |
|
HUMAN_GOAL_PATH = './data/human_goals.json' |
|
GOAL_PATH = './data/items_human_ins.json' |
|
|
|
def process_str(s): |
|
s = s.lower().replace('"', '').replace("'", '').strip() |
|
return s |
|
|
|
def process_goal(state): |
|
state = state.lower().replace('"', '').replace("'", '') |
|
state = state.replace('amazon shopping game\ninstruction:', '').replace('webshop\ninstruction:', '') |
|
state = state.replace('\n[button] search [button_]', '').strip() |
|
if ', and price lower than' in state: |
|
state = state.split(', and price lower than')[0] |
|
return state |
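
# get_data(): builds (goal, search query) pairs from the goal->query map, keeping only
# goals whose index in the human-goal list falls in the split's range; the 'all' split
# instead returns every instruction from the items file (processed and raw).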
|
|
|
def get_data(split): |
|
data = json.load(open(PATH)) |
|
(goals, searches) = ([], []) |
|
for (goal, search_list) in data.items(): |
|
goal = process_goal(goal) |
|
for search in search_list: |
|
search = process_str(search) |
|
goals.append(goal) |
|
searches.append(search) |
|
n = len(goals) |
|
human_goals = json.load(open(HUMAN_GOAL_PATH, 'r')) |
|
goal_range = range(len(human_goals)) |
|
if split == 'train': |
|
goal_range = range(500, len(human_goals)) |
|
elif split == 'validation': |
|
goal_range = range(500, 1500) |
|
elif split == 'test': |
|
goal_range = range(0, 500) |
|
elif split == 'all': |
|
all_data = json.load(open(GOAL_PATH)) |
|
all_goals = [] |
|
all_goals_processed = [] |
|
for ins_list in all_data.values(): |
|
for ins in ins_list: |
|
ins = ins['instruction'] |
|
all_goals.append(ins) |
|
all_goals_processed.append(process_str(ins)) |
|
return (all_goals_processed, all_goals) |
|
(goals_, searches_) = ([], []) |
|
for (goal, search) in zip(goals, searches): |
|
if goal in human_goals and human_goals.index(goal) in goal_range: |
|
goals_.append(goal) |
|
searches_.append(search) |
|
return (goals_, searches_) |
|
|
|
def get_dataset(name, flip=False, variant=None, size=None): |
|
fname = name + '-flip' if flip else name |
|
fpath = os.path.join(os.path.dirname(__file__), fname) |
|
d = {} |
|
splits = ['train', 'validation', 'test'] |
|
if name == 'web_search': |
|
splits = ['train', 'validation', 'test', 'all'] |
|
for split in splits: |
|
(input, output) = get_data(split) if name != 'nl2bash' else get_data(split, variant=variant) |
|
l = len(input) if size is None else int(len(input) * size) |
|
print('{} size: {}'.format(split, l)) |
|
if flip: |
|
(input, output) = (output, input) |
|
(input, output) = (input[:l], output[:l]) |
|
d[split] = process_dataset(input, output) |
|
d = DatasetDict(d) |
|
return d |
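
# process_dataset(): tokenizes goal/query pairs for BART, derives decoder_input_ids via
# shift_tokens_right, and masks padding positions in the labels with -100 so they are
# ignored by the cross-entropy loss.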
|
|
|
def process_dataset(input, output, max_len=256): |
|
input_encodings = tokenizer(input, padding='max_length', max_length=max_len, truncation=True, return_tensors='pt') |
|
output_encodings = tokenizer(output, padding='max_length', max_length=max_len, truncation=True, return_tensors='pt') |
|
labels = output_encodings['input_ids'] |
|
decoder_input_ids = shift_tokens_right(labels, PAD_TOKEN_ID, EOS_TOKEN_ID) |
|
labels[labels[:, :] == PAD_TOKEN_ID] = -100 |
|
dataset = Dataset.from_dict({'input_ids': input_encodings['input_ids'], 'attention_mask': input_encodings['attention_mask'], 'decoder_input_ids': decoder_input_ids, 'labels': labels}) |
|
dataset.set_format(type='torch', columns=['input_ids', 'labels', 'decoder_input_ids', 'attention_mask']) |
|
return dataset |
|
if __name__ == '__main__': |
|
dataset = get_dataset('web_search', flip=False) |
|
train_dataset = dataset['train'] |
|
print(train_dataset[0]) |
|
model = BartForConditionalGeneration.from_pretrained('facebook/bart-base') |
|
model.resize_token_embeddings(len(tokenizer)) |
|
training_args = TrainingArguments(output_dir='./ckpts/web_search', num_train_epochs=10, per_device_train_batch_size=4, per_device_eval_batch_size=4, warmup_steps=50, weight_decay=0.01, evaluation_strategy='steps', logging_dir='./logs', logging_steps=50, eval_steps=20, save_steps=200) |
|
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=dataset['validation'], compute_metrics=None) |
|
trainer.train() |
|
|
|
# File: WebShop-master/run_envs/run_web_agent_site_env.py |
|
"""""" |
|
import gym |
|
from rich import print |
|
from rich.markup import escape |
|
from web_agent_site.envs import WebAgentSiteEnv |
|
from web_agent_site.models import HumanPolicy, RandomPolicy |
|
from web_agent_site.utils import DEBUG_PROD_SIZE |
|
if __name__ == '__main__': |
|
env = WebAgentSiteEnv(observation_mode='text', render=False, num_products=DEBUG_PROD_SIZE) |
|
global_step = 0 |
|
try: |
|
policy = RandomPolicy() |
|
observation = env.observation |
|
while True: |
|
print(observation) |
|
available_actions = env.get_available_actions() |
|
print('Available actions:', available_actions) |
|
action = policy.forward(observation, available_actions) |
|
(observation, reward, done, info) = env.step(action) |
|
print(f'Taking action "{escape(action)}" -> Reward = {reward}') |
|
if done: |
|
break |
|
global_step += 1 |
|
finally: |
|
env.close() |
|
|
|
# File: WebShop-master/run_envs/run_web_agent_text_env.py |
|
"""""" |
|
import gym |
|
from rich import print |
|
from rich.markup import escape |
|
from web_agent_site.envs import WebAgentTextEnv |
|
from web_agent_site.models import RandomPolicy |
|
from web_agent_site.utils import DEBUG_PROD_SIZE |
|
if __name__ == '__main__': |
|
env = gym.make('WebAgentTextEnv-v0', observation_mode='text', num_products=DEBUG_PROD_SIZE) |
|
env.reset() |
|
try: |
|
policy = RandomPolicy() |
|
observation = env.observation |
|
while True: |
|
print(observation) |
|
available_actions = env.get_available_actions() |
|
print('Available actions:', available_actions) |
|
action = policy.forward(observation, available_actions) |
|
(observation, reward, done, info) = env.step(action) |
|
print(f'Taking action "{escape(action)}" -> Reward = {reward}') |
|
if done: |
|
break |
|
finally: |
|
env.close() |
|
|
|
# File: WebShop-master/search_engine/convert_product_file_format.py |
|
import sys |
|
import json |
|
from tqdm import tqdm |
|
sys.path.insert(0, '../') |
|
from web_agent_site.utils import DEFAULT_FILE_PATH |
|
from web_agent_site.engine.engine import load_products |
|
(all_products, *_) = load_products(filepath=DEFAULT_FILE_PATH) |
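
# Convert the product dump into Pyserini-style JSONL documents (id / contents / full product
# record) and write 100 / 1k / 100k / full subsets, presumably for building Lucene indexes
# of different sizes.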
|
docs = [] |
|
for p in tqdm(all_products, total=len(all_products)): |
|
option_texts = [] |
|
options = p.get('options', {}) |
|
for (option_name, option_contents) in options.items(): |
|
option_contents_text = ', '.join(option_contents) |
|
option_texts.append(f'{option_name}: {option_contents_text}') |
|
option_text = ', and '.join(option_texts) |
|
doc = dict() |
|
doc['id'] = p['asin'] |
|
doc['contents'] = ' '.join([p['Title'], p['Description'], p['BulletPoints'][0], option_text]).lower() |
|
doc['product'] = p |
|
docs.append(doc) |
|
with open('./resources_100/documents.jsonl', 'w+') as f: |
|
for doc in docs[:100]: |
|
f.write(json.dumps(doc) + '\n') |
|
with open('./resources/documents.jsonl', 'w+') as f: |
|
for doc in docs: |
|
f.write(json.dumps(doc) + '\n') |
|
with open('./resources_1k/documents.jsonl', 'w+') as f: |
|
for doc in docs[:1000]: |
|
f.write(json.dumps(doc) + '\n') |
|
with open('./resources_100k/documents.jsonl', 'w+') as f: |
|
for doc in docs[:100000]: |
|
f.write(json.dumps(doc) + '\n') |
|
|
|
# File: WebShop-master/search_engine/lucene_searcher.py |
|
import json |
|
from pyserini.search.lucene import LuceneSearcher |
|
from rich import print |
|
searcher = LuceneSearcher('indexes') |
|
hits = searcher.search('rubber sole shoes', k=20) |
|
for hit in hits: |
|
doc = searcher.doc(hit.docid) |
|
print(doc) |
|
obj = json.loads(doc.raw())['product']['Title'] |
|
print(obj) |
|
print(len(hits)) |
|
|
|
# File: WebShop-master/transfer/app.py |
|
import gradio as gr |
|
import json, time, torch |
|
from transformers import BartTokenizer, BartForConditionalGeneration, AutoModel, AutoTokenizer |
|
from webshop_lite import dict_to_fake_html |
|
from predict_help import Page, convert_dict_to_actions, convert_html_to_text, parse_results_amz, parse_item_page_amz, parse_results_ws, parse_item_page_ws, parse_results_ebay, parse_item_page_ebay, WEBSHOP_URL, WEBSHOP_SESSION |
|
ENVIRONMENTS = ['amazon', 'webshop', 'ebay'] |
|
BERT_MODEL_PATH = 'webshop/il-choice-bert-image_0' |
|
bart_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') |
|
bart_model = BartForConditionalGeneration.from_pretrained('webshop/il_search_bart') |
|
bert_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left') |
|
bert_tokenizer.add_tokens(['[button]', '[button_]', '[clicked button]', '[clicked button_]'], special_tokens=True) |
|
bert_model = AutoModel.from_pretrained(BERT_MODEL_PATH, trust_remote_code=True) |
|
|
|
def process_str(s): |
|
s = s.lower().replace('"', '').replace("'", '').strip() |
|
s = s.replace('[sep]', '[SEP]') |
|
return s |
|
|
|
def process_goal(state): |
|
state = state.lower().replace('"', '').replace("'", '') |
|
state = state.replace('amazon shopping game\ninstruction:', '').replace('webshop\ninstruction:', '') |
|
state = state.replace('\n[button] search [button_]', '').strip() |
|
if ', and price lower than' in state: |
|
state = state.split(', and price lower than')[0] |
|
return state |
|
|
|
def data_collator(batch): |
|
(state_input_ids, state_attention_mask, action_input_ids, action_attention_mask, sizes, labels, images) = ([], [], [], [], [], [], []) |
|
for sample in batch: |
|
state_input_ids.append(sample['state_input_ids']) |
|
state_attention_mask.append(sample['state_attention_mask']) |
|
action_input_ids.extend(sample['action_input_ids']) |
|
action_attention_mask.extend(sample['action_attention_mask']) |
|
sizes.append(sample['sizes']) |
|
labels.append(sample['labels']) |
|
images.append(sample['images']) |
|
max_state_len = max((sum(x) for x in state_attention_mask)) |
|
max_action_len = max((sum(x) for x in action_attention_mask)) |
|
return {'state_input_ids': torch.tensor(state_input_ids)[:, :max_state_len], 'state_attention_mask': torch.tensor(state_attention_mask)[:, :max_state_len], 'action_input_ids': torch.tensor(action_input_ids)[:, :max_action_len], 'action_attention_mask': torch.tensor(action_attention_mask)[:, :max_action_len], 'sizes': torch.tensor(sizes), 'images': torch.tensor(images), 'labels': torch.tensor(labels)} |
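
# bart_predict(): beam-search decodes a search query from the goal text (5 beams, top
# sequence kept). bert_predict(): scores every valid click action with the choice model
# and either samples from the softmax or takes the argmax.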
|
|
|
def bart_predict(input): |
|
input_ids = bart_tokenizer(input)['input_ids'] |
|
input_ids = torch.tensor(input_ids).unsqueeze(0) |
|
output = bart_model.generate(input_ids, max_length=512, num_return_sequences=5, num_beams=5) |
|
return bart_tokenizer.batch_decode(output.tolist(), skip_special_tokens=True)[0] |
|
|
|
def bert_predict(obs, info, softmax=True): |
|
valid_acts = info['valid'] |
|
assert valid_acts[0].startswith('click[') |
|
state_encodings = bert_tokenizer(process_str(obs), max_length=512, truncation=True, padding='max_length') |
|
action_encodings = bert_tokenizer(list(map(process_str, valid_acts)), max_length=512, truncation=True, padding='max_length') |
|
batch = {'state_input_ids': state_encodings['input_ids'], 'state_attention_mask': state_encodings['attention_mask'], 'action_input_ids': action_encodings['input_ids'], 'action_attention_mask': action_encodings['attention_mask'], 'sizes': len(valid_acts), 'images': info['image_feat'].tolist(), 'labels': 0} |
|
batch = data_collator([batch]) |
|
outputs = bert_model(**batch) |
|
if softmax: |
|
idx = torch.multinomial(torch.nn.functional.softmax(outputs.logits[0], dim=0), 1)[0].item() |
|
else: |
|
idx = outputs.logits[0].argmax(0).item() |
|
return valid_acts[idx] |
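
# get_return_value(): assembles the final outputs for the Gradio demo - a reduced product
# dict, the selected options, and an HTML snippet linking to the chosen item.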
|
|
|
def get_return_value(env, asin, options, search_terms, page_num, product): |
|
asin_url = None |
|
if env == 'webshop': |
|
query_str = '+'.join(search_terms.split()) |
|
options_str = json.dumps(options) |
|
asin_url = f'{WEBSHOP_URL}/item_page/{WEBSHOP_SESSION}/{asin}/{query_str}/{page_num}/{options_str}' |
|
else: |
|
asin_url = f'https://www.ebay.com/itm/{asin}' if env == 'ebay' else f'https://www.amazon.com/dp/{asin}' |
|
product_reduced = {k: v for (k, v) in product.items() if k in ['asin', 'Title', 'Description', 'BulletPoints']} |
|
product_reduced['Description'] = product_reduced['Description'][:100] + '...' |
|
product_reduced['Features'] = product_reduced.pop('BulletPoints') |
|
product_reduced['Features'] = product_reduced['Features'][:100] + '...' |
|
html = '<!DOCTYPE html><html><head><title>Chosen Product</title></head><body>' |
|
html += f'''Product Image:<img src="{product['MainImage']}" height="50px" /><br>''' if len(product['MainImage']) > 0 else '' |
|
html += f'Link to Product:\n <a href="{asin_url}" style="color:blue;text-decoration:underline;" target="_blank">{asin_url}</a>\n </body></html>' |
|
return (product_reduced, options if len(options) > 0 else 'None Selected', html) |
|
|
|
def predict(obs, info): |
|
valid_acts = info['valid'] |
|
if valid_acts[0].startswith('click['): |
|
return bert_predict(obs, info) |
|
else: |
|
return 'search[' + bart_predict(process_goal(obs)) + ']' |
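
# run_episode(): the sim-to-real loop. It feeds WebShop-style observations to the models,
# maps each predicted action onto the live Amazon/eBay/WebShop page state, caches parsed
# search-results and item pages, and stops when "buy now" is clicked (or after a step cap).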
|
|
|
def run_episode(goal, env, verbose=True): |
|
env = env.lower() |
|
if env not in ENVIRONMENTS: |
|
print(f'[ERROR] Environment {env} not recognized') |
|
obs = 'Amazon Shopping Game\nInstruction:' + goal + '\n[button] search [button]' |
|
info = {'valid': ['search[stuff]'], 'image_feat': torch.zeros(512)} |
|
product_map = {} |
|
title_to_asin_map = {} |
|
search_results_cache = {} |
|
(visited_asins, clicked_options) = (set(), set()) |
|
(sub_page_type, page_type, page_num) = (None, None, None) |
|
(search_terms, prod_title, asin) = (None, None, None) |
|
options = {} |
|
for i in range(100): |
|
action = predict(obs, info) |
|
if verbose: |
|
print('====') |
|
print(action) |
|
action_content = action[action.find('[') + 1:action.find(']')] |
|
prev_page_type = page_type |
|
if action.startswith('search['): |
|
page_type = Page.RESULTS |
|
search_terms = action_content |
|
page_num = 1 |
|
elif action.startswith('click['): |
|
if action.startswith('click[item -'): |
|
prod_title = action_content[len('item -'):].strip() |
|
found = False |
|
for key in title_to_asin_map: |
|
if prod_title == key: |
|
asin = title_to_asin_map[key] |
|
page_type = Page.ITEM_PAGE |
|
visited_asins.add(asin) |
|
found = True |
|
break |
|
if not found: |
|
raise Exception('Product to click not found') |
|
elif any((x.value in action for x in [Page.DESC, Page.FEATURES, Page.REVIEWS])): |
|
page_type = Page.SUB_PAGE |
|
sub_page_type = Page(action_content.lower()) |
|
elif action == 'click[< prev]': |
|
if sub_page_type is not None: |
|
(page_type, sub_page_type) = (Page.ITEM_PAGE, None) |
|
elif prev_page_type == Page.ITEM_PAGE: |
|
page_type = Page.RESULTS |
|
(options, clicked_options) = ({}, set()) |
|
elif prev_page_type == Page.RESULTS and page_num > 1: |
|
page_type = Page.RESULTS |
|
page_num -= 1 |
|
elif action == 'click[next >]': |
|
page_type = Page.RESULTS |
|
page_num += 1 |
|
elif action.lower() == 'click[back to search]': |
|
page_type = Page.SEARCH |
|
elif action == 'click[buy now]': |
|
return get_return_value(env, asin, options, search_terms, page_num, product_map[asin]) |
|
elif prev_page_type == Page.ITEM_PAGE: |
|
found = False |
|
for (opt_name, opt_values) in product_map[asin]['options'].items(): |
|
if action_content in opt_values: |
|
options[opt_name] = action_content |
|
page_type = Page.ITEM_PAGE |
|
clicked_options.add(action_content) |
|
found = True |
|
break |
|
if not found: |
|
raise Exception('Unrecognized action: ' + action) |
|
else: |
|
raise Exception('Unrecognized action: ' + action)
|
if verbose: |
|
print(f'Parsing {page_type.value} page...') |
|
if page_type == Page.RESULTS: |
|
if search_terms in search_results_cache: |
|
data = search_results_cache[search_terms] |
|
if verbose: |
|
print(f'Loading cached results page for "{search_terms}"') |
|
else: |
|
begin = time.time() |
|
if env == 'amazon': |
|
data = parse_results_amz(search_terms, page_num, verbose) |
|
if env == 'webshop': |
|
data = parse_results_ws(search_terms, page_num, verbose) |
|
if env == 'ebay': |
|
data = parse_results_ebay(search_terms, page_num, verbose) |
|
end = time.time() |
|
if verbose: |
|
print(f'Parsing search results took {end - begin} seconds') |
|
search_results_cache[search_terms] = data |
|
for d in data: |
|
title_to_asin_map[d['Title']] = d['asin'] |
|
elif page_type == Page.ITEM_PAGE or page_type == Page.SUB_PAGE: |
|
if asin in product_map: |
|
if verbose: |
|
print('Loading cached item page for', asin) |
|
data = product_map[asin] |
|
else: |
|
begin = time.time() |
|
if env == 'amazon': |
|
data = parse_item_page_amz(asin, verbose) |
|
if env == 'webshop': |
|
data = parse_item_page_ws(asin, search_terms, page_num, options, verbose) |
|
if env == 'ebay': |
|
data = parse_item_page_ebay(asin, verbose) |
|
end = time.time() |
|
if verbose: |
|
print('Parsing item page took', end - begin, 'seconds') |
|
product_map[asin] = data |
|
elif page_type == Page.SEARCH: |
|
if verbose: |
|
print('Executing search') |
|
obs = 'Amazon Shopping Game\nInstruction:' + goal + '\n[button] search [button]' |
|
info = {'valid': ['search[stuff]'], 'image_feat': torch.zeros(512)} |
|
continue |
|
else: |
|
raise Exception('Page of type `', page_type, '` not found') |
|
begin = time.time() |
|
html_str = dict_to_fake_html(data, page_type, asin, sub_page_type, options, product_map, goal) |
|
obs = convert_html_to_text(html_str, simple=False, clicked_options=clicked_options, visited_asins=visited_asins) |
|
end = time.time() |
|
if verbose: |
|
print('[Page Info -> WebShop HTML -> Observation] took', end - begin, 'seconds') |
|
begin = time.time() |
|
prod_arg = product_map if page_type == Page.ITEM_PAGE else data |
|
info = convert_dict_to_actions(page_type, prod_arg, asin, page_num) |
|
end = time.time() |
|
if verbose: |
|
print('Extracting available actions took', end - begin, 'seconds') |
|
if i == 50: |
|
return get_return_value(env, asin, options, search_terms, page_num, product_map[asin]) |
|
gr.Interface(fn=run_episode, inputs=[gr.inputs.Textbox(lines=7, label='Input Text'), gr.inputs.Radio(['Amazon', 'eBay'], type='value', default='Amazon', label='Environment')], outputs=[gr.outputs.JSON(label='Selected Product'), gr.outputs.JSON(label='Selected Options'), gr.outputs.HTML()], examples=[['I want to find a gold floor lamp with a glass shade and a nickel finish that i can use for my living room, and price lower than 270.00 dollars', 'Amazon'], ['I need some cute heart-shaped glittery cupcake picks as a gift to bring to a baby shower', 'Amazon'], ['I want to buy ballet shoes which have rubber sole in grey suede color and a size of 6', 'Amazon'], ['I would like a 7 piece king comforter set decorated with flowers and is machine washable', 'Amazon'], ["I'm trying to find white bluetooth speakers that are not only water resistant but also come with stereo sound", 'eBay'], ['find me the soy free 3.5 ounce 4-pack of dang thai rice chips, and make sure they are the aged cheddar flavor. i also need the ones in the resealable bags', 'eBay'], ['I am looking for a milk chocolate of 1 pound size in a single pack for valentine day', 'eBay'], ["I'm looking for a mini pc intel core desktop computer which supports with windows 11", 'eBay']], title='WebShop', article="<p style='padding-top:15px;text-align:center;'>To learn more about this project, check out the <a href='https://webshop-pnlp.github.io/' target='_blank'>project page</a>!</p>", description="<p style='text-align:center;'>Sim-to-real transfer of agent trained on WebShop to search a desired product on Amazon from any natural language query!</p>").launch(inline=False) |
|
|
|
# File: WebShop-master/transfer/predict_help.py |
|
from bs4 import BeautifulSoup |
|
from bs4.element import Comment |
|
from enum import Enum |
|
import re, time |
|
from urllib.parse import urlencode |
|
import json, requests, torch |
|
|
|
class Page(Enum): |
|
DESC = 'description' |
|
FEATURES = 'features' |
|
ITEM_PAGE = 'item_page' |
|
RESULTS = 'results' |
|
REVIEWS = 'reviews' |
|
SEARCH = 'search' |
|
SUB_PAGE = 'item_sub_page' |
|
HEADER_ = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.64 Safari/537.36' |
|
DEBUG_HTML = 'temp.html' |
|
NUM_PROD_LIMIT = 10 |
|
WEBSHOP_URL = 'http://3.83.245.205:3000' |
|
WEBSHOP_SESSION = 'abc' |
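
# Scraping helpers: parse_results_* / parse_item_page_* fetch and parse live eBay, Amazon,
# and WebShop pages into the product dict format the agent expects. WEBSHOP_URL above
# appears to point at a specific hosted WebShop demo instance.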
|
|
|
def parse_results_ebay(query, page_num=None, verbose=True): |
|
query_string = '+'.join(query.split()) |
|
page_num = 1 if page_num is None else page_num |
|
url = f'https://www.ebay.com/sch/i.html?_nkw={query_string}&_pgn={page_num}' |
|
if verbose: |
|
print(f'Search Results URL: {url}') |
|
webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
soup = BeautifulSoup(webpage.text, 'html.parser') |
|
products = soup.select('.s-item__wrapper.clearfix') |
|
results = [] |
|
for item in products[:NUM_PROD_LIMIT]: |
|
title = item.select_one('.s-item__title').text.strip() |
|
if 'shop on ebay' in title.lower(): |
|
continue |
|
link = item.select_one('.s-item__link')['href'] |
|
asin = link.split('?')[0][len('https://www.ebay.com/itm/'):] |
|
try: |
|
price = item.select_one('.s-item__price').text |
|
if 'to' in price: |
|
prices = price.split(' to ') |
|
price = [p.strip('$') for p in prices] |
|
except: |
|
price = None |
|
results.append({'asin': asin, 'Title': title, 'Price': price}) |
|
if verbose: |
|
print(f'Scraped {len(results)} products') |
|
return results |
|
|
|
def parse_item_page_ebay(asin, verbose=True): |
|
product_dict = {} |
|
product_dict['asin'] = asin |
|
url = f'https://www.ebay.com/itm/{asin}' |
|
if verbose: |
|
print(f'Item Page URL: {url}') |
|
begin = time.time() |
|
webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
end = time.time() |
|
if verbose: |
|
print(f'Item page scraping took {end - begin} seconds') |
|
soup = BeautifulSoup(webpage.content, 'html.parser') |
|
try: |
|
product_dict['Title'] = soup.find('h1', {'class': 'x-item-title__mainTitle'}).text.strip() |
|
except: |
|
product_dict['Title'] = 'N/A' |
|
try: |
|
price_str = soup.find('div', {'class': 'mainPrice'}).text |
|
prices = re.findall('\\d*\\.?\\d+', price_str) |
|
product_dict['Price'] = prices[0] |
|
except: |
|
product_dict['Price'] = 'N/A' |
|
try: |
|
img_div = soup.find('div', {'id': 'mainImgHldr'}) |
|
img_link = img_div.find('img', {'id': 'icImg'})['src'] |
|
product_dict['MainImage'] = img_link |
|
except: |
|
product_dict['MainImage'] = '' |
|
try: |
|
rating = soup.find('span', {'class': 'reviews-star-rating'})['title'].split()[0] |
|
except: |
|
rating = None |
|
product_dict['Rating'] = rating |
|
(options, options_to_images) = ({}, {}) |
|
try: |
|
option_blocks = soup.findAll('select', {'class': 'msku-sel'}) |
|
for block in option_blocks: |
|
name = block['name'].strip().strip(':') |
|
option_tags = block.findAll('option') |
|
opt_list = [] |
|
for option_tag in option_tags: |
|
if 'select' not in option_tag.text.lower(): |
|
opt_list.append(option_tag.text) |
|
options[name] = opt_list |
|
except: |
|
options = {} |
|
(product_dict['options'], product_dict['option_to_image']) = (options, options_to_images) |
|
desc = None |
|
try: |
|
desc_link = soup.find('iframe', {'id': 'desc_ifr'})['src'] |
|
desc_webpage = requests.get(desc_link, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
desc_soup = BeautifulSoup(desc_webpage.content, 'html.parser') |
|
desc = ' '.join(desc_soup.text.split()) |
|
except: |
|
desc = 'N/A' |
|
product_dict['Description'] = desc |
|
features = None |
|
try: |
|
features = soup.find('div', {'class': 'x-about-this-item'}).text |
|
except: |
|
features = 'N/A' |
|
product_dict['BulletPoints'] = features |
|
return product_dict |
|
|
|
def parse_results_ws(query, page_num=None, verbose=True): |
|
query_string = '+'.join(query.split()) |
|
page_num = 1 if page_num is None else page_num |
|
url = f'{WEBSHOP_URL}/search_results/{WEBSHOP_SESSION}/{query_string}/{page_num}' |
|
if verbose: |
|
print(f'Search Results URL: {url}') |
|
webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
soup = BeautifulSoup(webpage.content, 'html.parser') |
|
products = soup.findAll('div', {'class': 'list-group-item'}) |
|
results = [] |
|
for product in products: |
|
asin = product.find('a', {'class': 'product-link'}) |
|
title = product.find('h4', {'class': 'product-title'}) |
|
price = product.find('h5', {'class': 'product-price'}) |
|
if '\n' in title: |
|
title = title.text.split('\n')[0].strip() |
|
else: |
|
title = title.text.strip().strip('\n') |
|
if 'to' in price.text: |
|
prices = price.text.split(' to ') |
|
price = [float(p.strip().strip('\n$')) for p in prices] |
|
else: |
|
price = float(price.text.strip().strip('\n$')) |
|
results.append({'asin': asin.text, 'Title': title, 'Price': price}) |
|
if verbose: |
|
print(f'Scraped {len(results)} products') |
|
return results |
|
|
|
def parse_item_page_ws(asin, query, page_num, options, verbose=True): |
|
product_dict = {} |
|
product_dict['asin'] = asin |
|
query_string = '+'.join(query.split()) |
|
options_string = json.dumps(options) |
|
url = f'{WEBSHOP_URL}/item_page/{WEBSHOP_SESSION}/{asin}/{query_string}/{page_num}/{options_string}' |
|
if verbose: |
|
print(f'Item Page URL: {url}') |
|
webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
soup = BeautifulSoup(webpage.content, 'html.parser') |
|
product_dict['Title'] = soup.find('h2').text |
|
h4_headers = soup.findAll('h4') |
|
for header in h4_headers: |
|
text = header.text |
|
if 'Price' in text: |
|
product_dict['Price'] = text.split(':')[1].strip().strip('$') |
|
elif 'Rating' in text: |
|
product_dict['Rating'] = text.split(':')[1].strip() |
|
product_dict['MainImage'] = soup.find('img')['src'] |
|
(options, options_to_image) = ({}, {}) |
|
option_blocks = soup.findAll('div', {'class': 'radio-toolbar'}) |
|
for block in option_blocks: |
|
name = block.find('input')['name'] |
|
labels = block.findAll('label') |
|
inputs = block.findAll('input') |
|
opt_list = [] |
|
for (label, input) in zip(labels, inputs): |
|
opt = label.text |
|
opt_img_path = input['onclick'].split('href=')[1].strip("';") |
|
opt_img_url = f'{WEBSHOP_URL}{opt_img_path}' |
|
opt_list.append(opt) |
|
options_to_image[opt] = opt_img_url |
|
options[name] = opt_list |
|
product_dict['options'] = options |
|
product_dict['option_to_image'] = options_to_image |
|
url = f'{WEBSHOP_URL}/item_sub_page/{WEBSHOP_SESSION}/{asin}/{query_string}/{page_num}/Description/{options_string}' |
|
if verbose: |
|
print(f'Item Description URL: {url}') |
|
webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
soup = BeautifulSoup(webpage.content, 'html.parser') |
|
product_dict['Description'] = soup.find(name='p', attrs={'class': 'product-info'}).text.strip() |
|
url = f'{WEBSHOP_URL}/item_sub_page/{WEBSHOP_SESSION}/{asin}/{query_string}/{page_num}/Features/{options_string}' |
|
if verbose: |
|
print(f'Item Features URL: {url}') |
|
webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
soup = BeautifulSoup(webpage.content, 'html.parser') |
|
bullets = soup.find(name='ul').findAll(name='li') |
|
product_dict['BulletPoints'] = '\n'.join([b.text.strip() for b in bullets]) |
|
return product_dict |
|
|
|
def parse_results_amz(query, page_num=None, verbose=True): |
|
url = 'https://www.amazon.com/s?k=' + query.replace(' ', '+') |
|
if page_num is not None: |
|
url += '&page=' + str(page_num) |
|
if verbose: |
|
print(f'Search Results URL: {url}') |
|
webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
soup = BeautifulSoup(webpage.content, 'html.parser') |
|
products = soup.findAll('div', {'data-component-type': 's-search-result'}) |
|
if not products:
|
temp = open(DEBUG_HTML, 'w') |
|
temp.write(str(soup)) |
|
temp.close() |
|
raise Exception("Couldn't find search results page, outputted html for inspection") |
|
results = [] |
|
for product in products[:NUM_PROD_LIMIT]: |
|
asin = product['data-asin'] |
|
title = product.find('h2', {'class': 'a-size-mini'}) |
|
price_div = product.find('div', {'class': 's-price-instructions-style'}) |
|
price = price_div.find('span', {'class': 'a-offscreen'}) |
|
result = {'asin': asin, 'Title': title.text.strip(), 'Price': price.text.strip().strip('$')} |
|
results.append(result) |
|
if verbose: |
|
print('Scraped', len(results), 'products') |
|
return results |
|
|
|
def parse_item_page_amz(asin, verbose=True): |
|
product_dict = {} |
|
product_dict['asin'] = asin |
|
url = f'https://www.amazon.com/dp/{asin}' |
|
if verbose: |
|
print('Item Page URL:', url) |
|
begin = time.time() |
|
webpage = requests.get(url, headers={'User-Agent': HEADER_, 'Accept-Language': 'en-US, en;q=0.5'}) |
|
end = time.time() |
|
if verbose: |
|
print(f'Item page scraping took {end - begin} seconds') |
|
soup = BeautifulSoup(webpage.content, 'html.parser') |
|
try: |
|
title = soup.find('span', attrs={'id': 'productTitle'}) |
|
title = title.string.strip().replace(',', '') |
|
except AttributeError: |
|
title = 'N/A' |
|
product_dict['Title'] = title |
|
try: |
|
parent_price_span = soup.find(name='span', class_='apexPriceToPay') |
|
price_span = parent_price_span.find(name='span', class_='a-offscreen') |
|
price = float(price_span.getText().replace('$', '')) |
|
except AttributeError: |
|
price = 'N/A' |
|
product_dict['Price'] = price |
|
try: |
|
rating = soup.find(name='span', attrs={'id': 'acrPopover'}) |
|
if rating is None: |
|
rating = 'N/A' |
|
else: |
|
rating = rating.text |
|
except AttributeError: |
|
rating = 'N/A' |
|
product_dict['Rating'] = rating.strip('\n').strip() |
|
try: |
|
features = soup.find(name='div', attrs={'id': 'feature-bullets'}).text |
|
except AttributeError: |
|
features = 'N/A' |
|
product_dict['BulletPoints'] = features |
|
try: |
|
desc_body = soup.find(name='div', attrs={'id': 'productDescription_feature_div'}) |
|
desc_div = desc_body.find(name='div', attrs={'id': 'productDescription'}) |
|
desc_ps = desc_div.findAll(name='p') |
|
desc = ' '.join([p.text for p in desc_ps]) |
|
except AttributeError: |
|
desc = 'N/A' |
|
product_dict['Description'] = desc.strip() |
|
try: |
|
imgtag = soup.find('img', {'id': 'landingImage'}) |
|
imageurl = dict(imgtag.attrs)['src'] |
|
except AttributeError: |
|
imageurl = '' |
|
product_dict['MainImage'] = imageurl |
|
(options, options_to_image) = ({}, {}) |
|
try: |
|
option_body = soup.find(name='div', attrs={'id': 'softlinesTwister_feature_div'}) |
|
if option_body is None: |
|
option_body = soup.find(name='div', attrs={'id': 'twister_feature_div'}) |
|
option_blocks = option_body.findAll(name='ul') |
|
for block in option_blocks: |
|
name = json.loads(block['data-a-button-group'])['name'] |
|
opt_list = [] |
|
for li in block.findAll('li'): |
|
img = li.find(name='img') |
|
if img is not None: |
|
opt = img['alt'].strip() |
|
opt_img = img['src'] |
|
if len(opt) > 0: |
|
options_to_image[opt] = opt_img |
|
else: |
|
opt = li.text.strip() |
|
if len(opt) > 0: |
|
opt_list.append(opt) |
|
options[name.replace('_name', '').replace('twister_', '')] = opt_list |
|
except AttributeError: |
|
options = {} |
|
(product_dict['options'], product_dict['option_to_image']) = (options, options_to_image) |
|
return product_dict |
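
# convert_html_to_text(): flattens rendered HTML into the WebShop text observation,
# wrapping buttons/labels in [button] ... [button] markers and tagging previously clicked
# options and visited products as [clicked button].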
|
|
|
def convert_html_to_text(html, simple=False, clicked_options=None, visited_asins=None): |
|
|
|
def tag_visible(element): |
|
ignore = {'style', 'script', 'head', 'title', 'meta', '[document]'} |
|
return element.parent.name not in ignore and (not isinstance(element, Comment)) |
|
html_obj = BeautifulSoup(html, 'html.parser') |
|
texts = html_obj.findAll(text=True) |
|
visible_texts = filter(tag_visible, texts) |
|
if simple: |
|
return ' [SEP] '.join((t.strip() for t in visible_texts if t != '\n')) |
|
else: |
|
observation = '' |
|
for t in visible_texts: |
|
if t == '\n': |
|
continue |
|
if t.parent.name == 'button': |
|
processed_t = f'[button] {t} [button]' |
|
elif t.parent.name == 'label': |
|
if f'{t}' in clicked_options: |
|
processed_t = f' [clicked button] {t} [clicked button]' |
|
observation = f'You have clicked {t}.\n' + observation |
|
else: |
|
processed_t = f' [button] {t} [button]' |
|
elif t.parent.get('class') == ['product-link']: |
|
if f'{t}' in visited_asins: |
|
processed_t = f'\n[clicked button] {t} [clicked button]' |
|
else: |
|
processed_t = f'\n[button] {t} [button]' |
|
else: |
|
processed_t = str(t) |
|
observation += processed_t + '\n' |
|
return observation |
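
# convert_dict_to_actions(): rebuilds info['valid'] for the current page type (search
# results, item page, or sub page); image_feat is filled with a 512-dim zero placeholder.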
|
|
|
def convert_dict_to_actions(page_type, products=None, asin=None, page_num=None) -> dict: |
|
info = {'valid': []} |
|
if page_type == Page.RESULTS: |
|
info['valid'] = ['click[back to search]'] |
|
if products is None or page_num is None: |
|
print(page_num) |
|
print(products) |
|
raise Exception('Provide `products`, `page_num` to get `results` valid actions') |
|
if len(products) > 10: |
|
info['valid'].append('click[next >]') |
|
if page_num > 1: |
|
info['valid'].append('click[< prev]') |
|
for product in products: |
|
info['valid'].append('click[item - ' + product['Title'] + ']') |
|
if page_type == Page.ITEM_PAGE: |
|
if products is None or asin is None: |
|
raise Exception('Provide `products` and `asin` to get `item_page` valid actions') |
|
info['valid'] = ['click[back to search]', 'click[< prev]', 'click[description]', 'click[features]', 'click[buy now]'] |
|
if 'options' in products[asin]: |
|
for (key, values) in products[asin]['options'].items(): |
|
for value in values: |
|
info['valid'].append('click[' + value + ']') |
|
if page_type == Page.SUB_PAGE: |
|
info['valid'] = ['click[back to search]', 'click[< prev]'] |
|
info['image_feat'] = torch.zeros(512) |
|
return info |
|
|
|
# File: WebShop-master/transfer/webshop_lite.py |
|
import os |
|
from flask import render_template_string, Flask |
|
from predict_help import Page |
|
app = Flask(__name__) |
|
app.debug = True |
|
SESSION_ID = 'ABC' |
|
TEMPLATE_DIR = '../web_agent_site/templates/' |
|
KEYWORDS = ['placeholder (not needed)'] |
|
QUERY = '' |
|
product_map = {} |
|
|
|
def read_html_template(path): |
|
with open(path) as f: |
|
template = f.read() |
|
return template |
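
# The view functions below are registered on '/' but are not meant to be served; they are
# called directly inside an app/test request context by dict_to_fake_html() to render
# WebShop's Jinja templates from scraped data, producing fake WebShop HTML for the agent.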
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def index(session_id, **kwargs): |
|
print('Hello world') |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def search_results(data): |
|
path = os.path.join(TEMPLATE_DIR, 'results_page.html') |
|
html = render_template_string(read_html_template(path=path), session_id=SESSION_ID, products=data, keywords=KEYWORDS, page=1, total=len(data), instruction_text=QUERY) |
|
return html |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def item_page(session_id, asin, keywords, page, options): |
|
path = os.path.join(TEMPLATE_DIR, 'item_page.html') |
|
html = render_template_string(read_html_template(path=path), session_id=session_id, product_info=product_map[asin], keywords=keywords, page=page, asin=asin, options=options, instruction_text=QUERY) |
|
return html |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def item_sub_page(session_id, asin, keywords, page, sub_page, options): |
|
path = os.path.join(TEMPLATE_DIR, sub_page.value.lower() + '_page.html') |
|
html = render_template_string(read_html_template(path), session_id=session_id, product_info=product_map[asin], keywords=keywords, page=page, asin=asin, options=options, instruction_text=QUERY) |
|
return html |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def done(asin, options, session_id, **kwargs): |
|
path = os.path.join(TEMPLATE_DIR, 'done_page.html') |
|
html = render_template_string(read_html_template(path), session_id=session_id, reward=1, asin=asin, options=product_map[asin]['options'], reward_info=kwargs.get('reward_info'), goal_attrs=kwargs.get('goal_attrs'), purchased_attrs=kwargs.get('purchased_attrs'), goal=kwargs.get('goal'), mturk_code=kwargs.get('mturk_code'), query=kwargs.get('query'), category=kwargs.get('category'), product_category=kwargs.get('product_category')) |
|
return html |
|
|
|
def dict_to_fake_html(data, page_type, asin=None, sub_page_type=None, options=None, prod_map={}, query=''): |
|
global QUERY, product_map |
|
QUERY = query |
|
product_map = prod_map |
|
with app.app_context(), app.test_request_context(): |
|
if page_type == Page.RESULTS: |
|
return search_results(data) |
|
if page_type == Page.ITEM_PAGE: |
|
return item_page(SESSION_ID, asin, KEYWORDS, 1, options) |
|
if page_type == Page.SUB_PAGE: |
|
if sub_page_type is not None: |
|
return item_sub_page(SESSION_ID, asin, KEYWORDS, 1, sub_page_type, options) |
|
else: |
|
raise Exception('Sub page of type', sub_page_type, 'unrecognized') |
|
|
|
# File: WebShop-master/web_agent_site/app.py |
|
import argparse, json, logging, random |
|
from pathlib import Path |
|
from ast import literal_eval |
|
from flask import Flask, request, redirect, url_for |
|
from rich import print |
|
from web_agent_site.engine.engine import load_products, init_search_engine, convert_web_app_string_to_var, get_top_n_product_from_keywords, get_product_per_page, map_action_to_html, END_BUTTON |
|
from web_agent_site.engine.goal import get_reward, get_goals |
|
from web_agent_site.utils import generate_mturk_code, setup_logger, DEFAULT_FILE_PATH, DEBUG_PROD_SIZE |
|
app = Flask(__name__) |
|
search_engine = None |
|
all_products = None |
|
product_item_dict = None |
|
product_prices = None |
|
attribute_to_asins = None |
|
goals = None |
|
weights = None |
|
user_sessions = dict() |
|
user_log_dir = None |
|
SHOW_ATTRS_TAB = False |
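
# Flask backend for the WebShop site. Products, the search engine, and the goal pool are
# loaded lazily on the first request; each session_id gets its own sampled (or fixed) goal,
# and every page view is logged per session when --log is passed.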
|
|
|
@app.route('/') |
|
def home(): |
|
return redirect(url_for('index', session_id='abc')) |
|
|
|
@app.route('/<session_id>', methods=['GET', 'POST']) |
|
def index(session_id): |
|
global user_log_dir |
|
global all_products, product_item_dict, product_prices, attribute_to_asins, search_engine, goals, weights, user_sessions |
|
if search_engine is None: |
|
(all_products, product_item_dict, product_prices, attribute_to_asins) = load_products(filepath=DEFAULT_FILE_PATH, num_products=DEBUG_PROD_SIZE) |
|
search_engine = init_search_engine(num_products=DEBUG_PROD_SIZE) |
|
goals = get_goals(all_products, product_prices) |
|
random.seed(233) |
|
random.shuffle(goals) |
|
weights = [goal['weight'] for goal in goals] |
|
if session_id not in user_sessions and 'fixed' in session_id: |
|
goal_dix = int(session_id.split('_')[-1]) |
|
goal = goals[goal_dix] |
|
instruction_text = goal['instruction_text'] |
|
user_sessions[session_id] = {'goal': goal, 'done': False} |
|
if user_log_dir is not None: |
|
setup_logger(session_id, user_log_dir) |
|
elif session_id not in user_sessions: |
|
goal = random.choices(goals, weights)[0] |
|
instruction_text = goal['instruction_text'] |
|
user_sessions[session_id] = {'goal': goal, 'done': False} |
|
if user_log_dir is not None: |
|
setup_logger(session_id, user_log_dir) |
|
else: |
|
instruction_text = user_sessions[session_id]['goal']['instruction_text'] |
|
if request.method == 'POST' and 'search_query' in request.form: |
|
keywords = request.form['search_query'].lower().split(' ') |
|
return redirect(url_for('search_results', session_id=session_id, keywords=keywords, page=1)) |
|
if user_log_dir is not None: |
|
logger = logging.getLogger(session_id) |
|
logger.info(json.dumps(dict(page='index', url=request.url, goal=user_sessions[session_id]['goal']))) |
|
return map_action_to_html('start', session_id=session_id, instruction_text=instruction_text) |
|
|
|
@app.route('/search_results/<session_id>/<keywords>/<page>', methods=['GET', 'POST']) |
|
def search_results(session_id, keywords, page): |
|
instruction_text = user_sessions[session_id]['goal']['instruction_text'] |
|
page = convert_web_app_string_to_var('page', page) |
|
keywords = convert_web_app_string_to_var('keywords', keywords) |
|
top_n_products = get_top_n_product_from_keywords(keywords, search_engine, all_products, product_item_dict, attribute_to_asins) |
|
products = get_product_per_page(top_n_products, page) |
|
html = map_action_to_html('search', session_id=session_id, products=products, keywords=keywords, page=page, total=len(top_n_products), instruction_text=instruction_text) |
|
logger = logging.getLogger(session_id) |
|
logger.info(json.dumps(dict(page='search_results', url=request.url, goal=user_sessions[session_id]['goal'], content=dict(keywords=keywords, search_result_asins=[p['asin'] for p in products], page=page)))) |
|
return html |
|
|
|
@app.route('/item_page/<session_id>/<asin>/<keywords>/<page>/<options>', methods=['GET', 'POST']) |
|
def item_page(session_id, asin, keywords, page, options): |
|
options = literal_eval(options) |
|
product_info = product_item_dict[asin] |
|
goal_instruction = user_sessions[session_id]['goal']['instruction_text'] |
|
product_info['goal_instruction'] = goal_instruction |
|
html = map_action_to_html('click', session_id=session_id, product_info=product_info, keywords=keywords, page=page, asin=asin, options=options, instruction_text=goal_instruction, show_attrs=SHOW_ATTRS_TAB) |
|
logger = logging.getLogger(session_id) |
|
logger.info(json.dumps(dict(page='item_page', url=request.url, goal=user_sessions[session_id]['goal'], content=dict(keywords=keywords, page=page, asin=asin, options=options)))) |
|
return html |
|
|
|
@app.route('/item_sub_page/<session_id>/<asin>/<keywords>/<page>/<sub_page>/<options>', methods=['GET', 'POST']) |
|
def item_sub_page(session_id, asin, keywords, page, sub_page, options): |
|
options = literal_eval(options) |
|
product_info = product_item_dict[asin] |
|
goal_instruction = user_sessions[session_id]['goal']['instruction_text'] |
|
product_info['goal_instruction'] = goal_instruction |
|
html = map_action_to_html(f'click[{sub_page}]', session_id=session_id, product_info=product_info, keywords=keywords, page=page, asin=asin, options=options, instruction_text=goal_instruction) |
|
logger = logging.getLogger(session_id) |
|
logger.info(json.dumps(dict(page='item_sub_page', url=request.url, goal=user_sessions[session_id]['goal'], content=dict(keywords=keywords, page=page, asin=asin, options=options)))) |
|
return html |
|
|
|
@app.route('/done/<session_id>/<asin>/<options>', methods=['GET', 'POST']) |
|
def done(session_id, asin, options): |
|
options = literal_eval(options) |
|
goal = user_sessions[session_id]['goal'] |
|
purchased_product = product_item_dict[asin] |
|
price = product_prices[asin] |
|
(reward, reward_info) = get_reward(purchased_product, goal, price=price, options=options, verbose=True) |
|
user_sessions[session_id]['done'] = True |
|
user_sessions[session_id]['reward'] = reward |
|
print(user_sessions) |
|
logger = logging.getLogger(session_id) |
|
logger.info(json.dumps(dict(page='done', url=request.url, goal=goal, content=dict(asin=asin, options=options, price=price), reward=reward, reward_info=reward_info))) |
|
del logging.root.manager.loggerDict[session_id] |
|
return map_action_to_html(f'click[{END_BUTTON}]', session_id=session_id, reward=reward, asin=asin, options=options, reward_info=reward_info, query=purchased_product['query'], category=purchased_product['category'], product_category=purchased_product['product_category'], goal_attrs=user_sessions[session_id]['goal']['attributes'], purchased_attrs=purchased_product['Attributes'], goal=goal, mturk_code=generate_mturk_code(session_id)) |
|
if __name__ == '__main__': |
|
parser = argparse.ArgumentParser(description='WebShop flask app backend configuration') |
|
parser.add_argument('--log', action='store_true', help='Log actions on WebShop in trajectory file') |
|
parser.add_argument('--attrs', action='store_true', help='Show attributes tab in item page') |
|
args = parser.parse_args() |
|
if args.log: |
|
user_log_dir = Path('user_session_logs/mturk') |
|
user_log_dir.mkdir(parents=True, exist_ok=True) |
|
SHOW_ATTRS_TAB = args.attrs |
|
app.run(host='0.0.0.0', port=3000) |
|
|
|
# File: WebShop-master/web_agent_site/attributes/annotate.py |
|
import yaml |
|
from pathlib import Path |
|
from rich import print |
|
ATTR_DIR = './data/attributes' |
|
ATTR_PATHS = ['narrow_2-gram.yaml', 'narrow_1-gram.yaml', 'broad_2-gram.yaml', 'broad_1-gram.yaml'] |
|
ATTR_PATHS = [Path(ATTR_DIR) / af for af in ATTR_PATHS] |
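
# annotate(): interactive CLI for tagging mined attributes (ITEM / PROP / USE) category by
# category; annotations are collected in memory but, as written, are not saved to disk.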
|
|
|
def annotate(attr_path): |
|
with open(attr_path) as f: |
|
attrs_by_cat = yaml.safe_load(f) |
|
unique_attrs = set() |
|
all_attrs = [] |
|
for (_, attrs) in attrs_by_cat.items(): |
|
attrs = [a.split('|')[0].strip() for a in attrs] |
|
unique_attrs.update(attrs) |
|
all_attrs += attrs |
|
print(f'Total unique attributes: {len(unique_attrs)}') |
|
total = len(all_attrs) |
|
num_left = len(all_attrs) |
|
annotated_attrs_by_cat = dict() |
|
for (category, attrs) in attrs_by_cat.items(): |
|
print(f'Category: [ {category} ] | Number of attributes: {len(attrs)}\n') |
|
annotated_attrs = [] |
|
for (i, attr) in enumerate(attrs): |
|
(attr, score) = attr.split(' | ') |
|
print(f"{'[' + str(i) + ']':<5} [bold green]{attr:<30}[/bold green] | [red]{category}[/red] | {score}") |
|
tags = input('Annotate [1: ITEM, 2: PROP, 3: USE, ⎵: next example, q: next category] > ') |
|
print('\n') |
|
tags = tags.strip() |
|
annotated_attrs.append(f'{attr} | {score} | {tags}') |
|
if 'q' in tags: |
|
break |
|
num_left -= len(attrs) |
|
print(f'{num_left} / {total} total attributes left.') |
|
ans = input('Starting the next category... [y/n] > ') |
|
if ans == 'n': |
|
break |
|
|
|
def main(): |
|
for attr_path in ATTR_PATHS: |
|
annotate(attr_path) |
|
if __name__ == '__main__': |
|
'' |
|
main() |
|
|
|
# File: WebShop-master/web_agent_site/attributes/generate_attrs.py |
|
import json |
|
import yaml |
|
import random |
|
from pathlib import Path |
|
from collections import defaultdict |
|
from sklearn.feature_extraction.text import TfidfVectorizer |
|
from sklearn.feature_extraction import text as sk_text |
|
import pandas as pd |
|
from tqdm import tqdm |
|
from rich import print |
|
ITEMS_PATH = './data/ITEMS_mar1.json' |
|
REVIEWS_PATH = './data/reviews.json' |
|
ATTR_DIR = './data/attributes' |
|
random.seed(0) |
|
|
|
def get_stop_words(): |
|
extra_stop_words = set([str(i) for i in range(1000)]) |
|
stop_words = sk_text.ENGLISH_STOP_WORDS.union(extra_stop_words) |
|
return stop_words |
|
|
|
def load_products(num=None): |
|
with open(ITEMS_PATH) as f: |
|
all_products = json.load(f) |
|
if num is not None: |
|
random.shuffle(all_products) |
|
all_products = all_products[:num] |
|
products = dict() |
|
asins = set() |
|
for p in all_products: |
|
asin = p['asin'] |
|
if asin in asins: |
|
continue |
|
asins.add(asin) |
|
products[asin] = p |
|
with open(REVIEWS_PATH) as f: |
|
reviews = json.load(f) |
|
reviews = {r['asin']: r for r in reviews} |
|
for (asin, p) in products.items(): |
|
if asin in reviews: |
|
p['review'] = reviews[asin] |
|
else: |
|
p['review'] = None |
|
return products |
|
|
|
def get_top_attrs(attributes, k): |
|
attr_to_asins = defaultdict(list) |
|
for (asin, attr_scores) in attributes.items(): |
|
top_attr_scores = attr_scores[:k]

for (attr, score) in top_attr_scores:
|
attr_to_asins[attr].append(asin) |
|
total = len(attributes)
|
top_attrs = [(attr, len(asins) / total) for (attr, asins) in attr_to_asins.items()] |
|
top_attrs = sorted(top_attrs, key=lambda x: -x[1]) |
|
top_attrs = [f'{attr} | {score:.4f}' for (attr, score) in top_attrs] |
|
return top_attrs |
|
|
|
def get_corpus(products, keys=('name', 'small_description'), category_type='category'): |
|
all_products = list(products.values()) |
|
asins_by_cat = defaultdict(set) |
|
corpus_by_cat = defaultdict(list) |
|
for p in all_products: |
|
category = p[category_type] |
|
asin = p['asin'] |
|
if asin in asins_by_cat[category]: |
|
continue |
|
asins_by_cat[category].add(asin) |
|
text = [] |
|
for key in keys: |
|
if key == 'review': |
|
rs = p['review']['reviews'] if p['review'] is not None else None

if rs is not None:
|
text_ = ' '.join([r['review'].lower() for r in rs]) |
|
else: |
|
text_ = '' |
|
else: |
|
text_ = p[key].lower() |
|
text.append(text_) |
|
text = ' '.join(text) |
|
corpus_by_cat[category].append((asin, text)) |
|
return corpus_by_cat |
|
|
|
def generate_ngram_attrs(corpus_by_cat, ngram_range, k, attrs): |
|
vectorizer = TfidfVectorizer(stop_words=get_stop_words(), ngram_range=ngram_range, max_features=1000) |
|
top_attrs_by_cat = dict() |
|
for (category, corpus) in tqdm(corpus_by_cat.items(), total=len(corpus_by_cat)): |
|
asins = [_[0] for _ in corpus] |
|
texts = [_[1] for _ in corpus] |
|
vec = vectorizer.fit_transform(texts).todense() |
|
df = pd.DataFrame(vec, columns=vectorizer.get_feature_names_out()) |
|
attrs_by_cat = dict() |
|
for (asin, (row_name, row)) in zip(asins, df.iterrows()): |
|
attr_scores = sorted(list(zip(row.index, row)), key=lambda x: -x[1]) |
|
attrs_by_cat[asin] = attr_scores |
|
attrs[asin] = attr_scores |
|
top_attrs_by_cat[category.lower()] = get_top_attrs(attrs_by_cat, k=k) |
|
print(top_attrs_by_cat.keys()) |
|
return top_attrs_by_cat |
|
|
|
def generate_attrs(corpus_by_cat, k, save_name): |
|
attrs = dict() |
|
for n in range(1, 3): |
|
ngram_range = (n, n) |
|
top_attrs_by_cat = generate_ngram_attrs(corpus_by_cat, ngram_range, k, attrs) |
|
if save_name is not None: |
|
save_path = Path(ATTR_DIR) / f'{save_name}_{n}-gram.yaml' |
|
with open(save_path, 'w') as f: |
|
yaml.dump(top_attrs_by_cat, f, default_flow_style=False) |
|
print(f'Saved: {save_path}') |
|
save_path = Path(ATTR_DIR) / f'{save_name}_attrs_unfiltered.json' |
|
with open(save_path, 'w') as f: |
|
json.dump(attrs, f) |
|
print(f'Saved: {save_path}') |
|
if __name__ == '__main__': |
|
'' |
|
products = load_products(num=40000) |
|
corpus_by_cat_broad = get_corpus(products, category_type='category') |
|
generate_attrs(corpus_by_cat_broad, k=5, save_name='broad') |
|
corpus_by_cat_narrow = get_corpus(products, category_type='query') |
|
generate_attrs(corpus_by_cat_narrow, k=5, save_name='narrow') |
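# Pipeline summary: load_products samples up to 40k products (deduplicated by
# ASIN) and attaches reviews; get_corpus builds one text document per product
# from its name and small_description, grouped by 'category' (broad) or 'query'
# (narrow); generate_attrs fits a per-category TfidfVectorizer on 1-grams and
# 2-grams and keeps each product's highest-scoring terms, while get_top_attrs
# reports, per category, the fraction of products whose top-k terms include a
# given attribute. Results are written under ./data/attributes/ as
# {broad,narrow}_{1,2}-gram.yaml plus *_attrs_unfiltered.json, which the
# annotate.py tool above then labels by hand.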
|
|
|
# File: WebShop-master/web_agent_site/engine/engine.py |
|
"""""" |
|
import os |
|
import re |
|
import json |
|
import random |
|
from collections import defaultdict |
|
from ast import literal_eval |
|
from decimal import Decimal |
|
import cleantext |
|
from tqdm import tqdm |
|
from rank_bm25 import BM25Okapi |
|
from flask import render_template_string |
|
from rich import print |
|
from pyserini.search.lucene import LuceneSearcher |
|
from web_agent_site.utils import BASE_DIR, DEFAULT_FILE_PATH, DEFAULT_REVIEW_PATH, DEFAULT_ATTR_PATH, HUMAN_ATTR_PATH |
|
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates') |
|
SEARCH_RETURN_N = 50 |
|
PRODUCT_WINDOW = 10 |
|
TOP_K_ATTR = 10 |
|
END_BUTTON = 'Buy Now' |
|
NEXT_PAGE = 'Next >' |
|
PREV_PAGE = '< Prev' |
|
BACK_TO_SEARCH = 'Back to Search' |
|
ACTION_TO_TEMPLATE = {'Description': 'description_page.html', 'Features': 'features_page.html', 'Reviews': 'review_page.html', 'Attributes': 'attributes_page.html'} |
|
|
|
def map_action_to_html(action, **kwargs): |
|
(action_name, action_arg) = parse_action(action) |
|
if action_name == 'start': |
|
path = os.path.join(TEMPLATE_DIR, 'search_page.html') |
|
html = render_template_string(read_html_template(path=path), session_id=kwargs['session_id'], instruction_text=kwargs['instruction_text']) |
|
elif action_name == 'search': |
|
path = os.path.join(TEMPLATE_DIR, 'results_page.html') |
|
html = render_template_string(read_html_template(path=path), session_id=kwargs['session_id'], products=kwargs['products'], keywords=kwargs['keywords'], page=kwargs['page'], total=kwargs['total'], instruction_text=kwargs['instruction_text']) |
|
elif action_name == 'click' and action_arg == END_BUTTON: |
|
path = os.path.join(TEMPLATE_DIR, 'done_page.html') |
|
html = render_template_string(read_html_template(path), session_id=kwargs['session_id'], reward=kwargs['reward'], asin=kwargs['asin'], options=kwargs['options'], reward_info=kwargs.get('reward_info'), goal_attrs=kwargs.get('goal_attrs'), purchased_attrs=kwargs.get('purchased_attrs'), goal=kwargs.get('goal'), mturk_code=kwargs.get('mturk_code'), query=kwargs.get('query'), category=kwargs.get('category'), product_category=kwargs.get('product_category')) |
|
elif action_name == 'click' and action_arg in ACTION_TO_TEMPLATE: |
|
path = os.path.join(TEMPLATE_DIR, ACTION_TO_TEMPLATE[action_arg]) |
|
html = render_template_string(read_html_template(path), session_id=kwargs['session_id'], product_info=kwargs['product_info'], keywords=kwargs['keywords'], page=kwargs['page'], asin=kwargs['asin'], options=kwargs['options'], instruction_text=kwargs.get('instruction_text')) |
|
elif action_name == 'click': |
|
path = os.path.join(TEMPLATE_DIR, 'item_page.html') |
|
html = render_template_string(read_html_template(path), session_id=kwargs['session_id'], product_info=kwargs['product_info'], keywords=kwargs['keywords'], page=kwargs['page'], asin=kwargs['asin'], options=kwargs['options'], instruction_text=kwargs.get('instruction_text'), show_attrs=kwargs['show_attrs']) |
|
else: |
|
raise ValueError('Action name not recognized.') |
|
return html |
|
|
|
def read_html_template(path): |
|
with open(path) as f: |
|
template = f.read() |
|
return template |
|
|
|
def parse_action(action): |
|
pattern = re.compile('(.+)\\[(.+)\\]') |
|
m = re.match(pattern, action) |
|
if m is None: |
|
action_name = action |
|
action_arg = None |
|
else: |
|
(action_name, action_arg) = m.groups() |
|
return (action_name, action_arg) |
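# Illustrative behaviour of parse_action:
#   >>> parse_action('search[red shoes]')
#   ('search', 'red shoes')
#   >>> parse_action('click[Buy Now]')
#   ('click', 'Buy Now')
#   >>> parse_action('end')
#   ('end', None)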
|
|
|
def convert_web_app_string_to_var(name, string): |
|
if name == 'keywords': |
|
keywords = string |
|
if keywords.startswith('['): |
|
keywords = literal_eval(keywords) |
|
else: |
|
keywords = [keywords] |
|
var = keywords |
|
elif name == 'page': |
|
page = string |
|
page = int(page) |
|
var = page |
|
else: |
|
raise ValueError('Name of variable not recognized.') |
|
return var |
|
|
|
def get_top_n_product_from_keywords(keywords, search_engine, all_products, product_item_dict, attribute_to_asins=None): |
|
if keywords[0] == '<r>': |
|
top_n_products = random.sample(all_products, k=SEARCH_RETURN_N) |
|
elif keywords[0] == '<a>': |
|
attribute = ' '.join(keywords[1:]).strip() |
|
asins = attribute_to_asins[attribute] |
|
top_n_products = [p for p in all_products if p['asin'] in asins] |
|
elif keywords[0] == '<c>': |
|
category = keywords[1].strip() |
|
top_n_products = [p for p in all_products if p['category'] == category] |
|
elif keywords[0] == '<q>': |
|
query = ' '.join(keywords[1:]).strip() |
|
top_n_products = [p for p in all_products if p['query'] == query] |
|
else: |
|
keywords = ' '.join(keywords) |
|
hits = search_engine.search(keywords, k=SEARCH_RETURN_N) |
|
docs = [search_engine.doc(hit.docid) for hit in hits] |
|
top_n_asins = [json.loads(doc.raw())['id'] for doc in docs] |
|
top_n_products = [product_item_dict[asin] for asin in top_n_asins if asin in product_item_dict] |
|
return top_n_products |
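# The first keyword acts as a retrieval-mode switch: '<r>' samples
# SEARCH_RETURN_N random products, '<a>' filters by an exact attribute (and
# needs attribute_to_asins), '<c>' filters by category, '<q>' filters by query,
# and anything else is joined into a plain query for the Lucene/Pyserini index,
# e.g. keywords=['<c>', 'electronics'] vs. keywords=['red', 'running', 'shoes'].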
|
|
|
def get_product_per_page(top_n_products, page): |
|
return top_n_products[(page - 1) * PRODUCT_WINDOW:page * PRODUCT_WINDOW] |
|
|
|
def generate_product_prices(all_products): |
|
product_prices = dict() |
|
for product in all_products: |
|
asin = product['asin'] |
|
pricing = product['pricing'] |
|
if not pricing: |
|
price = 100.0 |
|
elif len(pricing) == 1: |
|
price = pricing[0] |
|
else: |
|
price = random.uniform(*pricing[:2]) |
|
product_prices[asin] = price |
|
return product_prices |
|
|
|
def init_search_engine(num_products=None): |
|
if num_products == 100: |
|
indexes = 'indexes_100' |
|
elif num_products == 1000: |
|
indexes = 'indexes_1k' |
|
elif num_products == 100000: |
|
indexes = 'indexes_100k' |
|
elif num_products is None: |
|
indexes = 'indexes' |
|
else: |
|
raise NotImplementedError(f'num_products being {num_products} is not supported yet.') |
|
search_engine = LuceneSearcher(os.path.join(BASE_DIR, f'../search_engine/{indexes}')) |
|
return search_engine |
|
|
|
def clean_product_keys(products): |
|
for product in products: |
|
product.pop('product_information', None) |
|
product.pop('brand', None) |
|
product.pop('brand_url', None) |
|
product.pop('list_price', None) |
|
product.pop('availability_quantity', None) |
|
product.pop('availability_status', None) |
|
product.pop('total_reviews', None) |
|
product.pop('total_answered_questions', None) |
|
product.pop('seller_id', None) |
|
product.pop('seller_name', None) |
|
product.pop('fulfilled_by_amazon', None) |
|
product.pop('fast_track_message', None) |
|
product.pop('aplus_present', None) |
|
product.pop('small_description_old', None) |
|
print('Keys cleaned.') |
|
return products |
|
|
|
def load_products(filepath, num_products=None, human_goals=True): |
|
with open(filepath) as f: |
|
products = json.load(f) |
|
print('Products loaded.') |
|
products = clean_product_keys(products) |
|
all_reviews = dict() |
|
all_ratings = dict() |
|
if human_goals: |
|
with open(HUMAN_ATTR_PATH) as f: |
|
human_attributes = json.load(f) |
|
with open(DEFAULT_ATTR_PATH) as f: |
|
attributes = json.load(f) |
|
with open(HUMAN_ATTR_PATH) as f: |
|
human_attributes = json.load(f) |
|
print('Attributes loaded.') |
|
asins = set() |
|
all_products = [] |
|
attribute_to_asins = defaultdict(set) |
|
if num_products is not None: |
|
products = products[:num_products] |
|
for (i, p) in tqdm(enumerate(products), total=len(products)): |
|
asin = p['asin'] |
|
if asin == 'nan' or len(asin) > 10: |
|
continue |
|
if asin in asins: |
|
continue |
|
else: |
|
asins.add(asin) |
|
products[i]['category'] = p['category'] |
|
products[i]['query'] = p['query'] |
|
products[i]['product_category'] = p['product_category'] |
|
products[i]['Title'] = p['name'] |
|
products[i]['Description'] = p['full_description'] |
|
products[i]['Reviews'] = all_reviews.get(asin, []) |
|
products[i]['Rating'] = all_ratings.get(asin, 'N.A.') |
|
for r in products[i]['Reviews']: |
|
if 'score' not in r: |
|
r['score'] = r.pop('stars') |
|
if 'review' not in r: |
|
r['body'] = '' |
|
else: |
|
r['body'] = r.pop('review') |
|
products[i]['BulletPoints'] = p['small_description'] if isinstance(p['small_description'], list) else [p['small_description']] |
|
pricing = p.get('pricing') |
|
if pricing is None or not pricing: |
|
pricing = [100.0] |
|
price_tag = '$100.0' |
|
else: |
|
pricing = [float(Decimal(re.sub('[^\\d.]', '', price))) for price in pricing.split('$')[1:]] |
|
if len(pricing) == 1: |
|
price_tag = f'${pricing[0]}' |
|
else: |
|
price_tag = f'${pricing[0]} to ${pricing[1]}' |
|
pricing = pricing[:2] |
|
products[i]['pricing'] = pricing |
|
products[i]['Price'] = price_tag |
|
options = dict() |
|
customization_options = p['customization_options'] |
|
option_to_image = dict() |
|
if customization_options: |
|
for (option_name, option_contents) in customization_options.items(): |
|
if option_contents is None: |
|
continue |
|
option_name = option_name.lower() |
|
option_values = [] |
|
for option_content in option_contents: |
|
option_value = option_content['value'].strip().replace('/', ' | ').lower() |
|
option_image = option_content.get('image', None) |
|
option_values.append(option_value) |
|
option_to_image[option_value] = option_image |
|
options[option_name] = option_values |
|
products[i]['options'] = options |
|
products[i]['option_to_image'] = option_to_image |
|
if asin in attributes and 'attributes' in attributes[asin]: |
|
products[i]['Attributes'] = attributes[asin]['attributes'] |
|
else: |
|
products[i]['Attributes'] = ['DUMMY_ATTR'] |
|
if human_goals: |
|
if asin in human_attributes: |
|
products[i]['instructions'] = human_attributes[asin] |
|
else: |
|
products[i]['instruction_text'] = attributes[asin].get('instruction', None) |
|
products[i]['instruction_attributes'] = attributes[asin].get('instruction_attributes', None) |
|
products[i]['MainImage'] = p['images'][0] |
|
products[i]['query'] = p['query'].lower().strip() |
|
all_products.append(products[i]) |
|
for p in all_products: |
|
for a in p['Attributes']: |
|
attribute_to_asins[a].add(p['asin']) |
|
product_item_dict = {p['asin']: p for p in all_products} |
|
product_prices = generate_product_prices(all_products) |
|
return (all_products, product_item_dict, product_prices, attribute_to_asins) |
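# Usage sketch with the default 1k-product files from web_agent_site/utils.py:
#   all_products, product_item_dict, product_prices, attribute_to_asins = \
#       load_products(DEFAULT_FILE_PATH, num_products=None, human_goals=True)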
|
|
|
# File: WebShop-master/web_agent_site/engine/goal.py |
|
"""""" |
|
import itertools |
|
import random |
|
import spacy |
|
from collections import defaultdict |
|
from rich import print |
|
from thefuzz import fuzz |
|
from web_agent_site.engine.normalize import normalize_color |
|
nlp = spacy.load('en_core_web_sm') |
|
PRICE_RANGE = [10.0 * i for i in range(1, 100)] |
|
|
|
def get_goals(all_products, product_prices, human_goals=True): |
|
if human_goals: |
|
return get_human_goals(all_products, product_prices) |
|
else: |
|
return get_synthetic_goals(all_products, product_prices) |
|
|
|
def get_human_goals(all_products, product_prices): |
|
goals = [] |
|
cnt_atts = defaultdict(int) |
|
cnt = 0 |
|
for item in all_products: |
|
asin = item['asin'] |
|
if 'instructions' not in item: |
|
continue |
|
for product in item['instructions']: |
|
attributes = product['instruction_attributes'] |
|
if len(attributes) == 0: |
|
cnt += 1 |
|
continue |
|
if product_prices is not None: |
|
price = product_prices[asin] |
|
price_range = [p for p in PRICE_RANGE if p > price][:4] |
|
if len(price_range) >= 2: |
|
(_, price_upper) = sorted(random.sample(price_range, 2)) |
|
price_text = f', and price lower than {price_upper:.2f} dollars' |
|
else: |
|
price_upper = 1000000 |
|
price_text = '' |
|
else: |
|
price_upper = 1000000

price_text = ''
|
goals.append({'asin': asin, 'category': item['category'], 'query': item['query'], 'name': item['name'], 'product_category': item['product_category'], 'instruction_text': product['instruction'].strip('.') + price_text, 'attributes': attributes, 'price_upper': price_upper, 'goal_options': product['instruction_options']}) |
|
for att in attributes: |
|
cnt_atts[att] += 1 |
|
for goal in goals: |
|
goal['weight'] = 1 |
|
print(cnt, 'skipped') |
|
return goals |
|
|
|
def get_synthetic_goals(all_products, product_prices): |
|
goals = [] |
|
cnt_atts = defaultdict(int) |
|
for product in all_products: |
|
if 'instruction_text' not in product or product['instruction_text'] is None: |
|
continue |
|
product_goals = [] |
|
asin = product['asin'] |
|
attributes = product['instruction_attributes'] |
|
assert len(attributes) > 0 |
|
if product_prices is not None: |
|
price = product_prices[asin] |
|
price_range = [p for p in PRICE_RANGE if p > price][:4] |
|
if len(price_range) >= 2: |
|
(_, price_upper) = sorted(random.sample(price_range, 2)) |
|
price_text = f', and price lower than {price_upper:.2f} dollars' |
|
else: |
|
price_upper = 1000000 |
|
price_text = '' |
|
else: |
|
price_upper = 1000000 |
|
price_text = '' |
|
instruction_text = product['instruction_text'] |
|
options = product['options'] |
|
option_names = sorted(options) |
|
combinations = list(itertools.product(*(options[option_name] for option_name in option_names))) |
|
for combination in combinations: |
|
goal_options = dict() |
|
for (i, o) in enumerate(combination): |
|
goal_options[option_names[i]] = o |
|
option_text = ', and '.join([f'{k}: {v}' for (k, v) in goal_options.items()]) |
|
option_text = ' with ' + option_text if option_text else '' |
|
product_goals.append({'asin': asin, 'category': product['category'], 'query': product['query'], 'product_category': product['product_category'], 'instruction_text': f'{instruction_text}{option_text}{price_text}', 'attributes': attributes, 'price_upper': price_upper, 'goal_options': goal_options, 'name': product['Title']})
|
for att in attributes: |
|
cnt_atts[att] += 1 |
|
goals += product_goals |
|
for goal in goals: |
|
goal['weight'] = sum((1.0 / cnt_atts[att] for att in goal['attributes'])) / len(goal['attributes']) |
|
return goals |
|
|
|
def get_type_reward(purchased_product, goal): |
|
query_match = purchased_product['query'] == goal['query'] |
|
purchased_product_category = [x.strip() for x in purchased_product['product_category'].split('›')] |
|
goal_product_category = [x.strip() for x in goal['product_category'].split('›')] |
|
category_match = len(set(purchased_product_category) & set(goal_product_category)) >= 2 |
|
purchased_type = purchased_product['name'] |
|
desired_type = goal['name'] |
|
purchased_type_parse = nlp(purchased_type) |
|
desired_type_parse = nlp(desired_type) |
|
purchased_type_parse = [t.text.lower() for t in purchased_type_parse if t.pos_ in ('PNOUN', 'NOUN', 'PROPN')] |
|
desired_type_parse = [t.text.lower() for t in desired_type_parse if t.pos_ in ('PNOUN', 'NOUN', 'PROPN')] |
|
n_intersect_type = len(set(purchased_type_parse) & set(desired_type_parse)) |
|
if len(desired_type_parse) == 0: |
|
title_score = 0.2 |
|
else: |
|
title_score = n_intersect_type / len(desired_type_parse) |
|
r_type = 1.0 |
|
match = query_match or category_match or title_score > 0.2 |
|
if not match: |
|
r_type = 0.5 |
|
if title_score < 0.1: |
|
r_type = 0.1 |
|
if title_score == 0.0: |
|
r_type = 0.0 |
|
return dict(r_type=r_type, query_match=query_match, category_match=category_match, title_score=title_score) |
|
|
|
def get_attribute_reward(purchased_product, goal): |
|
purchased_attrs = purchased_product['Attributes'] |
|
goal_attrs = goal['attributes'] |
|
num_attr_matches = 0 |
|
for g_attr in goal_attrs: |
|
matched = False |
|
for p_attr in purchased_attrs: |
|
score = fuzz.token_set_ratio(p_attr, g_attr) |
|
if score > 85: |
|
num_attr_matches += 1 |
|
matched = True |
|
break |
|
if not matched and (g_attr in purchased_product['Title'].lower() or g_attr in ' '.join(purchased_product['BulletPoints']).lower() or g_attr in purchased_product['Description'].lower()): |
|
num_attr_matches += 1 |
|
matched = True |
|
r_attr = num_attr_matches / len(goal_attrs) |
|
return (r_attr, num_attr_matches) |
|
|
|
def get_option_reward(purchased_options, goal_options): |
|
purchased_options = [normalize_color(o) for o in purchased_options] |
|
goal_options = [normalize_color(o) for o in goal_options] |
|
num_option_matches = 0 |
|
for g_option in goal_options: |
|
for p_option in purchased_options: |
|
score = fuzz.token_set_ratio(p_option, g_option) |
|
if score > 85: |
|
num_option_matches += 1 |
|
break |
|
r_option = num_option_matches / len(goal_options) if len(goal_options) > 0 else None |
|
return (r_option, num_option_matches) |
|
|
|
def get_reward(purchased_product, goal, price, options, **kwargs): |
|
r_type_dict = get_type_reward(purchased_product, goal) |
|
r_price = price <= goal['price_upper'] if goal['price_upper'] > 0 else None |
|
(r_att, num_attr_matches) = get_attribute_reward(purchased_product, goal) |
|
(r_option, num_option_matches) = get_option_reward(list(options.values()), goal['goal_options'].items() if isinstance(goal['goal_options'], dict) else goal['goal_options']) |
|
total_reward = (num_attr_matches + num_option_matches + r_price) / (len(goal['attributes']) + len(goal['goal_options']) + 1) |
|
total_reward *= r_type_dict['r_type'] |
|
if kwargs.get('verbose', False): |
|
info = {'r_type': r_type_dict['r_type'], 'r_att': r_att, 'w_att': len(goal['attributes']) / (len(goal['attributes']) + len(goal['goal_options']) + 1), 'query_match': r_type_dict['query_match'], 'category_match': r_type_dict['category_match'], 'title_score': r_type_dict['title_score']} |
|
if r_option is not None: |
|
info['r_option'] = r_option |
|
info['w_option'] = len(goal['goal_options']) / (len(goal['attributes']) + len(goal['goal_options']) + 1) |
|
if r_price is not None: |
|
info['r_price'] = r_price |
|
info['w_price'] = 1 / (len(goal['attributes']) + len(goal['goal_options']) + 1) |
|
return (total_reward, info) |
|
return total_reward |
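# Worked example with illustrative numbers: for a goal with 2 attributes and 1
# goal option, a purchase within goal['price_upper'] that matches 1 attribute
# and the option scores
#   total_reward = (1 + 1 + 1) / (2 + 1 + 1) = 0.75,
# which is then scaled by r_type (1.0 when the query, category, or enough title
# nouns match, dropping through 0.5 and 0.1 to 0.0 when none of them do).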
|
|
|
# File: WebShop-master/web_agent_site/engine/normalize.py |
|
import re |
|
from typing import Tuple |
|
COLOR_SET = ['alabaster', 'apricot', 'aqua', 'ash', 'asphalt', 'azure', 'banana', 'beige', 'black', 'blue', 'blush', 'bordeaux', 'bronze', 'brown', 'burgundy', 'camel', 'camo', 'caramel', 'champagne', 'charcoal', 'cheetah', 'chestnut', 'chocolate', 'christmas', 'coffee', 'cognac', 'copper', 'coral', 'cranberry', 'cream', 'crystal', 'dark', 'denim', 'eggplant', 'elephant', 'espresso', 'fuchsia', 'gold', 'granite', 'grape', 'graphite', 'grass', 'gray', 'green', 'grey', 'heather', 'indigo', 'ivory', 'ivy', 'khaki', 'lavender', 'lemon', 'leopard', 'light', 'lilac', 'lime', 'magenta', 'maroon', 'mauve', 'merlot', 'midnight', 'mint', 'mocha', 'multicolor', 'mushroom', 'mustard', 'natural', 'navy', 'nude', 'olive', 'orange', 'peach', 'pewter', 'pink', 'plum', 'purple', 'rainbow', 'red', 'rose', 'royal', 'rust', 'sand', 'sapphire', 'seashell', 'silver', 'skull', 'slate', 'steel', 'stone', 'stonewash', 'sunflower', 'tan', 'taupe', 'teal', 'tiger', 'turquoise', 'violet', 'walnut', 'wheat', 'white', 'wine', 'yellow'] |
|
SIZE_SET = ['xx-large', '3x-large', '4x-large', '5x-large', 'x-large', 'x-small', 'medium', 'large', 'small', 'queen', 'twin', 'full', 'king', 'one size', 'pack'] |
|
SIZE_PATTERNS = [re.compile('(.*)neck(.*)sleeve'), re.compile('(.*) women \\| (.*) men'), re.compile('(.*)w x(.*)l'), re.compile('(.*)w by (.*)l'), re.compile('(.*)w x(.*)h'), re.compile('(.*)wide'), re.compile('(.*)x-wide'), re.compile('(.*)narrow'), re.compile('(.*)petite'), re.compile('(.*)inch'), re.compile('(.*)plus'), re.compile('(.*)mm'), re.compile('women(.*)'), re.compile('(.*)x(.*)'), re.compile('(.*)ft'), re.compile('(.*)feet'), re.compile('(.*)meter'), re.compile('(.*)yards'), re.compile('(.*)\\*(.*)'), re.compile('(.*)\\-(.*)'), re.compile('(\\d+)"$'), re.compile('(\\d+)f$'), re.compile('(\\d+)m$'), re.compile('(\\d+)cm$'), re.compile('(\\d+)g$')] |
|
SIZE_PATTERNS = [re.compile(s) for s in SIZE_SET] + SIZE_PATTERNS |
|
|
|
def normalize_color(color_string: str) -> str: |
|
for norm_color in COLOR_SET: |
|
if norm_color in color_string: |
|
return norm_color |
|
return color_string |
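# Illustrative behaviour (the first COLOR_SET entry found as a substring wins,
# and unmatched strings pass through unchanged):
#   >>> normalize_color('heather navy blue')
#   'blue'
#   >>> normalize_color('zebra print')
#   'zebra print'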
|
|
|
def normalize_color_size(product_prices: dict) -> Tuple[dict, dict]: |
|
(all_colors, all_sizes) = (set(), set()) |
|
for ((_, color, size), _) in product_prices.items(): |
|
all_colors.add(color.lower()) |
|
all_sizes.add(size.lower()) |
|
color_mapping = {'N.A.': 'not_matched'} |
|
for c in all_colors: |
|
matched = False |
|
for base in COLOR_SET: |
|
if base in c: |
|
color_mapping[c] = base |
|
matched = True |
|
break |
|
if not matched: |
|
color_mapping[c] = 'not_matched' |
|
size_mapping = {'N.A.': 'not_matched'} |
|
for s in all_sizes: |
|
matched = False |
|
for pattern in SIZE_PATTERNS: |
|
m = re.search(pattern, s) |
|
if m is not None: |
|
matched = True |
|
size_mapping[s] = pattern.pattern |
|
break |
|
if not matched: |
|
if s.replace('.', '', 1).isdigit(): |
|
size_mapping[s] = 'numeric_size' |
|
matched = True |
|
if not matched: |
|
size_mapping[s] = 'not_matched' |
|
return (color_mapping, size_mapping) |
|
|
|
# File: WebShop-master/web_agent_site/envs/__init__.py |
|
from gym.envs.registration import register |
|
from web_agent_site.envs.web_agent_site_env import WebAgentSiteEnv |
|
from web_agent_site.envs.web_agent_text_env import WebAgentTextEnv |
|
register(id='WebAgentSiteEnv-v0', entry_point='web_agent_site.envs:WebAgentSiteEnv') |
|
register(id='WebAgentTextEnv-v0', entry_point='web_agent_site.envs:WebAgentTextEnv') |
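# Once registered, the environments can be built through Gym's factory; a
# minimal sketch (assumes the product data files are available and, for the
# site env, a server running at 127.0.0.1:3000):
#   import gym
#   import web_agent_site.envs
#   env = gym.make('WebAgentTextEnv-v0', observation_mode='text', num_products=100)
#   obs, _ = env.reset()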
|
|
|
# File: WebShop-master/web_agent_site/envs/web_agent_site_env.py |
|
import gym |
|
import random |
|
import requests |
|
import string |
|
import time |
|
from bs4 import BeautifulSoup |
|
from bs4.element import Comment |
|
from gym import spaces |
|
from os.path import join, dirname, abspath |
|
from selenium import webdriver |
|
from selenium.webdriver.chrome.service import Service |
|
from selenium.webdriver.chrome.options import Options |
|
from selenium.webdriver.common.keys import Keys |
|
from selenium.common.exceptions import ElementNotInteractableException |
|
from web_agent_site.engine.engine import parse_action, END_BUTTON |
|
|
|
class WebAgentSiteEnv(gym.Env): |
|
|
|
def __init__(self, observation_mode='html', **kwargs): |
|
super(WebAgentSiteEnv, self).__init__() |
|
self.observation_mode = observation_mode |
|
self.kwargs = kwargs |
|
service = Service(join(dirname(abspath(__file__)), 'chromedriver')) |
|
options = Options() |
|
if 'render' not in kwargs or not kwargs['render']: |
|
options.add_argument('--headless') |
|
self.browser = webdriver.Chrome(service=service, options=options) |
|
self.text_to_clickable = None |
|
self.assigned_session = kwargs.get('session') |
|
self.session = None |
|
self.reset() |
|
|
|
def step(self, action): |
|
reward = 0.0 |
|
done = False |
|
info = None |
|
(action_name, action_arg) = parse_action(action) |
|
if action_name == 'search': |
|
try: |
|
search_bar = self.browser.find_element_by_id('search_input') |
|
except Exception: |
|
pass |
|
else: |
|
search_bar.send_keys(action_arg) |
|
search_bar.submit() |
|
elif action_name == 'click': |
|
try: |
|
self.text_to_clickable[action_arg].click() |
|
except ElementNotInteractableException: |
|
button = self.text_to_clickable[action_arg] |
|
self.browser.execute_script('arguments[0].click();', button) |
|
reward = self.get_reward() |
|
if action_arg == END_BUTTON: |
|
done = True |
|
elif action_name == 'end': |
|
done = True |
|
else: |
|
print('Invalid action. No action performed.') |
|
if 'pause' in self.kwargs: |
|
time.sleep(self.kwargs['pause']) |
|
return (self.observation, reward, done, info) |
|
|
|
def get_available_actions(self): |
|
try: |
|
search_bar = self.browser.find_element_by_id('search_input') |
|
except Exception: |
|
has_search_bar = False |
|
else: |
|
has_search_bar = True |
|
buttons = self.browser.find_elements_by_class_name('btn') |
|
product_links = self.browser.find_elements_by_class_name('product-link') |
|
buying_options = self.browser.find_elements_by_css_selector("input[type='radio']") |
|
self.text_to_clickable = {f'{b.text}': b for b in buttons + product_links} |
|
for opt in buying_options: |
|
opt_value = opt.get_attribute('value') |
|
self.text_to_clickable[f'{opt_value}'] = opt |
|
return dict(has_search_bar=has_search_bar, clickables=list(self.text_to_clickable.keys())) |
|
|
|
def _parse_html(self, html=None, url=None): |
|
if html is None: |
|
if url is not None: |
|
html = requests.get(url).text
|
else: |
|
html = self.state['html'] |
|
html_obj = BeautifulSoup(html, 'html.parser') |
|
return html_obj |
|
|
|
def get_reward(self): |
|
html_obj = self._parse_html() |
|
r = html_obj.find(id='reward') |
|
r = float(r.findChildren('pre')[0].string) if r is not None else 0.0 |
|
return r |
|
|
|
def get_instruction_text(self): |
|
html_obj = self._parse_html(self.browser.page_source) |
|
instruction_text = html_obj.find(id='instruction-text').h4.text |
|
return instruction_text |
|
|
|
def convert_html_to_text(self, html): |
|
texts = self._parse_html(html).findAll(text=True) |
|
visible_texts = filter(tag_visible, texts) |
|
observation = ' [SEP] '.join((t.strip() for t in visible_texts if t != '\n')) |
|
return observation |
|
|
|
@property |
|
def state(self): |
|
return dict(url=self.browser.current_url, html=self.browser.page_source, instruction_text=self.instruction_text) |
|
|
|
@property |
|
def observation(self): |
|
html = self.state['html'] |
|
if self.observation_mode == 'html': |
|
return html |
|
elif self.observation_mode == 'text': |
|
return self.convert_html_to_text(html) |
|
else: |
|
raise ValueError(f'Observation mode {self.observation_mode} not supported.') |
|
|
|
@property |
|
def action_space(self): |
|
raise NotImplementedError
|
|
|
@property |
|
def observation_space(self): |
|
raise NotImplementedError
|
|
|
def reset(self): |
|
if self.assigned_session is not None: |
|
self.session = self.assigned_session |
|
else: |
|
self.session = ''.join(random.choices(string.ascii_lowercase, k=5)) |
|
init_url = f'http://127.0.0.1:3000/{self.session}' |
|
self.browser.get(init_url) |
|
self.instruction_text = self.get_instruction_text() |
|
return (self.observation, None) |
|
|
|
def render(self, mode='human'): |
|
raise NotImplementedError
|
|
|
def close(self): |
|
self.browser.close() |
|
print('Browser closed.') |
|
|
|
def tag_visible(element): |
|
ignore = {'style', 'script', 'head', 'title', 'meta', '[document]'} |
|
return element.parent.name not in ignore and (not isinstance(element, Comment)) |
|
|
|
# File: WebShop-master/web_agent_site/envs/web_agent_text_env.py |
|
import gym |
|
import json |
|
import random |
|
import string |
|
import time |
|
import torch |
|
import numpy as np |
|
from bs4 import BeautifulSoup |
|
from bs4.element import Comment |
|
from collections import defaultdict |
|
from flask import Flask |
|
from web_agent_site.engine.engine import load_products, init_search_engine, get_top_n_product_from_keywords, map_action_to_html, parse_action, get_product_per_page, ACTION_TO_TEMPLATE, END_BUTTON, NEXT_PAGE, PREV_PAGE, BACK_TO_SEARCH |
|
from web_agent_site.engine.goal import get_reward, get_goals |
|
from web_agent_site.utils import DEFAULT_FILE_PATH, FEAT_CONV, FEAT_IDS, random_idx |
|
app = Flask(__name__) |
|
|
|
class WebAgentTextEnv(gym.Env): |
|
|
|
def __init__(self, observation_mode='html', file_path=DEFAULT_FILE_PATH, server=None, **kwargs): |
|
super(WebAgentTextEnv, self).__init__() |
|
self.observation_mode = observation_mode |
|
self.kwargs = kwargs |
|
self.file_path = file_path |
|
self.base_url = 'http://127.0.0.1:3000' |
|
self.server = SimServer(self.base_url, self.file_path, self.kwargs.get('filter_goals'), self.kwargs.get('limit_goals', -1), self.kwargs.get('num_products'), self.kwargs.get('human_goals'), self.kwargs.get('show_attrs', False)) if server is None else server |
|
self.browser = SimBrowser(self.server) |
|
self.session = self.kwargs.get('session') |
|
self.session_prefix = self.kwargs.get('session_prefix') |
|
if self.kwargs.get('get_image', 0): |
|
self.feats = torch.load(FEAT_CONV) |
|
self.ids = torch.load(FEAT_IDS) |
|
self.ids = {url: idx for (idx, url) in enumerate(self.ids)} |
|
self.prev_obs = [] |
|
self.prev_actions = [] |
|
self.num_prev_obs = self.kwargs.get('num_prev_obs', 0) |
|
self.num_prev_actions = self.kwargs.get('num_prev_actions', 0) |
|
self.reset() |
|
|
|
def step(self, action): |
|
info = None |
|
self.get_available_actions() |
|
(action_name, action_arg) = parse_action(action) |
|
if action_arg is not None: |
|
action_arg = action_arg.lower() |
|
if action_name == 'search' and action_arg is not None and (action_arg != ''): |
|
status = self.browser.search(action_arg) |
|
elif action_name == 'click' and action_arg in self.text_to_clickable.keys() and (action_arg != 'search'): |
|
status = self.browser.click(action_arg, self.text_to_clickable) |
|
else: |
|
status = dict(reward=0, done=False) |
|
ob = self.observation |
|
text_list = [ob] |
|
self.prev_actions.append(action) |
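# Interleave up to num_prev_actions previous actions and num_prev_obs previous
# observations with the current observation; reversing text_list below puts the
# oldest context first and the newest observation last in the returned state.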
|
for i in range(1, 1 + max(self.num_prev_obs, self.num_prev_actions)): |
|
if len(self.prev_actions) >= i and self.num_prev_actions >= i: |
|
text_list.append(self.prev_actions[-i]) |
|
if len(self.prev_obs) >= i and self.num_prev_obs >= i: |
|
text_list.append(self.prev_obs[-i]) |
|
state = ' [SEP] '.join(text_list[::-1]) |
|
self.prev_obs.append(ob) |
|
return (state, status['reward'], status['done'], info) |
|
|
|
def get_available_actions(self): |
|
html_obj = self._parse_html() |
|
search_bar = html_obj.find(id='search_input') |
|
has_search_bar = search_bar is not None
|
buttons = html_obj.find_all(class_='btn') |
|
product_links = html_obj.find_all(class_='product-link') |
|
buying_options = html_obj.select('input[type="radio"]') |
|
self.text_to_clickable = {f'{b.get_text()}'.lower(): b for b in buttons + product_links} |
|
for opt in buying_options: |
|
opt_value = opt.get('value') |
|
self.text_to_clickable[f'{opt_value}'] = opt |
|
return dict(has_search_bar=has_search_bar, clickables=list(self.text_to_clickable.keys())) |
|
|
|
def get_image(self): |
|
html_obj = self._parse_html(self.browser.page_source) |
|
image_url = html_obj.find(id='product-image') |
|
if image_url is not None: |
|
image_url = image_url['src'] |
|
if image_url in self.ids: |
|
image_idx = self.ids[image_url] |
|
image = self.feats[image_idx] |
|
return image |
|
return torch.zeros(512) |
|
|
|
def get_instruction_text(self): |
|
html_obj = self._parse_html(self.browser.page_source) |
|
instruction_text = html_obj.find(id='instruction-text').h4.text |
|
return instruction_text |
|
|
|
def _parse_html(self, html=None): |
|
if html is None: |
|
html = self.state['html'] |
|
html_obj = BeautifulSoup(html, 'html.parser') |
|
return html_obj |
|
|
|
@property |
|
def observation(self): |
|
html = self.state['html'] |
|
if self.observation_mode == 'html': |
|
return html |
|
elif self.observation_mode == 'text': |
|
return self.convert_html_to_text(html, simple=True) |
|
elif self.observation_mode == 'text_rich': |
|
return self.convert_html_to_text(html, simple=False) |
|
elif self.observation_mode == 'url': |
|
return self.state['url'] |
|
else: |
|
raise ValueError(f'Observation mode {self.observation_mode} not supported.') |
|
|
|
@property |
|
def state(self): |
|
return dict(url=self.browser.current_url, html=self.browser.page_source, instruction_text=self.instruction_text) |
|
|
|
def convert_html_to_text(self, html, simple=False): |
|
texts = self._parse_html(html).findAll(text=True) |
|
visible_texts = filter(tag_visible, texts) |
|
if simple: |
|
return ' [SEP] '.join((t.strip() for t in visible_texts if t != '\n')) |
|
else: |
|
observation = '' |
|
for t in visible_texts: |
|
if t == '\n': |
|
continue |
|
if t.parent.name == 'button': |
|
processed_t = f'[button] {t} [button_]' |
|
elif t.parent.name == 'label': |
|
if f'"{t}"' in self.state['url']: |
|
processed_t = f' [clicked button] {t} [clicked button_]' |
|
observation = f'You have clicked {t}.\n' + observation |
|
else: |
|
processed_t = f' [button] {t} [button_]' |
|
elif t.parent.get('class') == ['product-link']: |
|
if f'{t}' in self.server.user_sessions[self.session]['asins']: |
|
processed_t = f'\n[clicked button] {t} [clicked button_]' |
|
else: |
|
processed_t = f'\n[button] {t} [button_]' |
|
else: |
|
processed_t = str(t) |
|
observation += processed_t + '\n' |
|
return observation |
|
|
|
def reset(self, session=None, instruction_text=None): |
|
session_int = None |
|
if session is not None: |
|
self.session = str(session) |
|
if isinstance(session, int): |
|
session_int = session |
|
else: |
|
self.session = ''.join(random.choices(string.ascii_lowercase, k=10)) |
|
if self.session_prefix is not None: |
|
self.session = self.session_prefix + self.session |
|
init_url = f'{self.base_url}/{self.session}' |
|
self.browser.get(init_url, session_id=self.session, session_int=session_int) |
|
self.text_to_clickable = None |
|
self.instruction_text = self.get_instruction_text() if instruction_text is None else instruction_text |
|
obs = self.observation |
|
self.prev_obs = [obs] |
|
self.prev_actions = [] |
|
return (obs, None) |
|
|
|
def render(self, mode='human'): |
|
pass |
|
|
|
def close(self): |
|
pass |
|
|
|
def tag_visible(element): |
|
ignore = {'style', 'script', 'head', 'title', 'meta', '[document]'} |
|
return element.parent.name not in ignore and (not isinstance(element, Comment)) |
|
|
|
class SimServer: |
|
|
|
def __init__(self, base_url, file_path, filter_goals=None, limit_goals=-1, num_products=None, human_goals=0, show_attrs=False): |
|
self.base_url = base_url |
|
(self.all_products, self.product_item_dict, self.product_prices, _) = load_products(filepath=file_path, num_products=num_products, human_goals=human_goals) |
|
self.search_engine = init_search_engine(num_products=num_products) |
|
self.goals = get_goals(self.all_products, self.product_prices, human_goals) |
|
self.show_attrs = show_attrs |
|
random.seed(233) |
|
random.shuffle(self.goals) |
|
if filter_goals is not None: |
|
self.goals = [goal for (i, goal) in enumerate(self.goals) if filter_goals(i, goal)] |
|
if limit_goals != -1 and limit_goals < len(self.goals): |
|
self.weights = [goal['weight'] for goal in self.goals] |
|
self.cum_weights = [0] + np.cumsum(self.weights).tolist() |
|
idxs = [] |
|
while len(idxs) < limit_goals: |
|
idx = random_idx(self.cum_weights) |
|
if idx not in idxs: |
|
idxs.append(idx) |
|
self.goals = [self.goals[i] for i in idxs] |
|
print(f'Loaded {len(self.goals)} goals.') |
|
self.weights = [goal['weight'] for goal in self.goals] |
|
self.cum_weights = [0] + np.cumsum(self.weights).tolist() |
|
self.user_sessions = dict() |
|
self.search_time = 0 |
|
self.render_time = 0 |
|
self.sample_time = 0 |
|
self.assigned_instruction_text = None |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def index(self, session_id, **kwargs): |
|
html = map_action_to_html('start', session_id=session_id, instruction_text=kwargs['instruction_text']) |
|
url = f'{self.base_url}/{session_id}' |
|
return (html, url) |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def search_results(self, session_id, **kwargs): |
|
session = self.user_sessions[session_id] |
|
keywords = kwargs['keywords'] |
|
assert isinstance(keywords, list) |
|
page = 1 if 'page' not in kwargs else kwargs['page'] |
|
session['page'] = page |
|
session['keywords'] = keywords |
|
session['actions']['search'] += 1 |
|
session['asin'] = None |
|
session['options'] = {} |
|
old_time = time.time() |
|
top_n_products = get_top_n_product_from_keywords(keywords, self.search_engine, self.all_products, self.product_item_dict) |
|
self.search_time += time.time() - old_time |
|
products = get_product_per_page(top_n_products, page) |
|
keywords_url_string = '+'.join(keywords) |
|
url = f'{self.base_url}/search_results/{session_id}/{keywords_url_string}/{page}' |
|
old_time = time.time() |
|
html = map_action_to_html('search', session_id=session_id, products=products, keywords=session['keywords'], page=page, total=len(top_n_products), instruction_text=session['goal']['instruction_text']) |
|
self.render_time += time.time() - old_time |
|
return (html, url) |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def item_page(self, session_id, **kwargs): |
|
session = self.user_sessions[session_id] |
|
clickable_name = kwargs['clickable_name'] |
|
text_to_clickable = kwargs['text_to_clickable'] |
|
clickable = text_to_clickable[clickable_name] |
|
if clickable.get('class') is not None and clickable.get('class')[0] == 'product-link': |
|
session['asin'] = clickable_name.upper() |
|
session['actions']['asin'] += 1 |
|
session['asins'].add(session['asin']) |
|
elif clickable.get('name') is not None: |
|
clickable_key = clickable['name'].lower() |
|
session['options'][clickable_key] = clickable_name |
|
session['actions']['options'] += 1 |
|
product_info = self.product_item_dict[session['asin']] |
|
keywords_url_string = '+'.join(session['keywords']) |
|
option_string = json.dumps(session['options']) |
|
url = f"{self.base_url}/item_page/{session_id}/{session['asin']}/{keywords_url_string}/{session['page']}/{option_string}" |
|
html = map_action_to_html('click', session_id=session_id, product_info=product_info, keywords=session['keywords'], page=session['page'], asin=session['asin'], options=session['options'], instruction_text=session['goal']['instruction_text'], show_attrs=self.show_attrs) |
|
return (html, url) |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def item_sub_page(self, session_id, **kwargs): |
|
session = self.user_sessions[session_id] |
|
clickable_name = kwargs['clickable_name'] |
|
for k in ACTION_TO_TEMPLATE: |
|
if clickable_name.lower() == k.lower(): |
|
clickable_name = k |
|
break |
|
product_info = self.product_item_dict[session['asin']] |
|
session['actions'][clickable_name] += 1 |
|
keywords_url_string = '+'.join(session['keywords']) |
|
url = f"{self.base_url}/item_sub_page/{session_id}/{session['asin']}/{keywords_url_string}/{session['page']}/{clickable_name}/{session['options']}" |
|
html = map_action_to_html(f'click[{clickable_name}]', session_id=session_id, product_info=product_info, keywords=session['keywords'], page=session['page'], asin=session['asin'], options=session['options'], instruction_text=session['goal']['instruction_text']) |
|
return (html, url) |
|
|
|
@app.route('/', methods=['GET', 'POST']) |
|
def done(self, session_id, **kwargs): |
|
session = self.user_sessions[session_id] |
|
goal = self.user_sessions[session_id]['goal'] |
|
purchased_product = self.product_item_dict[session['asin']] |
|
session['actions']['purchase'] += 1 |
|
price = self.product_prices.get(session['asin']) |
|
(reward, info) = get_reward(purchased_product, goal, price=price, options=session['options'], verbose=True) |
|
self.user_sessions[session_id]['verbose_info'] = info |
|
self.user_sessions[session_id]['done'] = True |
|
self.user_sessions[session_id]['reward'] = reward |
|
url = f"{self.base_url}/done/{session_id}/{session['asin']}/{session['options']}" |
|
html = map_action_to_html(f'click[{END_BUTTON}]', session_id=session_id, reward=reward, asin=session['asin'], options=session['options'], instruction_text=session['goal']['instruction_text']) |
|
return (html, url, reward) |
|
|
|
def receive(self, session_id, current_url, session_int=None, **kwargs): |
|
status = dict(reward=0.0, done=False) |
|
with app.app_context(), app.test_request_context(): |
|
if session_id not in self.user_sessions: |
|
idx = session_int if session_int is not None and isinstance(session_int, int) else random_idx(self.cum_weights) |
|
goal = self.goals[idx] |
|
instruction_text = goal['instruction_text'] |
|
self.user_sessions[session_id] = {'goal': goal, 'done': False} |
|
else: |
|
instruction_text = self.user_sessions[session_id]['goal']['instruction_text'] |
|
if self.assigned_instruction_text is not None: |
|
instruction_text = self.assigned_instruction_text |
|
self.user_sessions[session_id]['goal']['instruction_text'] = instruction_text |
|
session = self.user_sessions[session_id] |
|
if not kwargs: |
|
kwargs['instruction_text'] = instruction_text |
|
(html, url) = self.index(session_id, **kwargs) |
|
self.user_sessions[session_id].update({'keywords': None, 'page': None, 'asin': None, 'asins': set(), 'options': dict(), 'actions': defaultdict(int)}) |
|
elif 'keywords' in kwargs: |
|
(html, url) = self.search_results(session_id, **kwargs) |
|
elif 'clickable_name' in kwargs: |
|
clickable_name = kwargs['clickable_name'].lower() |
|
if clickable_name == END_BUTTON.lower(): |
|
(html, url, reward) = self.done(session_id, **kwargs) |
|
status['reward'] = reward |
|
status['done'] = True |
|
elif clickable_name == BACK_TO_SEARCH.lower(): |
|
(html, url, status) = self.receive(session_id, current_url) |
|
elif clickable_name == NEXT_PAGE.lower() and self.get_page_name(current_url) == 'search_results': |
|
(html, url, status) = self.receive(session_id, current_url, keywords=session['keywords'], page=session['page'] + 1) |
|
elif clickable_name == PREV_PAGE.lower() and self.get_page_name(current_url) == 'search_results': |
|
(html, url, status) = self.receive(session_id, current_url, keywords=session['keywords'], page=session['page'] - 1) |
|
elif clickable_name == PREV_PAGE.lower() and self.get_page_name(current_url) == 'item_sub_page': |
|
(html, url) = self.item_page(session_id, **kwargs) |
|
elif clickable_name == PREV_PAGE.lower() and self.get_page_name(current_url) == 'item_page': |
|
(html, url) = self.search_results(session_id, keywords=session['keywords'], page=session['page'], **kwargs) |
|
elif clickable_name in [k.lower() for k in ACTION_TO_TEMPLATE]: |
|
(html, url) = self.item_sub_page(session_id, **kwargs) |
|
else: |
|
(html, url) = self.item_page(session_id, **kwargs) |
|
return (html, url, status) |
|
|
|
def get_page_name(self, url): |
|
if url is None: |
|
return None |
|
page_names = ['search_results', 'item_page', 'item_sub_page', 'done'] |
|
for page_name in page_names: |
|
if page_name in url: |
|
return page_name |
|
return '' |
|
|
|
class SimBrowser: |
|
|
|
def __init__(self, server): |
|
self.server = server |
|
self.current_url = None |
|
self.page_source = None |
|
self.session_id = None |
|
|
|
def get(self, url, session_id=None, session_int=None): |
|
self.session_id = url.split('/')[-1] if session_id is None else session_id |
|
(self.page_source, _, _) = self.server.receive(self.session_id, self.current_url, session_int=session_int) |
|
self.current_url = url |
|
|
|
def click(self, clickable_name, text_to_clickable): |
|
(self.page_source, self.current_url, status) = self.server.receive(self.session_id, current_url=self.current_url, clickable_name=clickable_name, text_to_clickable=text_to_clickable) |
|
return status |
|
|
|
def search(self, keywords): |
|
if isinstance(keywords, str): |
|
keywords = keywords.split(' ') |
|
(self.page_source, self.current_url, status) = self.server.receive(self.session_id, current_url=self.current_url, keywords=keywords) |
|
return status |
|
|
|
# File: WebShop-master/web_agent_site/models/models.py |
|
"""""" |
|
import random |
|
random.seed(4) |
|
|
|
class BasePolicy: |
|
|
|
def __init__(self): |
|
pass |
|
|
|
def forward(self, observation, available_actions):
|
raise NotImplementedError |
|
|
|
class HumanPolicy(BasePolicy): |
|
|
|
def __init__(self): |
|
super().__init__() |
|
|
|
def forward(self, observation, available_actions): |
|
action = input('> ') |
|
return action |
|
|
|
class RandomPolicy(BasePolicy): |
|
|
|
def __init__(self): |
|
super().__init__() |
|
|
|
def forward(self, observation, available_actions): |
|
if available_actions['has_search_bar']: |
|
action = 'search[shoes]' |
|
else: |
|
action_arg = random.choice(available_actions['clickables']) |
|
action = f'click[{action_arg}]' |
|
return action |
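# A minimal rollout sketch pairing RandomPolicy with the simulated text
# environment from web_agent_site/envs/web_agent_text_env.py; it assumes the
# default product data files referenced in web_agent_site/utils.py (and the
# matching search index) are available locally.
if __name__ == '__main__':
    from web_agent_site.envs.web_agent_text_env import WebAgentTextEnv

    env = WebAgentTextEnv(observation_mode='text', num_products=100)
    policy = RandomPolicy()
    observation, _ = env.reset()
    reward, done = 0.0, False
    for _ in range(100):  # cap episode length for this sketch
        available_actions = env.get_available_actions()
        action = policy.forward(observation, available_actions)
        observation, reward, done, _ = env.step(action)
        if done:
            break
    print('Final reward:', reward)
    env.close()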
|
|
|
# File: WebShop-master/web_agent_site/utils.py |
|
import bisect |
|
import hashlib |
|
import logging |
|
import random |
|
from os.path import dirname, abspath, join |
|
BASE_DIR = dirname(abspath(__file__)) |
|
DEBUG_PROD_SIZE = None |
|
DEFAULT_ATTR_PATH = join(BASE_DIR, '../data/items_ins_v2_1000.json') |
|
DEFAULT_FILE_PATH = join(BASE_DIR, '../data/items_shuffle_1000.json') |
|
DEFAULT_REVIEW_PATH = join(BASE_DIR, '../data/reviews.json') |
|
FEAT_CONV = join(BASE_DIR, '../data/feat_conv.pt') |
|
FEAT_IDS = join(BASE_DIR, '../data/feat_ids.pt') |
|
HUMAN_ATTR_PATH = join(BASE_DIR, '../data/items_human_ins.json') |
|
|
|
|
def random_idx(cum_weights): |
|
pos = random.uniform(0, cum_weights[-1]) |
|
idx = bisect.bisect(cum_weights, pos) |
|
idx = min(idx, len(cum_weights) - 2) |
|
return idx |
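# Sampling sketch: cum_weights is [0] followed by the cumulative goal weights,
# e.g. [0, 0.5, 1.5, 3.0]; a uniform draw pos in [0, 3.0] is mapped to an index
# with bisect and clamped to stay in range, e.g. pos = 2.0 -> bisect(...) = 3 ->
# min(3, 2) = 2.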
|
|
|
def setup_logger(session_id, user_log_dir): |
|
logger = logging.getLogger(session_id) |
|
formatter = logging.Formatter('%(message)s') |
|
file_handler = logging.FileHandler(user_log_dir / f'{session_id}.jsonl', mode='w') |
|
file_handler.setFormatter(formatter) |
|
logger.setLevel(logging.INFO) |
|
logger.addHandler(file_handler) |
|
return logger |
|
|
|
def generate_mturk_code(session_id: str) -> str: |
|
sha = hashlib.sha1(session_id.encode()) |
|
return sha.hexdigest()[:10].upper() |
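# Deterministic example (SHA-1 of 'abc' is the standard test vector):
#   >>> generate_mturk_code('abc')
#   'A9993E3647'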
|
|
|
|