python_code | repo_name | file_path |
---|---|---|
# pylint: disable=no-self-use,invalid-name
from typing import List
import pytest
from allennlp.data.fields import TextField
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.dataset_readers.dataset_reader import _LazyInstances
from adversarialnlp.generators.swag.swag_generator import SwagGenerator
from adversarialnlp.generators.swag.activitynet_captions import ActivityNetCaptionsDatasetReader
from adversarialnlp.tests.utils import FIXTURES_ROOT
class GeneratorTest(AllenNlpTestCase):
def setUp(self):
super(GeneratorTest, self).setUp()
self.token_indexers = {"tokens": SingleIdTokenIndexer()}
self.vocab = Vocabulary()
self.this_index = self.vocab.add_token_to_namespace('this')
self.is_index = self.vocab.add_token_to_namespace('is')
self.a_index = self.vocab.add_token_to_namespace('a')
self.sentence_index = self.vocab.add_token_to_namespace('sentence')
self.another_index = self.vocab.add_token_to_namespace('another')
self.yet_index = self.vocab.add_token_to_namespace('yet')
self.very_index = self.vocab.add_token_to_namespace('very')
self.long_index = self.vocab.add_token_to_namespace('long')
instances = [
self.create_instance(["this", "is", "a", "sentence"], ["this", "is", "another", "sentence"]),
self.create_instance(["yet", "another", "sentence"],
["this", "is", "a", "very", "very", "very", "very", "long", "sentence"]),
]
class LazyIterable:
def __iter__(self):
return (instance for instance in instances)
self.instances = instances
self.lazy_instances = LazyIterable()
def create_instance(self, first_sentence: List[str], second_sentence: List[str]):
first_tokens = [Token(t) for t in first_sentence]
second_tokens = [Token(t) for t in second_sentence]
instance = Instance({'first_sentence': TextField(first_tokens, self.token_indexers),
'second_sentence': TextField(second_tokens, self.token_indexers)})
return instance
def assert_instances_are_correct(self, candidate_instances):
# First we need to remove padding tokens from the candidates.
# pylint: disable=protected-access
candidate_instances = [tuple(w for w in instance if w != 0) for instance in candidate_instances]
expected_instances = [tuple(instance.fields["first_sentence"]._indexed_tokens["tokens"])
for instance in self.instances]
assert set(candidate_instances) == set(expected_instances)
class TestSwagGenerator(GeneratorTest):
    # The SWAG generator reads its seed captions from the ActivityNet fixture,
    # so the toy instances built in ``setUp`` are not needed as seeds here.
    def test_yield_one_epoch_generation_over_the_data_once(self):
        generator = SwagGenerator(num_examples=1)
        test_instances = ActivityNetCaptionsDatasetReader().read(FIXTURES_ROOT / 'activitynet_captions.json')
        batches = list(generator(test_instances))
        # We just want to get the single-token array for the text field in the instance.
        instances = [tuple(instance.detach().cpu().numpy())
                     for batch in batches
                     for instance in batch['text']["tokens"]]
        assert len(instances) == 5
        self.assert_instances_are_correct(instances)
| adversarialnlp-master | adversarialnlp/tests/generators/swag_generator_test.py |
adversarialnlp-master | adversarialnlp/tests/generators/__init__.py |
|
# pylint: disable=no-self-use,invalid-name
from typing import List
import pytest
from adversarialnlp.generators.addsent.addsent_generator import AddSentGenerator
from adversarialnlp.generators.addsent.squad_reader import squad_reader
from adversarialnlp.common.file_utils import FIXTURES_ROOT
class TestAddSentGenerator:
    # Check that one epoch of generation over the SQuAD fixture yields the
    # expected number of adversarial examples.
def test_yield_one_epoch_generation_over_the_data_once(self):
generator = AddSentGenerator()
test_instances = squad_reader(FIXTURES_ROOT / 'squad.json')
batches = list(generator(test_instances, num_epochs=1))
        assert len(batches) == 5
| adversarialnlp-master | adversarialnlp/tests/generators/addsent_generator_test.py |
adversarialnlp-master | adversarialnlp/common/__init__.py |
|
#!/usr/bin/env python3
# pylint: disable=invalid-name,protected-access
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Utilities for downloading and building data.
These can be replaced if your particular file system does not support them.
"""
from typing import Union, List
from pathlib import Path
import time
import datetime
import os
import shutil
import requests
MODULE_ROOT = Path(__file__).parent.parent
FIXTURES_ROOT = (MODULE_ROOT / "tests" / "fixtures").resolve()
PACKAGE_ROOT = MODULE_ROOT.parent
DATA_ROOT = (PACKAGE_ROOT / "data").resolve()
class ProgressLogger(object):
"""Throttles and display progress in human readable form."""
def __init__(self, throttle=1, should_humanize=True):
"""Initialize Progress logger.
:param throttle: default 1, number in seconds to use as throttle rate
:param should_humanize: default True, whether to humanize data units
"""
self.latest = time.time()
self.throttle_speed = throttle
self.should_humanize = should_humanize
def humanize(self, num, suffix='B'):
"""Convert units to more human-readable format."""
if num < 0:
return num
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def log(self, curr, total, width=40, force=False):
"""Display a bar showing the current progress."""
if curr == 0 and total == -1:
print('[ no data received for this file ]', end='\r')
return
curr_time = time.time()
        if not force and curr_time - self.latest < self.throttle_speed:
            return
        self.latest = curr_time
done = min(curr * width // total, width)
remain = width - done
if self.should_humanize:
curr = self.humanize(curr)
total = self.humanize(total)
progress = '[{}{}] {} / {}'.format(
''.join(['|'] * done),
''.join(['.'] * remain),
curr,
total
)
print(progress, end='\r')
def built(path, version_string=None):
"""Checks if '.built' flag has been set for that task.
If a version_string is provided, this has to match, or the version
is regarded as not built.
"""
built_file_path = os.path.join(path, '.built')
if not os.path.isfile(built_file_path):
return False
else:
with open(built_file_path, 'r') as built_file:
text = built_file.read().split('\n')
if len(text) <= 2:
return False
for fname in text[1:-1]:
if not os.path.isfile(os.path.join(path, fname)) and not os.path.isdir(os.path.join(path, fname)):
return False
return text[-1] == version_string if version_string else True
def mark_done(path, fnames, version_string='vXX'):
"""Marks the path as done by adding a '.built' file with the current
timestamp plus a version description string if specified.
"""
with open(os.path.join(path, '.built'), 'w') as built_file:
built_file.write(str(datetime.datetime.today()))
for fname in fnames:
fname = fname.replace('.tar.gz', '').replace('.tgz', '').replace('.gz', '').replace('.zip', '')
built_file.write('\n' + fname)
built_file.write('\n' + version_string)
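# Illustrative layout of the '.built' flag file written by
# ``mark_done(path, ['train-v1.1.json'], 'v1.0')`` (the timestamp is made up):
#
#     2018-10-02 16:32:10.123456
#     train-v1.1.json
#     v1.0
#
# ``built(path, 'v1.0')`` then returns True only while every listed file still
# exists and the final line matches the requested version string.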
def download(url, path, fname, redownload=False):
"""Downloads file using `requests`. If ``redownload`` is set to false, then
will not download tar file again if it is present (default ``True``)."""
outfile = os.path.join(path, fname)
curr_download = not os.path.isfile(outfile) or redownload
print("[ downloading: " + url + " to " + outfile + " ]")
retry = 5
exp_backoff = [2 ** r for r in reversed(range(retry))]
logger = ProgressLogger()
while curr_download and retry >= 0:
resume_file = outfile + '.part'
resume = os.path.isfile(resume_file)
if resume:
resume_pos = os.path.getsize(resume_file)
mode = 'ab'
else:
resume_pos = 0
mode = 'wb'
response = None
with requests.Session() as session:
try:
header = {'Range': 'bytes=%d-' % resume_pos,
'Accept-Encoding': 'identity'} if resume else {}
response = session.get(url, stream=True, timeout=5, headers=header)
# negative reply could be 'none' or just missing
if resume and response.headers.get('Accept-Ranges', 'none') == 'none':
resume_pos = 0
mode = 'wb'
CHUNK_SIZE = 32768
total_size = int(response.headers.get('Content-Length', -1))
# server returns remaining size if resuming, so adjust total
total_size += resume_pos
done = resume_pos
with open(resume_file, mode) as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if total_size > 0:
done += len(chunk)
if total_size < done:
# don't freak out if content-length was too small
total_size = done
logger.log(done, total_size)
break
except requests.exceptions.ConnectionError:
retry -= 1
# TODO Better way to clean progress bar?
print(''.join([' '] * 60), end='\r')
if retry >= 0:
print('Connection error, retrying. (%d retries left)' % retry)
time.sleep(exp_backoff[retry])
else:
print('Retried too many times, stopped retrying.')
finally:
if response:
response.close()
if retry < 0:
raise RuntimeWarning('Connection broken too many times. Stopped retrying.')
if curr_download and retry > 0:
logger.log(done, total_size, force=True)
print()
if done < total_size:
raise RuntimeWarning('Received less data than specified in ' +
'Content-Length header for ' + url + '.' +
' There may be a download problem.')
move(resume_file, outfile)
def make_dir(path):
"""Makes the directory and any nonexistent parent directories."""
# the current working directory is a fine path
if path != '':
os.makedirs(path, exist_ok=True)
def move(path1, path2):
"""Renames the given file."""
shutil.move(path1, path2)
def remove_dir(path):
"""Removes the given directory, if it exists."""
shutil.rmtree(path, ignore_errors=True)
def untar(path, fname, deleteTar=True):
"""Unpacks the given archive file to the same directory, then (by default)
deletes the archive file.
"""
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
if '.tar.gz' in fname:
shutil.unpack_archive(fullpath, path, format='gztar')
else:
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
def cat(file1, file2, outfile, deleteFiles=True):
with open(outfile, 'wb') as wfd:
for f in [file1, file2]:
with open(f, 'rb') as fd:
shutil.copyfileobj(fd, wfd, 1024 * 1024 * 10)
# 10MB per writing chunk to avoid reading big file into memory.
if deleteFiles:
os.remove(file1)
os.remove(file2)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def download_from_google_drive(gd_id, destination):
"""Uses the requests package to download a file from Google Drive."""
URL = 'https://docs.google.com/uc?export=download'
with requests.Session() as session:
response = session.get(URL, params={'id': gd_id}, stream=True)
token = _get_confirm_token(response)
if token:
response.close()
params = {'id': gd_id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
CHUNK_SIZE = 32768
with open(destination, 'wb') as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
response.close()
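# Illustrative call (the Google Drive id and destination path are placeholders):
#
# download_from_google_drive('0B_dummy_file_id', '/tmp/archive.tar.gz')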
def download_files(fnames: List[Union[str, Path]],
local_folder: str,
version: str = 'v1.0',
paths: Union[List[str], str] = 'aws') -> List[str]:
r"""Download model/data files from a url.
Args:
fnames: List of filenames to download
local_folder: Sub-folder of `./data` where models/data will
be downloaded.
        version: Version of the model.
        paths: URL or respective URLs for downloading the files.
    Return:
        List[str]: List of downloaded file paths.
If the downloaded file was a compressed file (`.tar.gz`,
`.zip`, `.tgz`, `.gz`), return the path of the folder
containing the extracted files.
"""
dpath = str(DATA_ROOT / local_folder)
out_paths = list(dpath + '/' + fname.replace('.tar.gz', '').replace('.tgz', '').replace('.gz', '').replace('.zip', '')
for fname in fnames)
if not built(dpath, version):
for fname in fnames:
print('[building data: ' + dpath + '/' + fname + ']')
if built(dpath):
# An older version exists, so remove these outdated files.
remove_dir(dpath)
make_dir(dpath)
if isinstance(paths, str):
paths = [paths] * len(fnames)
# Download the data.
for fname, path in zip(fnames, paths):
if path == 'aws':
url = 'http://huggingface.co/downloads/models/'
url += local_folder + '/'
url += fname
else:
url = path + '/' + fname
download(url, dpath, fname)
if '.tar.gz' in fname or '.tgz' in fname or '.gz' in fname or '.zip' in fname:
untar(dpath, fname)
# Mark the data as built.
mark_done(dpath, fnames, version)
return out_paths
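# Minimal usage sketch (mirrors how ``squad_reader`` fetches its default seeds;
# the filename and URL below come from that reader, everything else is assumed):
#
# from adversarialnlp.common.file_utils import download_files
#
# paths = download_files(fnames=['train-v1.1.json'],
#                        paths='https://rajpurkar.github.io/SQuAD-explorer/dataset/',
#                        local_folder='squad')
# # -> [str(DATA_ROOT / 'squad' / 'train-v1.1.json')], downloaded once and
# #    skipped on later calls thanks to the '.built' flag.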
| adversarialnlp-master | adversarialnlp/common/file_utils.py |
from typing import Dict
import argparse
import logging
from allennlp.commands.subcommand import Subcommand
from allennlp.common.util import import_submodules
from adversarialnlp import __version__
from adversarialnlp.commands.test_install import TestInstall
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def main(prog: str = None,
subcommand_overrides: Dict[str, Subcommand] = {}) -> None:
"""
:mod:`~adversarialnlp.run` command.
"""
# pylint: disable=dangerous-default-value
parser = argparse.ArgumentParser(description="Run AdversarialNLP", usage='%(prog)s', prog=prog)
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
subparsers = parser.add_subparsers(title='Commands', metavar='')
subcommands = {
# Default commands
"test-install": TestInstall(),
# Superseded by overrides
**subcommand_overrides
}
for name, subcommand in subcommands.items():
subparser = subcommand.add_subparser(name, subparsers)
# configure doesn't need include-package because it imports
# whatever classes it needs.
if name != "configure":
subparser.add_argument('--include-package',
type=str,
action='append',
default=[],
help='additional packages to include')
args = parser.parse_args()
# If a subparser is triggered, it adds its work as `args.func`.
# So if no such attribute has been added, no subparser was triggered,
# so give the user some help.
if 'func' in dir(args):
# Import any additional modules needed (to register custom classes).
for package_name in getattr(args, 'include_package', ()):
import_submodules(package_name)
args.func(args)
else:
parser.print_help()
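# Illustrative invocation of the CLI defined above (mirrors the ``test-install``
# help text; the extra package name is an assumption):
#
# $ adversarialnlp test-install --include-package my_custom_generators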
| adversarialnlp-master | adversarialnlp/commands/__init__.py |
"""
The ``test-install`` subcommand verifies
an installation by running the unit tests.
.. code-block:: bash
$ adversarialnlp test-install --help
usage: adversarialnlp test-install [-h] [--run-all]
[--include-package INCLUDE_PACKAGE]
Test that installation works by running the unit tests.
optional arguments:
-h, --help show this help message and exit
--run-all By default, we skip tests that are slow or download
large files. This flag will run all tests.
--include-package INCLUDE_PACKAGE
additional packages to include
"""
import argparse
import logging
import os
import pathlib
import pytest
from allennlp.commands.subcommand import Subcommand
import adversarialnlp
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class TestInstall(Subcommand):
def add_subparser(self, name: str, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
# pylint: disable=protected-access
description = '''Test that installation works by running the unit tests.'''
subparser = parser.add_parser(
name, description=description, help='Run the unit tests.')
subparser.add_argument('--run-all', action="store_true",
help="By default, we skip tests that are slow "
"or download large files. This flag will run all tests.")
subparser.set_defaults(func=_run_test)
return subparser
def _get_module_root():
return pathlib.Path(adversarialnlp.__file__).parent
def _run_test(args: argparse.Namespace):
initial_working_dir = os.getcwd()
module_parent = _get_module_root().parent
logger.info("Changing directory to %s", module_parent)
os.chdir(module_parent)
test_dir = os.path.join(module_parent, "adversarialnlp")
logger.info("Running tests at %s", test_dir)
if args.run_all:
# TODO(nfliu): remove this when notebooks have been rewritten as markdown.
exit_code = pytest.main([test_dir, '--color=no', '-k', 'not notebooks_test'])
else:
exit_code = pytest.main([test_dir, '--color=no', '-k', 'not sniff_test and not notebooks_test',
'-m', 'not java'])
# Change back to original working directory after running tests
os.chdir(initial_working_dir)
exit(exit_code)
| adversarialnlp-master | adversarialnlp/commands/test_install.py |
from .generator import Generator
from .swag import SwagGenerator
from .addsent import AddSentGenerator
| adversarialnlp-master | adversarialnlp/generators/__init__.py |
import logging
from typing import Dict, Union, Iterable, List
from collections import defaultdict
import itertools
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Generator():
r"""An abstract ``Generator`` class.
    A ``Generator`` takes as input an iterable of seeds (for example,
    samples from a training dataset) and edits them to generate
    potential adversarial examples.
This class is an abstract class. To implement a ``Generator``, you
should override the `generate_from_seed(self, seed: any)` method
with a specific method to use for yielding adversarial samples
from a seed sample.
Optionally, you should also:
- define a typing class for the ``seed`` objects
    - define a default seed source in the ``__init__`` method, for
      example by downloading an appropriate dataset. See examples
in the ``AddSentGenerator`` class.
Args:
default_seeds: Default Iterable to use as source of seeds.
        quiet: If True, suppress debugging output.
Inputs:
**seed_instances** (optional): Instances to use as seed
            for adversarial example generation. If None, uses the
            default_seeds provided at class instantiation.
            Defaults to None.
**num_epochs** (optional): How many times should we iterate
            over the seeds? If None, we will iterate over them forever.
            Defaults to None.
        **shuffle** (optional): Shuffle the instances before iterating.
            Defaults to True.
Yields:
**adversarial_examples** (Iterable): Adversarial examples
generated from the seeds.
Examples::
        >>> generator = Generator()
        >>> examples = generator(num_epochs=1)
"""
def __init__(self,
default_seeds: Iterable = None,
quiet: bool = False):
self.default_seeds = default_seeds
self.quiet: bool = quiet
self._epochs: Dict[int, int] = defaultdict(int)
def generate_from_seed(self, seed: any):
r"""Generate an adversarial example from a seed.
"""
raise NotImplementedError
def __call__(self,
seeds: Iterable = None,
num_epochs: int = None,
shuffle: bool = True) -> Iterable:
r"""Generate adversarial examples using _generate_from_seed.
Args:
seeds: Instances to use as seed for adversarial
example generation.
num_epochs: How many times should we iterate over the seeds?
If None, we will iterate over it forever.
shuffle: Shuffle the instances before iteration.
If True, we will shuffle the instances before iterating.
Yields: adversarial_examples
adversarial_examples: Adversarial examples generated
from the seeds.
"""
if seeds is None:
if self.default_seeds is not None:
seeds = self.default_seeds
else:
return
# Instances is likely to be a list, which cannot be used as a key,
# so we take the object id instead.
key = id(seeds)
starting_epoch = self._epochs[key]
if num_epochs is None:
epochs: Iterable[int] = itertools.count(starting_epoch)
else:
epochs = range(starting_epoch, starting_epoch + num_epochs)
for epoch in epochs:
self._epochs[key] = epoch
for seed in seeds:
yield from self.generate_from_seed(seed)
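# Minimal sketch of a concrete ``Generator`` subclass (illustrative only;
# ``EchoGenerator`` is not part of the library):
#
# class EchoGenerator(Generator):
#     def generate_from_seed(self, seed):
#         # A real generator would edit the seed here; we simply yield it back.
#         yield seed
#
# generator = EchoGenerator(default_seeds=["a seed sentence"])
# examples = list(generator(num_epochs=1))  # -> ["a seed sentence"]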
| adversarialnlp-master | adversarialnlp/generators/generator.py |
# Python wrapper for Stanford CoreNLP
# Copyright (c) 2017 Lynten Guo, 2018 Thomas Wolf
# Extracted and adapted from https://github.com/Lynten/stanford-corenlp
from __future__ import print_function
import glob
import json
import logging
import os
import re
import socket
import subprocess
import sys
import time
import psutil
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
class StanfordCoreNLP:
def __init__(self, path_or_host, port=None, memory='4g', lang='en', timeout=1500, quiet=True,
logging_level=logging.WARNING, max_retries=5):
self.path_or_host = path_or_host
self.port = port
self.memory = memory
self.lang = lang
self.timeout = timeout
self.quiet = quiet
self.logging_level = logging_level
logging.basicConfig(level=self.logging_level)
# Check args
self._check_args()
if path_or_host.startswith('http'):
self.url = path_or_host + ':' + str(port)
logging.info('Using an existing server {}'.format(self.url))
else:
# Check Java
if not subprocess.call(['java', '-version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) == 0:
raise RuntimeError('Java not found.')
# Check if the dir exists
if not os.path.isdir(self.path_or_host):
raise IOError(str(self.path_or_host) + ' is not a directory.')
directory = os.path.normpath(self.path_or_host) + os.sep
self.class_path_dir = directory
# Check if the language specific model file exists
switcher = {
'en': 'stanford-corenlp-[0-9].[0-9].[0-9]-models.jar',
'zh': 'stanford-chinese-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'ar': 'stanford-arabic-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'fr': 'stanford-french-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'de': 'stanford-german-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar',
'es': 'stanford-spanish-corenlp-[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-models.jar'
}
jars = {
'en': 'stanford-corenlp-x.x.x-models.jar',
'zh': 'stanford-chinese-corenlp-yyyy-MM-dd-models.jar',
'ar': 'stanford-arabic-corenlp-yyyy-MM-dd-models.jar',
'fr': 'stanford-french-corenlp-yyyy-MM-dd-models.jar',
'de': 'stanford-german-corenlp-yyyy-MM-dd-models.jar',
'es': 'stanford-spanish-corenlp-yyyy-MM-dd-models.jar'
}
if len(glob.glob(directory + switcher.get(self.lang))) <= 0:
                raise IOError(jars.get(
                    self.lang) + ' does not exist. You should download it and place it in ' + directory + ' first.')
# If port not set, auto select
# Commenting: see https://github.com/Lynten/stanford-corenlp/issues/26
# if self.port is None:
# for port_candidate in range(9000, 65535):
# if port_candidate not in [conn.laddr[1] for conn in psutil.net_connections()]:
# self.port = port_candidate
# break
self.port = 9999
# Check if the port is in use
# Also commenting: see https://github.com/Lynten/stanford-corenlp/issues/26
# if self.port in [conn.laddr[1] for conn in psutil.net_connections()]:
# raise IOError('Port ' + str(self.port) + ' is already in use.')
# Start native server
logging.info('Initializing native server...')
cmd = "java"
java_args = "-Xmx{}".format(self.memory)
java_class = "edu.stanford.nlp.pipeline.StanfordCoreNLPServer"
class_path = '"{}*"'.format(directory)
args = [cmd, java_args, '-cp', class_path, java_class, '-port', str(self.port)]
args = ' '.join(args)
logging.info(args)
# Silence
with open(os.devnull, 'w') as null_file:
out_file = None
if self.quiet:
out_file = null_file
self.p = subprocess.Popen(args, shell=True, stdout=out_file, stderr=subprocess.STDOUT)
logging.info('Server shell PID: {}'.format(self.p.pid))
self.url = 'http://localhost:' + str(self.port)
# Wait until server starts
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host_name = urlparse(self.url).hostname
time.sleep(1) # OSX, not tested
trial = 1
while sock.connect_ex((host_name, self.port)):
if trial > max_retries:
raise ValueError('Corenlp server is not available')
logging.info('Waiting until the server is available.')
trial += 1
time.sleep(1)
logging.info('The server is available.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
logging.info('Cleanup...')
if hasattr(self, 'p'):
try:
parent = psutil.Process(self.p.pid)
except psutil.NoSuchProcess:
logging.info('No process: {}'.format(self.p.pid))
return
if self.class_path_dir not in ' '.join(parent.cmdline()):
logging.info('Process not in: {}'.format(parent.cmdline()))
return
children = parent.children(recursive=True)
for process in children:
logging.info('Killing pid: {}, cmdline: {}'.format(process.pid, process.cmdline()))
# process.send_signal(signal.SIGTERM)
process.kill()
logging.info('Killing shell pid: {}, cmdline: {}'.format(parent.pid, parent.cmdline()))
# parent.send_signal(signal.SIGTERM)
parent.kill()
def annotate(self, text, properties=None):
if sys.version_info.major >= 3:
text = text.encode('utf-8')
r = requests.post(self.url, params={'properties': str(properties)}, data=text,
headers={'Connection': 'close'})
return r.text
def tregex(self, sentence, pattern):
tregex_url = self.url + '/tregex'
r_dict = self._request(tregex_url, "tokenize,ssplit,depparse,parse", sentence, pattern=pattern)
return r_dict
def tokensregex(self, sentence, pattern):
tokensregex_url = self.url + '/tokensregex'
r_dict = self._request(tokensregex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
return r_dict
def semgrex(self, sentence, pattern):
semgrex_url = self.url + '/semgrex'
r_dict = self._request(semgrex_url, "tokenize,ssplit,depparse", sentence, pattern=pattern)
return r_dict
def word_tokenize(self, sentence, span=False):
        r_dict = self._request(self.url, 'ssplit,tokenize', sentence)
tokens = [token['originalText'] for s in r_dict['sentences'] for token in s['tokens']]
# Whether return token span
if span:
spans = [(token['characterOffsetBegin'], token['characterOffsetEnd']) for s in r_dict['sentences'] for token
in s['tokens']]
return tokens, spans
else:
return tokens
def pos_tag(self, sentence):
r_dict = self._request(self.url, 'pos', sentence)
words = []
tags = []
for s in r_dict['sentences']:
for token in s['tokens']:
words.append(token['originalText'])
tags.append(token['pos'])
return list(zip(words, tags))
def ner(self, sentence):
r_dict = self._request(self.url, 'ner', sentence)
words = []
ner_tags = []
for s in r_dict['sentences']:
for token in s['tokens']:
words.append(token['originalText'])
ner_tags.append(token['ner'])
return list(zip(words, ner_tags))
def parse(self, sentence):
r_dict = self._request(self.url, 'pos,parse', sentence)
return [s['parse'] for s in r_dict['sentences']][0]
def dependency_parse(self, sentence):
r_dict = self._request(self.url, 'depparse', sentence)
return [(dep['dep'], dep['governor'], dep['dependent']) for s in r_dict['sentences'] for dep in
s['basicDependencies']]
def coref(self, text):
        r_dict = self._request(self.url, 'coref', text)
corefs = []
for k, mentions in r_dict['corefs'].items():
simplified_mentions = []
for m in mentions:
simplified_mentions.append((m['sentNum'], m['startIndex'], m['endIndex'], m['text']))
corefs.append(simplified_mentions)
return corefs
def switch_language(self, language="en"):
self._check_language(language)
self.lang = language
def _request(self, url, annotators=None, data=None, *args, **kwargs):
if sys.version_info.major >= 3:
data = data.encode('utf-8')
properties = {'annotators': annotators, 'outputFormat': 'json'}
params = {'properties': str(properties), 'pipelineLanguage': self.lang}
if 'pattern' in kwargs:
params = {"pattern": kwargs['pattern'], 'properties': str(properties), 'pipelineLanguage': self.lang}
logging.info(params)
r = requests.post(url, params=params, data=data, headers={'Connection': 'close'})
r_dict = json.loads(r.text)
return r_dict
def _check_args(self):
self._check_language(self.lang)
        if not re.match(r'\dg', self.memory):
            raise ValueError('memory=' + self.memory + ' not supported. Use 4g, 6g, 8g, etc.')
def _check_language(self, lang):
if lang not in ['en', 'zh', 'ar', 'fr', 'de', 'es']:
raise ValueError('lang=' + self.lang + ' not supported. Use English(en), Chinese(zh), Arabic(ar), '
'French(fr), German(de), Spanish(es).')
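# Illustrative usage (assumes a local CoreNLP distribution has been unpacked at
# the given path, or that a server is already running; both paths are placeholders):
#
# with StanfordCoreNLP(r'/path/to/stanford-corenlp-full-2018-02-27') as nlp:
#     print(nlp.word_tokenize('AdversarialNLP edits seed examples.'))
#     print(nlp.pos_tag('AdversarialNLP edits seed examples.'))
#
# # Or point the wrapper at an existing server:
# # nlp = StanfordCoreNLP('http://localhost', port=9999)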
| adversarialnlp-master | adversarialnlp/generators/addsent/corenlp.py |
from .addsent_generator import AddSentGenerator
from .squad_reader import squad_reader
| adversarialnlp-master | adversarialnlp/generators/addsent/__init__.py |
"""Utilities for AddSent generator."""
from typing import List, Dict, Tuple, Optional
class ConstituencyParse(object):
"""A CoreNLP constituency parse (or a node in a parse tree).
Word-level constituents have |word| and |index| set and no children.
Phrase-level constituents have no |word| or |index| and have at least one child.
"""
def __init__(self, tag, children=None, word=None, index=None):
self.tag = tag
if children:
self.children = children
else:
self.children = None
self.word = word
self.index = index
@classmethod
def _recursive_parse_corenlp(cls, tokens, i, j):
orig_i = i
if tokens[i] == '(':
tag = tokens[i + 1]
children = []
i = i + 2
while True:
child, i, j = cls._recursive_parse_corenlp(tokens, i, j)
if isinstance(child, cls):
children.append(child)
if tokens[i] == ')':
return cls(tag, children), i + 1, j
else:
if tokens[i] != ')':
raise ValueError('Expected ")" following leaf')
return cls(tag, word=child, index=j), i + 1, j + 1
else:
# Only other possibility is it's a word
return tokens[i], i + 1, j
@classmethod
def from_corenlp(cls, s):
"""Parses the "parse" attribute returned by CoreNLP parse annotator."""
# "parse": "(ROOT\n (SBARQ\n (WHNP (WDT What)\n (NP (NN portion)\n (PP (IN of)\n (NP\n (NP (NNS households))\n (PP (IN in)\n (NP (NNP Jacksonville)))))))\n (SQ\n (VP (VBP have)\n (NP (RB only) (CD one) (NN person))))\n (. ? )))",
s_spaced = s.replace('\n', ' ').replace('(', ' ( ').replace(')', ' ) ')
tokens = [t for t in s_spaced.split(' ') if t]
tree, index, num_words = cls._recursive_parse_corenlp(tokens, 0, 0)
if index != len(tokens):
raise ValueError('Only parsed %d of %d tokens' % (index, len(tokens)))
return tree
def is_singleton(self):
if self.word:
return True
if len(self.children) > 1:
return False
return self.children[0].is_singleton()
def print_tree(self, indent=0):
spaces = ' ' * indent
if self.word:
print(f"{spaces}{self.tag}: {self.word} ({self.index})")
else:
print(f"{spaces}{self.tag}")
for c in self.children:
c.print_tree(indent=indent + 1)
def get_phrase(self):
if self.word:
return self.word
toks = []
for i, c in enumerate(self.children):
p = c.get_phrase()
if i == 0 or p.startswith("'"):
toks.append(p)
else:
toks.append(' ' + p)
return ''.join(toks)
def get_start_index(self):
if self.index is not None:
return self.index
return self.children[0].get_start_index()
def get_end_index(self):
if self.index is not None:
return self.index + 1
return self.children[-1].get_end_index()
@classmethod
def _recursive_replace_words(cls, tree, new_words, i):
if tree.word:
new_word = new_words[i]
return (cls(tree.tag, word=new_word, index=tree.index), i + 1)
new_children = []
for c in tree.children:
new_child, i = cls._recursive_replace_words(c, new_words, i)
new_children.append(new_child)
return cls(tree.tag, children=new_children), i
@classmethod
def replace_words(cls, tree, new_words):
"""Return a new tree, with new words replacing old ones."""
new_tree, i = cls._recursive_replace_words(tree, new_words, 0)
if i != len(new_words):
raise ValueError('len(new_words) == %d != i == %d' % (len(new_words), i))
return new_tree
def rejoin(tokens: List[Dict[str, str]], sep: str = None) -> str:
"""Rejoin tokens into the original sentence.
Args:
tokens: a list of dicts containing 'originalText' and 'before' fields.
All other fields will be ignored.
sep: if provided, use the given character as a separator instead of
the 'before' field (e.g. if you want to preserve where tokens are).
Returns: the original sentence that generated this CoreNLP token list.
"""
if sep is None:
return ''.join('%s%s' % (t['before'], t['originalText']) for t in tokens)
else:
# Use the given separator instead
return sep.join(t['originalText'] for t in tokens)
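# Illustrative example with hypothetical CoreNLP-style token dicts:
#
# toks = [{'before': '', 'originalText': 'Hello'},
#         {'before': ' ', 'originalText': 'world'}]
# rejoin(toks)            # -> 'Hello world'
# rejoin(toks, sep='|')   # -> 'Hello|world'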
def get_tokens_for_answers(answer_objs: List[Tuple[int, Dict]], corenlp_obj: Dict) -> Tuple[int, List]:
"""Get CoreNLP tokens corresponding to a SQuAD answer object."""
first_a_toks = None
for i, a_obj in enumerate(answer_objs):
a_toks = []
answer_start = a_obj['answer_start']
answer_end = answer_start + len(a_obj['text'])
for sent in corenlp_obj['sentences']:
for tok in sent['tokens']:
if tok['characterOffsetBegin'] >= answer_end:
continue
if tok['characterOffsetEnd'] <= answer_start:
continue
a_toks.append(tok)
if rejoin(a_toks).strip() == a_obj['text']:
# Make sure that the tokens reconstruct the answer
return i, a_toks
if i == 0:
first_a_toks = a_toks
# None of the extracted token lists reconstruct the answer
# Default to the first
return 0, first_a_toks
def get_determiner_for_answers(answer_objs: List[Dict]) -> Optional[str]:
for ans in answer_objs:
words = ans['text'].split(' ')
if words[0].lower() == 'the':
return 'the'
if words[0].lower() in ('a', 'an'):
return 'a'
return None
def compress_whnp(tree, inside_whnp=False):
if not tree.children: return tree # Reached leaf
# Compress all children
for i, c in enumerate(tree.children):
tree.children[i] = compress_whnp(c, inside_whnp=inside_whnp or tree.tag == 'WHNP')
if tree.tag != 'WHNP':
if inside_whnp:
# Wrap everything in an NP
return ConstituencyParse('NP', children=[tree])
return tree
wh_word = None
new_np_children = []
new_siblings = []
for i, c in enumerate(tree.children):
if i == 0:
            if c.tag in ('WHNP', 'WHADJP', 'WHADVP', 'WHPP'):
wh_word = c.children[0]
new_np_children.extend(c.children[1:])
elif c.tag in ('WDT', 'WP', 'WP$', 'WRB'):
wh_word = c
else:
# No WH-word at start of WHNP
return tree
else:
if c.tag == 'SQ': # Due to bad parse, SQ may show up here
new_siblings = tree.children[i:]
break
# Wrap everything in an NP
new_np_children.append(ConstituencyParse('NP', children=[c]))
if new_np_children:
new_np = ConstituencyParse('NP', children=new_np_children)
new_tree = ConstituencyParse('WHNP', children=[wh_word, new_np])
else:
new_tree = tree
if new_siblings:
new_tree = ConstituencyParse('SBARQ', children=[new_tree] + new_siblings)
return new_tree
def read_const_parse(parse_str):
tree = ConstituencyParse.from_corenlp(parse_str)
new_tree = compress_whnp(tree)
return new_tree
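# Illustrative usage on a CoreNLP-style "parse" string (the sentence below is
# an assumption, not taken from the datasets):
#
# parse_str = '(ROOT (SBARQ (WHNP (WP Who)) (SQ (VBZ is) (NP (NNP Jeff) (NNP Dean))) (. ?)))'
# tree = read_const_parse(parse_str)
# tree.print_tree()
# print(tree.get_phrase())  # -> 'Who is Jeff Dean ?'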
| adversarialnlp-master | adversarialnlp/generators/addsent/utils.py |
import json
import logging
from typing import Dict, List, Tuple
from adversarialnlp.common.file_utils import download_files
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def squad_reader(file_path: str = None) -> List[Tuple[Dict, str]]:
r""" Reads a JSON-formatted SQuAD file and returns an Iterator.
Args:
file_path: Path to a JSON-formatted SQuAD file.
            If no path is provided, download and use the SQuAD v1.1 training dataset.
Return:
        List of (question_answer, paragraph) tuples.
"""
if file_path is None:
file_path = download_files(fnames=['train-v1.1.json'],
paths='https://rajpurkar.github.io/SQuAD-explorer/dataset/',
local_folder='squad')
file_path = file_path[0]
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset_json = json.load(dataset_file)
dataset = dataset_json['data']
logger.info("Reading the dataset")
out_data = []
for article in dataset:
for paragraph_json in article['paragraphs']:
paragraph = paragraph_json["context"]
for question_answer in paragraph_json['qas']:
question_answer["question"] = question_answer["question"].strip().replace("\n", "")
out_data.append((question_answer, paragraph))
return out_data
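# Illustrative usage on the test fixture (mirrors addsent_generator_test.py):
#
# from adversarialnlp.common.file_utils import FIXTURES_ROOT
#
# seeds = squad_reader(FIXTURES_ROOT / 'squad.json')
# question_answer, paragraph = seeds[0]
# print(question_answer['question'], '->', question_answer['answers'][0]['text'])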
| adversarialnlp-master | adversarialnlp/generators/addsent/squad_reader.py |
import logging
import json
import itertools
from typing import Iterable, Dict, Tuple
from collections import defaultdict
from adversarialnlp.common.file_utils import download_files
from adversarialnlp.generators import Generator
from adversarialnlp.generators.addsent.rules import (ANSWER_RULES, HIGH_CONF_ALTER_RULES, ALL_ALTER_RULES,
DO_NOT_ALTER, BAD_ALTERATIONS, CONVERSION_RULES)
from adversarialnlp.generators.addsent.utils import (rejoin, ConstituencyParse, get_tokens_for_answers,
get_determiner_for_answers, read_const_parse)
from adversarialnlp.generators.addsent.squad_reader import squad_reader
from adversarialnlp.generators.addsent.corenlp import StanfordCoreNLP
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
SQUAD_FILE = 'data/squad/train-v1.1.json'
NEARBY_GLOVE_FILE = 'data/addsent/nearby_n100_glove_6B_100d.json'
POSTAG_FILE = 'data/addsent/postag_dict.json'
class AddSentGenerator(Generator):
r"""Adversarial examples generator based on AddSent.
AddSent is described in the paper `Adversarial Examples for
Evaluating Reading Comprehension Systems`_
by Robin Jia & Percy Liang
Args, input and yield:
See the ``Generator`` class.
Additional arguments:
alteration_strategy: Alteration strategy. Options:
- `separate`: Do best alteration for each word separately.
- `best`: Generate exactly one best alteration
(may over-alter).
- `high-conf`: Do all possible high-confidence alterations.
- `high-conf-separate`: Do best high-confidence alteration
for each word separately.
- `all`: Do all possible alterations (very conservative)
        prepend: If True, insert the adversarial sentence at the
            beginning of the context; otherwise append it at the end.
        use_answer_placeholder: Use an answer placeholder.
Seeds:
Tuple of SQuAD-like instances containing
- question-answer-span, and
- context paragraph.
default_seeds:
If no seeds are provided, the default_seeds are the training
set of the
        `SQuAD v1.1 dataset <https://rajpurkar.github.io/SQuAD-explorer/>`_.
"""
def __init__(self,
alteration_strategy: str = 'high-conf',
prepend: bool = False,
use_answer_placeholder: bool = False,
default_seeds: Iterable = None,
quiet: bool = False):
        super(AddSentGenerator, self).__init__(default_seeds, quiet)
model_files = download_files(fnames=['nearby_n100_glove_6B_100d.json',
'postag_dict.json'],
local_folder='addsent')
corenlp_path = download_files(fnames=['stanford-corenlp-full-2018-02-27.zip'],
paths='http://nlp.stanford.edu/software/',
local_folder='corenlp')
self.nlp: StanfordCoreNLP = StanfordCoreNLP(corenlp_path[0])
with open(model_files[0], 'r') as data_file:
self.nearby_word_dict: Dict = json.load(data_file)
with open(model_files[1], 'r') as data_file:
self.postag_dict: Dict = json.load(data_file)
self.alteration_strategy: str = alteration_strategy
self.prepend: bool = prepend
self.use_answer_placeholder: bool = use_answer_placeholder
if default_seeds is None:
self.default_seeds = squad_reader(SQUAD_FILE)
else:
self.default_seeds = default_seeds
def close(self):
self.nlp.close()
def _annotate(self, text: str, annotators: str):
r"""Wrapper to call CoreNLP. """
props = {'annotators': annotators,
'ssplit.newlineIsSentenceBreak': 'always',
'outputFormat':'json'}
return json.loads(self.nlp.annotate(text, properties=props))
def _alter_question(self, question, tokens, const_parse):
r"""Alter the question to make it ask something else. """
used_words = [tok['word'].lower() for tok in tokens]
new_qs = []
toks_all = []
if self.alteration_strategy.startswith('high-conf'):
rules = HIGH_CONF_ALTER_RULES
else:
rules = ALL_ALTER_RULES
for i, tok in enumerate(tokens):
if tok['word'].lower() in DO_NOT_ALTER:
if self.alteration_strategy in ('high-conf', 'all'):
toks_all.append(tok)
continue
begin = tokens[:i]
end = tokens[i+1:]
found = False
for rule_name in rules:
rule = rules[rule_name]
new_words = rule(tok, nearby_word_dict=self.nearby_word_dict, postag_dict=self.postag_dict)
if new_words:
for word in new_words:
if word.lower() in used_words:
continue
if word.lower() in BAD_ALTERATIONS:
continue
                        # Match capitalization
if tok['word'] == tok['word'].upper():
word = word.upper()
elif tok['word'] == tok['word'].title():
word = word.title()
new_tok = dict(tok)
new_tok['word'] = new_tok['lemma'] = new_tok['originalText'] = word
new_tok['altered'] = True
# NOTE: obviously this is approximate
if self.alteration_strategy.endswith('separate'):
new_tokens = begin + [new_tok] + end
new_q = rejoin(new_tokens)
tag = '%s-%d-%s' % (rule_name, i, word)
new_const_parse = ConstituencyParse.replace_words(
const_parse, [tok['word'] for tok in new_tokens])
new_qs.append((new_q, new_tokens, new_const_parse, tag))
break
elif self.alteration_strategy in ('high-conf', 'all'):
toks_all.append(new_tok)
found = True
break
if self.alteration_strategy in ('high-conf', 'all') and found:
break
if self.alteration_strategy in ('high-conf', 'all') and not found:
toks_all.append(tok)
if self.alteration_strategy in ('high-conf', 'all'):
new_q = rejoin(toks_all)
new_const_parse = ConstituencyParse.replace_words(
const_parse, [tok['word'] for tok in toks_all])
if new_q != question:
new_qs.append((rejoin(toks_all), toks_all, new_const_parse, self.alteration_strategy))
return new_qs
def generate_from_seed(self, seed: Tuple):
r"""Edit a SQuAD example using rules. """
qas, paragraph = seed
question = qas['question'].strip()
if not self.quiet:
print(f"Question: {question}")
if self.use_answer_placeholder:
answer = 'ANSWER'
determiner = ''
else:
p_parse = self._annotate(paragraph, 'tokenize,ssplit,pos,ner,entitymentions')
ind, a_toks = get_tokens_for_answers(qas['answers'], p_parse)
determiner = get_determiner_for_answers(qas['answers'])
answer_obj = qas['answers'][ind]
for _, func in ANSWER_RULES:
answer = func(answer_obj, a_toks, question, determiner=determiner)
if answer:
break
else:
raise ValueError('Missing answer')
q_parse = self._annotate(question, 'tokenize,ssplit,pos,parse,ner')
q_parse = q_parse['sentences'][0]
q_tokens = q_parse['tokens']
q_const_parse = read_const_parse(q_parse['parse'])
if self.alteration_strategy:
# Easiest to alter the question before converting
q_list = self._alter_question(question, q_tokens, q_const_parse)
else:
q_list = [(question, q_tokens, q_const_parse, 'unaltered')]
for q_str, q_tokens, q_const_parse, tag in q_list:
for rule in CONVERSION_RULES:
sent = rule.convert(q_str, answer, q_tokens, q_const_parse)
if sent:
if not self.quiet:
print(f" Sent ({tag}): {sent}'")
cur_qa = {
'question': qas['question'],
'id': '%s-%s' % (qas['id'], tag),
'answers': qas['answers']
}
if self.prepend:
cur_text = '%s %s' % (sent, paragraph)
new_answers = []
for ans in qas['answers']:
new_answers.append({
'text': ans['text'],
'answer_start': ans['answer_start'] + len(sent) + 1
})
cur_qa['answers'] = new_answers
else:
cur_text = '%s %s' % (paragraph, sent)
                    out_example = {'seed_context': paragraph,
'seed_qas': qas,
'context': cur_text,
'qas': [cur_qa]}
yield out_example
# from adversarialnlp.common.file_utils import FIXTURES_ROOT
# generator = AddSentGenerator()
# test_instances = squad_reader(FIXTURES_ROOT / 'squad.json')
# batches = list(generator(test_instances, num_epochs=1))
# assert len(batches) != 0
| adversarialnlp-master | adversarialnlp/generators/addsent/addsent_generator.py |
import math
from adversarialnlp.generators.addsent.utils import rejoin
MONTHS = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
def ans_number(a, tokens, q, **kwargs):
out_toks = []
seen_num = False
for t in tokens:
ner = t['ner']
pos = t['pos']
w = t['word']
out_tok = {'before': t['before']}
# Split on dashes
leftover = ''
dash_toks = w.split('-')
if len(dash_toks) > 1:
w = dash_toks[0]
leftover = '-'.join(dash_toks[1:])
# Try to get a number out
value = None
if w != '%':
# Percent sign should just pass through
try:
value = float(w.replace(',', ''))
except:
try:
norm_ner = t['normalizedNER']
if norm_ner[0] in ('%', '>', '<'):
norm_ner = norm_ner[1:]
value = float(norm_ner)
except:
pass
if not value and (
ner == 'NUMBER' or
(ner == 'PERCENT' and pos == 'CD')):
# Force this to be a number anyways
value = 10
if value:
if math.isinf(value) or math.isnan(value): value = 9001
seen_num = True
if w in ('thousand', 'million', 'billion', 'trillion'):
if w == 'thousand':
new_val = 'million'
else:
new_val = 'thousand'
else:
if value < 2500 and value > 1000:
new_val = str(value - 75)
else:
# Change leading digit
if value == int(value):
val_chars = list('%d' % value)
else:
val_chars = list('%g' % value)
c = val_chars[0]
for i in range(len(val_chars)):
c = val_chars[i]
if c >= '0' and c <= '9':
val_chars[i] = str(max((int(c) + 5) % 10, 1))
break
new_val = ''.join(val_chars)
if leftover:
new_val = '%s-%s' % (new_val, leftover)
out_tok['originalText'] = new_val
else:
out_tok['originalText'] = t['originalText']
out_toks.append(out_tok)
if seen_num:
return rejoin(out_toks).strip()
else:
return None
def ans_date(a, tokens, q, **kwargs):
out_toks = []
if not all(t['ner'] == 'DATE' for t in tokens):
return None
for t in tokens:
if t['pos'] == 'CD' or t['word'].isdigit():
try:
value = int(t['word'])
except:
value = 10 # fallback
if value > 50: new_val = str(value - 25) # Year
else: # Day of month
if value > 15: new_val = str(value - 11)
else: new_val = str(value + 11)
else:
if t['word'].lower() in MONTHS:
m_ind = MONTHS.index(t['word'].lower())
new_val = MONTHS[(m_ind + 6) % 12].title()
else:
# Give up
new_val = t['originalText']
out_toks.append({'before': t['before'], 'originalText': new_val})
new_ans = rejoin(out_toks).strip()
if new_ans == a['text']: return None
return new_ans
def ans_entity_full(ner_tag, new_ans):
"""Returns a function that yields new_ans iff every token has |ner_tag|."""
def func(a, tokens, q, **kwargs):
for t in tokens:
if t['ner'] != ner_tag: return None
return new_ans
return func
def ans_abbrev(new_ans):
def func(a, tokens, q, **kwargs):
s = a['text']
if s == s.upper() and s != s.lower():
return new_ans
return None
return func
def ans_match_wh(wh_word, new_ans):
"""Returns a function that yields new_ans if the question starts with |wh_word|."""
def func(a, tokens, q, **kwargs):
if q.lower().startswith(wh_word + ' '):
return new_ans
return None
return func
def ans_pos(pos, new_ans, end=False, add_dt=False):
"""Returns a function that yields new_ans if the first/last token has |pos|."""
def func(a, tokens, q, determiner, **kwargs):
if end:
t = tokens[-1]
else:
t = tokens[0]
if t['pos'] != pos: return None
if add_dt and determiner:
return '%s %s' % (determiner, new_ans)
return new_ans
return func
def ans_catch_all(new_ans):
def func(a, tokens, q, **kwargs):
return new_ans
return func
ANSWER_RULES = [
('date', ans_date),
('number', ans_number),
('ner_person', ans_entity_full('PERSON', 'Jeff Dean')),
('ner_location', ans_entity_full('LOCATION', 'Chicago')),
('ner_organization', ans_entity_full('ORGANIZATION', 'Stark Industries')),
('ner_misc', ans_entity_full('MISC', 'Jupiter')),
('abbrev', ans_abbrev('LSTM')),
('wh_who', ans_match_wh('who', 'Jeff Dean')),
('wh_when', ans_match_wh('when', '1956')),
('wh_where', ans_match_wh('where', 'Chicago')),
    ('wh_how_many', ans_match_wh('how many', '42')),
# Starts with verb
('pos_begin_vb', ans_pos('VB', 'learn')),
('pos_end_vbd', ans_pos('VBD', 'learned')),
('pos_end_vbg', ans_pos('VBG', 'learning')),
('pos_end_vbp', ans_pos('VBP', 'learns')),
('pos_end_vbz', ans_pos('VBZ', 'learns')),
# Ends with some POS tag
('pos_end_nn', ans_pos('NN', 'hamster', end=True, add_dt=True)),
('pos_end_nnp', ans_pos('NNP', 'Central Park', end=True, add_dt=True)),
('pos_end_nns', ans_pos('NNS', 'hamsters', end=True, add_dt=True)),
('pos_end_nnps', ans_pos('NNPS', 'Kew Gardens', end=True, add_dt=True)),
('pos_end_jj', ans_pos('JJ', 'deep', end=True)),
('pos_end_jjr', ans_pos('JJR', 'deeper', end=True)),
('pos_end_jjs', ans_pos('JJS', 'deepest', end=True)),
('pos_end_rb', ans_pos('RB', 'silently', end=True)),
('pos_end_vbg', ans_pos('VBG', 'learning', end=True)),
('catch_all', ans_catch_all('aliens')),
]
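# Illustrative sketch of how AddSentGenerator applies these rules: the first
# rule that returns a fake answer wins (``answer_obj``, ``a_toks``, ``question``
# and ``determiner`` are assumed to come from the CoreNLP annotations of a seed):
#
# for rule_name, rule_fn in ANSWER_RULES:
#     fake_answer = rule_fn(answer_obj, a_toks, question, determiner=determiner)
#     if fake_answer:
#         break
# else:
#     raise ValueError('Missing answer')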
| adversarialnlp-master | adversarialnlp/generators/addsent/rules/answer_rules.py |
from .answer_rules import ANSWER_RULES
from .alteration_rules import (HIGH_CONF_ALTER_RULES, ALL_ALTER_RULES,
DO_NOT_ALTER, BAD_ALTERATIONS)
from .conversion_rules import CONVERSION_RULES
| adversarialnlp-master | adversarialnlp/generators/addsent/rules/__init__.py |
from pattern import en as patten
CONST_PARSE_MACROS = {
'$Noun': '$NP/$NN/$NNS/$NNP/$NNPS',
'$Verb': '$VB/$VBD/$VBP/$VBZ',
'$Part': '$VBN/$VG',
'$Be': 'is/are/was/were',
'$Do': "do/did/does/don't/didn't/doesn't",
'$WHP': '$WHADJP/$WHADVP/$WHNP/$WHPP',
}
# Map to pattern.en aliases
# http://www.clips.ua.ac.be/pages/pattern-en#conjugation
POS_TO_PATTERN = {
'vb': 'inf', # Infinitive
'vbp': '1sg', # non-3rd-person singular present
'vbz': '3sg', # 3rd-person singular present
'vbg': 'part', # gerund or present participle
'vbd': 'p', # past
'vbn': 'ppart', # past participle
}
# Tenses prioritized by likelihood of arising
PATTERN_TENSES = ['inf', '3sg', 'p', 'part', 'ppart', '1sg']
def _check_match(node, pattern_tok):
if pattern_tok in CONST_PARSE_MACROS:
pattern_tok = CONST_PARSE_MACROS[pattern_tok]
if ':' in pattern_tok:
# ':' means you match the LHS category and start with something on the right
lhs, rhs = pattern_tok.split(':')
match_lhs = _check_match(node, lhs)
if not match_lhs: return False
phrase = node.get_phrase().lower()
retval = any(phrase.startswith(w) for w in rhs.split('/'))
return retval
elif '/' in pattern_tok:
return any(_check_match(node, t) for t in pattern_tok.split('/'))
return ((pattern_tok.startswith('$') and pattern_tok[1:] == node.tag) or
(node.word and pattern_tok.lower() == node.word.lower()))
def _recursive_match_pattern(pattern_toks, stack, matches):
"""Recursively try to match a pattern, greedily."""
if len(matches) == len(pattern_toks):
# We matched everything in the pattern; also need stack to be empty
return len(stack) == 0
if len(stack) == 0: return False
cur_tok = pattern_toks[len(matches)]
node = stack.pop()
# See if we match the current token at this level
is_match = _check_match(node, cur_tok)
if is_match:
cur_num_matches = len(matches)
matches.append(node)
new_stack = list(stack)
success = _recursive_match_pattern(pattern_toks, new_stack, matches)
if success: return True
# Backtrack
while len(matches) > cur_num_matches:
matches.pop()
# Recurse to children
if not node.children: return False # No children to recurse on, we failed
stack.extend(node.children[::-1]) # Leftmost children should be popped first
return _recursive_match_pattern(pattern_toks, stack, matches)
def match_pattern(pattern, const_parse):
pattern_toks = pattern.split(' ')
whole_phrase = const_parse.get_phrase()
if whole_phrase.endswith('?') or whole_phrase.endswith('.'):
# Match trailing punctuation as needed
pattern_toks.append(whole_phrase[-1])
matches = []
success = _recursive_match_pattern(pattern_toks, [const_parse], matches)
if success:
return matches
else:
return None
def run_postprocessing(s, rules, all_args):
rule_list = rules.split(',')
for rule in rule_list:
if rule == 'lower':
s = s.lower()
elif rule.startswith('tense-'):
ind = int(rule[6:])
orig_vb = all_args[ind]
tenses = patten.tenses(orig_vb)
for tense in PATTERN_TENSES: # Prioritize by PATTERN_TENSES
if tense in tenses:
break
else: # Default to first tense
tense = PATTERN_TENSES[0]
s = patten.conjugate(s, tense)
elif rule in POS_TO_PATTERN:
s = patten.conjugate(s, POS_TO_PATTERN[rule])
return s
def convert_whp(node, q, a, tokens, quiet=False):
if node.tag in ('WHNP', 'WHADJP', 'WHADVP', 'WHPP'):
# Apply WHP rules
cur_phrase = node.get_phrase()
cur_tokens = tokens[node.get_start_index():node.get_end_index()]
for r in WHP_RULES:
phrase = r.convert(cur_phrase, a, cur_tokens, node, run_fix_style=False)
if phrase:
if not quiet:
print(f" WHP Rule '{r.name}': {phrase}")
return phrase
return None
### Rules for converting questions into declarative sentences
def fix_style(s):
"""Minor, general style fixes for questions."""
s = s.replace('?', '') # Delete question marks anywhere in sentence.
s = s.strip(' .')
if s[0] == s[0].lower():
s = s[0].upper() + s[1:]
return s + '.'
class ConversionRule(object):
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
raise NotImplementedError
class ConstituencyRule(ConversionRule):
"""A rule for converting question to sentence based on constituency parse."""
def __init__(self, in_pattern, out_pattern, postproc=None):
self.in_pattern = in_pattern # e.g. "where did $NP $VP"
self.out_pattern = out_pattern #unicode(out_pattern)
# e.g. "{1} did {2} at {0}." Answer is always 0
self.name = in_pattern
if postproc:
self.postproc = postproc
else:
self.postproc = {}
def convert(self, q, a, tokens, const_parse, run_fix_style=True) -> str:
pattern_toks = self.in_pattern.split(' ') # Don't care about trailing punctuation
match = match_pattern(self.in_pattern, const_parse)
appended_clause = False
if not match:
# Try adding a PP at the beginning
appended_clause = True
new_pattern = '$PP , ' + self.in_pattern
pattern_toks = new_pattern.split(' ')
match = match_pattern(new_pattern, const_parse)
if not match:
# Try adding an SBAR at the beginning
new_pattern = '$SBAR , ' + self.in_pattern
pattern_toks = new_pattern.split(' ')
match = match_pattern(new_pattern, const_parse)
if not match: return None
appended_clause_match = None
fmt_args = [a]
for t, m in zip(pattern_toks, match):
if t.startswith('$') or '/' in t:
# First check if it's a WHP
phrase = convert_whp(m, q, a, tokens)
if not phrase:
phrase = m.get_phrase()
fmt_args.append(phrase)
if appended_clause:
appended_clause_match = fmt_args[1]
fmt_args = [a] + fmt_args[2:]
for i in range(len(fmt_args)):
if i in self.postproc:
# Run postprocessing filters
fmt_args[i] = run_postprocessing(fmt_args[i], self.postproc[i], fmt_args)
output = self.gen_output(fmt_args)
if appended_clause:
output = appended_clause_match + ', ' + output
if run_fix_style:
output = fix_style(output)
return output
def gen_output(self, fmt_args):
"""By default, use self.out_pattern. Can be overridden."""
return self.out_pattern.format(*fmt_args)
class ReplaceRule(ConversionRule):
"""A simple rule that replaces some tokens with the answer."""
def __init__(self, target, replacement='{}', start=False):
self.target = target
self.replacement = replacement #unicode(replacement)
self.name = 'replace(%s)' % target
self.start = start
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
t_toks = self.target.split(' ')
q_toks = q.rstrip('?.').split(' ')
replacement_text = self.replacement.format(a)
for i in range(len(q_toks)):
if self.start and i != 0: continue
if ' '.join(q_toks[i:i + len(t_toks)]).rstrip(',').lower() == self.target:
begin = q_toks[:i]
end = q_toks[i + len(t_toks):]
output = ' '.join(begin + [replacement_text] + end)
if run_fix_style:
output = fix_style(output)
return output
return None
class FindWHPRule(ConversionRule):
"""A rule that looks for $WHP's from right to left and does replacements."""
name = 'FindWHP'
def _recursive_convert(self, node, q, a, tokens, found_whp):
if node.word:
return node.word, found_whp
if not found_whp:
whp_phrase = convert_whp(node, q, a, tokens)
if whp_phrase:
return whp_phrase, True
child_phrases = []
for c in node.children[::-1]:
c_phrase, found_whp = self._recursive_convert(c, q, a, tokens, found_whp)
child_phrases.append(c_phrase)
out_toks = []
for i, p in enumerate(child_phrases[::-1]):
if i == 0 or p.startswith("'"):
out_toks.append(p)
else:
out_toks.append(' ' + p)
return ''.join(out_toks), found_whp
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
out_phrase, found_whp = self._recursive_convert(const_parse, q, a, tokens, False)
if found_whp:
if run_fix_style:
out_phrase = fix_style(out_phrase)
return out_phrase
return None
class AnswerRule(ConversionRule):
"""Just return the answer."""
name = 'AnswerRule'
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
return a
CONVERSION_RULES = [
# Special rules
ConstituencyRule('$WHP:what $Be $NP called that $VP', '{2} that {3} {1} called {1}'),
# What type of X
#ConstituencyRule("$WHP:what/which type/sort/kind/group of $NP/$Noun $Be $NP", '{5} {4} a {1} {3}'),
#ConstituencyRule("$WHP:what/which type/sort/kind/group of $NP/$Noun $Be $VP", '{1} {3} {4} {5}'),
#ConstituencyRule("$WHP:what/which type/sort/kind/group of $NP $VP", '{1} {3} {4}'),
# How $JJ
ConstituencyRule('how $JJ $Be $NP $IN $NP', '{3} {2} {0} {1} {4} {5}'),
ConstituencyRule('how $JJ $Be $NP $SBAR', '{3} {2} {0} {1} {4}'),
ConstituencyRule('how $JJ $Be $NP', '{3} {2} {0} {1}'),
# When/where $Verb
ConstituencyRule('$WHP:when/where $Do $NP', '{3} occurred in {1}'),
ConstituencyRule('$WHP:when/where $Do $NP $Verb', '{3} {4} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Do $NP $Verb $NP/$PP', '{3} {4} {5} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Do $NP $Verb $NP $PP', '{3} {4} {5} {6} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Be $NP', '{3} {2} in {1}'),
ConstituencyRule('$WHP:when/where $Verb $NP $VP/$ADJP', '{3} {2} {4} in {1}'),
# What/who/how $Do
ConstituencyRule("$WHP:what/which/who $Do $NP do", '{3} {1}', {0: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb", '{3} {4} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $IN/$NP", '{3} {4} {5} {1}', {4: 'tense-2', 0: 'vbg'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $PP", '{3} {4} {1} {5}', {4: 'tense-2', 0: 'vbg'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $NP $VP", '{3} {4} {5} {6} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb to $VB", '{3} {4} to {5} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb to $VB $VP", '{3} {4} to {5} {1} {6}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $NP $IN $VP", '{3} {4} {5} {6} {1} {7}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $PP/$S/$VP/$SBAR/$SQ", '{3} {4} {1} {5}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $PP $PP/$S/$VP/$SBAR", '{3} {4} {1} {5} {6}', {4: 'tense-2'}),
# What/who/how $Be
# Watch out for things that end in a preposition
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP of $NP $Verb/$Part $IN", '{3} of {4} {2} {5} {6} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $NP $IN", '{3} {2} {4} {5} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $VP/$IN", '{3} {2} {4} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $IN $NP/$VP", '{1} {2} {3} {4} {5}'),
ConstituencyRule('$WHP:what/which/who $Be/$MD $NP $Verb $PP', '{3} {2} {4} {1} {5}'),
ConstituencyRule('$WHP:what/which/who $Be/$MD $NP/$VP/$PP', '{1} {2} {3}'),
ConstituencyRule("$WHP:how $Be/$MD $NP $VP", '{3} {2} {4} by {1}'),
# What/who $Verb
ConstituencyRule("$WHP:what/which/who $VP", '{1} {2}'),
# $IN what/which $NP
ConstituencyRule('$IN what/which $NP $Do $NP $Verb $NP', '{5} {6} {7} {1} the {3} of {0}',
{1: 'lower', 6: 'tense-4'}),
ConstituencyRule('$IN what/which $NP $Be $NP $VP/$ADJP', '{5} {4} {6} {1} the {3} of {0}',
{1: 'lower'}),
ConstituencyRule('$IN what/which $NP $Verb $NP/$ADJP $VP', '{5} {4} {6} {1} the {3} of {0}',
{1: 'lower'}),
FindWHPRule(),
]
# Rules for going from WHP to an answer constituent
WHP_RULES = [
# WHPP rules
ConstituencyRule('$IN what/which type/sort/kind/group of $NP/$Noun', '{1} {0} {4}'),
ConstituencyRule('$IN what/which type/sort/kind/group of $NP/$Noun $PP', '{1} {0} {4} {5}'),
ConstituencyRule('$IN what/which $NP', '{1} the {3} of {0}'),
ConstituencyRule('$IN $WP/$WDT', '{1} {0}'),
# what/which
ConstituencyRule('what/which type/sort/kind/group of $NP/$Noun', '{0} {3}'),
ConstituencyRule('what/which type/sort/kind/group of $NP/$Noun $PP', '{0} {3} {4}'),
ConstituencyRule('what/which $NP', 'the {2} of {0}'),
# How many
ConstituencyRule('how many/much $NP', '{0} {2}'),
# Replace
ReplaceRule('what'),
ReplaceRule('who'),
ReplaceRule('how many'),
ReplaceRule('how much'),
ReplaceRule('which'),
ReplaceRule('where'),
ReplaceRule('when'),
ReplaceRule('why'),
ReplaceRule('how'),
# Just give the answer
AnswerRule(),
]
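# --- Hedged usage sketch (not part of the original file) ---
# A minimal, self-contained illustration of the ReplaceRule defined above. The
# question/answer pair is invented for illustration; ReplaceRule ignores the
# `tokens` and `const_parse` arguments, so None placeholders are passed.
if __name__ == '__main__':
    example_rule = ReplaceRule('who')
    converted = example_rule.convert('Who wrote the play?', 'Shakespeare', None, None)
    print(converted)  # -> 'Shakespeare wrote the play.'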
| adversarialnlp-master | adversarialnlp/generators/addsent/rules/conversion_rules.py |
import collections
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
from nltk.stem.lancaster import LancasterStemmer
STEMMER = LancasterStemmer()
POS_TO_WORDNET = {
'NN': wn.NOUN,
'JJ': wn.ADJ,
'JJR': wn.ADJ,
'JJS': wn.ADJ,
}
def alter_special(token, **kwargs):
w = token['originalText']
if w in SPECIAL_ALTERATIONS:
return [SPECIAL_ALTERATIONS[w]]
return None
def alter_nearby(pos_list, ignore_pos=False, is_ner=False):
def func(token, nearby_word_dict=None, postag_dict=None, **kwargs):
if token['pos'] not in pos_list: return None
if is_ner and token['ner'] not in ('PERSON', 'LOCATION', 'ORGANIZATION', 'MISC'):
return None
w = token['word'].lower()
        if w in ('war',): return None  # tuple, not a bare string, so only the word 'war' is excluded
if w not in nearby_word_dict: return None
new_words = []
w_stem = STEMMER.stem(w.replace('.', ''))
for x in nearby_word_dict[w][1:]:
new_word = x['word']
# Make sure words aren't too similar (e.g. same stem)
new_stem = STEMMER.stem(new_word.replace('.', ''))
if w_stem.startswith(new_stem) or new_stem.startswith(w_stem): continue
if not ignore_pos:
# Check for POS tag match
if new_word not in postag_dict: continue
new_postag = postag_dict[new_word]
if new_postag != token['pos']: continue
new_words.append(new_word)
return new_words
return func
def alter_entity_glove(token, nearby_word_dict=None, **kwargs):
# NOTE: Deprecated
if token['ner'] not in ('PERSON', 'LOCATION', 'ORGANIZATION', 'MISC'): return None
w = token['word'].lower()
if w == token['word']: return None # Only do capitalized words
if w not in nearby_word_dict: return None
new_words = []
for x in nearby_word_dict[w][1:3]:
if token['word'] == w.upper():
new_words.append(x['word'].upper())
else:
new_words.append(x['word'].title())
return new_words
def alter_entity_type(token, **kwargs):
pos = token['pos']
ner = token['ner']
word = token['word']
is_abbrev = word == word.upper() and not word == word.lower()
if token['pos'] not in (
'JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS', 'RB', 'RBR', 'RBS',
'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):
# Don't alter non-content words
return None
if ner == 'PERSON':
return ['Jackson']
elif ner == 'LOCATION':
return ['Berlin']
elif ner == 'ORGANIZATION':
if is_abbrev: return ['UNICEF']
return ['Acme']
elif ner == 'MISC':
return ['Neptune']
    elif pos == 'NNP':
if is_abbrev: return ['XKCD']
return ['Dalek']
elif pos == 'NNPS':
return ['Daleks']
return None
def alter_wordnet_antonyms(token, **kwargs):
if token['pos'] not in POS_TO_WORDNET: return None
w = token['word'].lower()
wn_pos = POS_TO_WORDNET[token['pos']]
synsets = wn.synsets(w, wn_pos)
if not synsets: return None
synset = synsets[0]
antonyms = []
for lem in synset.lemmas():
if lem.antonyms():
for a in lem.antonyms():
new_word = a.name()
if '_' in a.name(): continue
antonyms.append(new_word)
return antonyms
SPECIAL_ALTERATIONS = {
'States': 'Kingdom',
'US': 'UK',
'U.S': 'U.K.',
'U.S.': 'U.K.',
'UK': 'US',
'U.K.': 'U.S.',
'U.K': 'U.S.',
'largest': 'smallest',
'smallest': 'largest',
'highest': 'lowest',
'lowest': 'highest',
'May': 'April',
'Peyton': 'Trevor',
}
DO_NOT_ALTER = ['many', 'such', 'few', 'much', 'other', 'same', 'general',
'type', 'record', 'kind', 'sort', 'part', 'form', 'terms', 'use',
'place', 'way', 'old', 'young', 'bowl', 'united', 'one',
'likely', 'different', 'square', 'war', 'republic', 'doctor', 'color']
BAD_ALTERATIONS = ['mx2004', 'planet', 'u.s.', 'Http://Www.Co.Mo.Md.Us']
HIGH_CONF_ALTER_RULES = collections.OrderedDict([
('special', alter_special),
('wn_antonyms', alter_wordnet_antonyms),
('nearbyNum', alter_nearby(['CD'], ignore_pos=True)),
('nearbyProperNoun', alter_nearby(['NNP', 'NNPS'])),
    ('nearbyProperNoun', alter_nearby(['NNP', 'NNPS'], ignore_pos=True)),  # NOTE: duplicate key; this entry overrides the one above in the OrderedDict
('nearbyEntityNouns', alter_nearby(['NN', 'NNS'], is_ner=True)),
('nearbyEntityJJ', alter_nearby(['JJ', 'JJR', 'JJS'], is_ner=True)),
('entityType', alter_entity_type),
#('entity_glove', alter_entity_glove),
])
ALL_ALTER_RULES = collections.OrderedDict(list(HIGH_CONF_ALTER_RULES.items()) + [
('nearbyAdj', alter_nearby(['JJ', 'JJR', 'JJS'])),
('nearbyNoun', alter_nearby(['NN', 'NNS'])),
#('nearbyNoun', alter_nearby(['NN', 'NNS'], ignore_pos=True)),
])
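# --- Hedged usage sketch (not part of the original file) ---
# Illustrates how the rule functions above are applied to a single CoreNLP-style
# token dict. The token is hand-made for illustration; real tokens come from the
# annotation pipeline used elsewhere in the project.
if __name__ == '__main__':
    example_token = {'word': 'largest', 'originalText': 'largest', 'pos': 'JJS', 'ner': 'O'}
    print(alter_special(example_token))           # -> ['smallest'] (from SPECIAL_ALTERATIONS)
    print(alter_wordnet_antonyms(example_token))  # WordNet antonyms, typically ['small', ...]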
| adversarialnlp-master | adversarialnlp/generators/addsent/rules/alteration_rules.py |
from .swag_generator import SwagGenerator
from .activitynet_captions_reader import ActivityNetCaptionsDatasetReader
| adversarialnlp-master | adversarialnlp/generators/swag/__init__.py |
import re
from itertools import tee
from num2words import num2words
def optimistic_restore(network, state_dict):
mismatch = False
own_state = network.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Unexpected key {} in state_dict with size {}".format(name, param.size()))
mismatch = True
elif param.size() == own_state[name].size():
own_state[name].copy_(param)
else:
print("Network has {} with size {}, ckpt has {}".format(name,
own_state[name].size(),
param.size()))
mismatch = True
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
print("We couldn't find {}".format(','.join(missing)))
mismatch = True
return not mismatch
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def n2w_1k(num, use_ordinal=False):
if num > 1000:
return ''
return num2words(num, to='ordinal' if use_ordinal else 'cardinal')
def postprocess(sentence):
"""
make sure punctuation is followed by a space
:param sentence:
:return:
"""
sentence = remove_allcaps(sentence)
# Aggressively get rid of some punctuation markers
sent0 = re.sub(r'^.*(\\|/|!!!|~|=|#|@|\*|¡|©|¿|«|»|¬|{|}|\||\(|\)|\+|\]|\[).*$',
' ', sentence, flags=re.MULTILINE|re.IGNORECASE)
# Less aggressively get rid of quotes, apostrophes
sent1 = re.sub(r'"', ' ', sent0)
sent2 = re.sub(r'`', '\'', sent1)
# match ordinals
    sent3 = re.sub(r'(\d+(?:st|nd|rd|th))',
                   lambda x: n2w_1k(int(x.group(0)[:-2]), use_ordinal=True), sent2)
    # These things all need to be followed by spaces or else we'll run into problems
    sent4 = re.sub(r'[:;,\"\!\.\-\?](?! )', lambda x: x.group(0) + ' ', sent3)
    # These things all need to be preceded by spaces or else we'll run into problems
    sent5 = re.sub(r'(?<! )[-]', lambda x: ' ' + x.group(0), sent4)
# Several spaces
sent6 = re.sub(r'\s\s+', ' ', sent5)
sent7 = sent6.strip()
return sent7
def remove_allcaps(sent):
"""
Given a sentence, filter it so that it doesn't contain some words that are ALLcaps
:param sent: string, like SOMEONE wheels SOMEONE on, mouthing silent words of earnest prayer.
:return: Someone wheels someone on, mouthing silent words of earnest prayer.
"""
# Remove all caps
def _sanitize(word, is_first):
if word == "I":
return word
num_capitals = len([x for x in word if not x.islower()])
if num_capitals > len(word) // 2:
# We have an all caps word here.
if is_first:
return word[0] + word[1:].lower()
return word.lower()
return word
return ' '.join([_sanitize(word, i == 0) for i, word in enumerate(sent.split(' '))])
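# --- Hedged usage sketch (not part of the original file) ---
# Small demonstration of the helpers above on made-up caption text.
if __name__ == '__main__':
    print(list(pairwise(['s1', 's2', 's3'])))  # -> [('s1', 's2'), ('s2', 's3')]
    print(n2w_1k(3, use_ordinal=True))         # -> 'third'
    print(postprocess('SOMEONE walks in,sits down.'))  # -> 'Someone walks in, sits down.'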
| adversarialnlp-master | adversarialnlp/generators/swag/utils.py |
# pylint: disable=invalid-name,arguments-differ
from typing import List, Iterable, Tuple
import logging
import torch
from allennlp.common.util import JsonDict
from allennlp.common.file_utils import cached_path
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.fields import TextField
from allennlp.pretrained import PretrainedModel
from adversarialnlp.common.file_utils import download_files, DATA_ROOT
from adversarialnlp.generators import Generator
from adversarialnlp.generators.swag.simple_bilm import SimpleBiLM
from adversarialnlp.generators.swag.utils import optimistic_restore
from adversarialnlp.generators.swag.activitynet_captions_reader import ActivityNetCaptionsDatasetReader
BATCH_SIZE = 1
BEAM_SIZE = 8 * BATCH_SIZE
logger = logging.getLogger(__name__)
class SwagGenerator(Generator):
"""
``SwagGenerator`` inherit from the ``Generator`` class.
This ``Generator`` generate adversarial examples from seeds using
the method described in
`SWAG: A Large-Scale Adversarial Dataset for Grounded Commonsense Inference <http://arxiv.org/abs/1808.05326>`_.
This method goes schematically as follows:
    - In a seed sample containing a pair of sequential sentences (e.g. video captions),
      the second sentence is split into noun and verb phrases.
    - A language model generates several possible endings following the second sentence's noun phrase.
    Args, input and yield:
        See the ``Generator`` class.
    Seeds:
        AllenNLP ``Instance`` containing two ``TextField``:
        `first_sentence` and `second_sentence`, respectively containing
        the first and the second consecutive sentences.
default_seeds:
If no seeds are provided, the default_seeds are the training set
of the
`ActivityNet Captions dataset <https://cs.stanford.edu/people/ranjaykrishna/densevid/>`_.
"""
def __init__(self,
default_seeds: Iterable = None,
quiet: bool = False):
super().__init__(default_seeds, quiet)
lm_files = download_files(fnames=['vocabulary.zip',
'lm-fold-0.bin'],
local_folder='swag_lm')
activity_data_files = download_files(fnames=['captions.zip'],
paths='https://cs.stanford.edu/people/ranjaykrishna/densevid/',
local_folder='activitynet_captions')
const_parser_files = cached_path('https://s3-us-west-2.amazonaws.com/allennlp/models/elmo-constituency-parser-2018.03.14.tar.gz',
cache_dir=str(DATA_ROOT / 'allennlp_constituency_parser'))
self.const_parser = PretrainedModel(const_parser_files, 'constituency-parser').predictor()
vocab = Vocabulary.from_files(lm_files[0])
self.language_model = SimpleBiLM(vocab=vocab, recurrent_dropout_probability=0.2,
embedding_dropout_probability=0.2)
optimistic_restore(self.language_model, torch.load(lm_files[1], map_location='cpu')['state_dict'])
if default_seeds is None:
self.default_seeds = ActivityNetCaptionsDatasetReader().read(activity_data_files[0] + '/train.json')
else:
self.default_seeds = default_seeds
    def _find_VP(self, tree: JsonDict) -> List[Tuple[str, str]]:
r"""Recurse on a constituency parse tree until we find verb phrases"""
# Recursion is annoying because we need to check whether each is a list or not
def _recurse_on_children():
assert 'children' in tree
result = []
for child in tree['children']:
res = self._find_VP(child)
if isinstance(res, tuple):
result.append(res)
else:
result.extend(res)
return result
if 'VP' in tree['attributes']:
# # Now we'll get greedy and see if we can find something better
# if 'children' in tree and len(tree['children']) > 1:
# recurse_result = _recurse_on_children()
# if all([x[1] in ('VP', 'NP', 'CC') for x in recurse_result]):
# return recurse_result
return [(tree['word'], 'VP')]
# base cases
if 'NP' in tree['attributes']:
return [(tree['word'], 'NP')]
# No children
if not 'children' in tree:
return [(tree['word'], tree['attributes'][0])]
# If a node only has 1 child then we'll have to stick with that
if len(tree['children']) == 1:
return _recurse_on_children()
# try recursing on everything
return _recurse_on_children()
    def _split_on_final_vp(self, sentence: TextField) -> Tuple[List[str], List[str]]:
r"""Splits a sentence on the final verb phrase """
sentence_txt = ' '.join(t.text for t in sentence.tokens)
res = self.const_parser.predict(sentence_txt)
res_chunked = self._find_VP(res['hierplane_tree']['root'])
is_vp: List[int] = [i for i, (word, pos) in enumerate(res_chunked) if pos == 'VP']
if not is_vp:
return None, None
vp_ind = max(is_vp)
not_vp = [token for x in res_chunked[:vp_ind] for token in x[0].split(' ')]
is_vp = [token for x in res_chunked[vp_ind:] for token in x[0].split(' ')]
return not_vp, is_vp
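    # Hedged illustration (not part of the original code): for a caption such as
    # "The man jumps into the pool", the final-VP split typically yields
    # (['The', 'man'], ['jumps', 'into', 'the', 'pool']).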
    def generate_from_seed(self, seed: Instance):
"""Edit a seed example.
"""
first_sentence: TextField = seed.fields["first_sentence"]
second_sentence: TextField = seed.fields["second_sentence"]
eos_bounds = [i + 1 for i, x in enumerate(first_sentence.tokens) if x.text in ('.', '?', '!')]
if not eos_bounds:
first_sentence = TextField(tokens=first_sentence.tokens + [Token(text='.')],
token_indexers=first_sentence.token_indexers)
context_len = len(first_sentence.tokens)
if context_len < 6 or context_len > 100:
print("skipping on {} (too short or long)".format(
' '.join(first_sentence.tokens + second_sentence.tokens)))
return
# Something I should have done:
# make sure that there aren't multiple periods, etc. in s2 or in the middle
eos_bounds_s2 = [i + 1 for i, x in enumerate(second_sentence.tokens) if x.text in ('.', '?', '!')]
        if not eos_bounds_s2:
            second_sentence = TextField(tokens=second_sentence.tokens + [Token(text='.')],
                                        token_indexers=second_sentence.token_indexers)
        elif len(eos_bounds_s2) > 1 or max(eos_bounds_s2) != len(second_sentence.tokens):
            return
# Now split on the VP
startphrase, endphrase = self._split_on_final_vp(second_sentence)
if startphrase is None or not startphrase or len(endphrase) < 5 or len(endphrase) > 25:
print("skipping on {}->{},{}".format(' '.join(first_sentence.tokens + second_sentence.tokens),
startphrase, endphrase), flush=True)
return
# if endphrase contains unk then it's hopeless
# if any(vocab.get_token_index(tok.lower()) == vocab.get_token_index(vocab._oov_token)
# for tok in endphrase):
# print("skipping on {} (unk!)".format(' '.join(s1_toks + s2_toks)))
# return
context = [token.text for token in first_sentence.tokens] + startphrase
lm_out = self.language_model.conditional_generation(context, gt_completion=endphrase,
batch_size=BEAM_SIZE,
max_gen_length=25)
gens0, fwd_scores, ctx_scores = lm_out
if len(gens0) < BATCH_SIZE:
print("Couldn't generate enough candidates so skipping")
return
gens0 = gens0[:BATCH_SIZE]
yield gens0
# fwd_scores = fwd_scores[:BATCH_SIZE]
# # Now get the backward scores.
# full_sents = [context + gen for gen in gens0] # NOTE: #1 is GT
# result_dict = self.language_model(self.language_model.batch_to_ids(full_sents),
# use_forward=False, use_reverse=True, compute_logprobs=True)
# ending_lengths = (fwd_scores < 0).sum(1)
# ending_lengths_float = ending_lengths.astype(np.float32)
# rev_scores = result_dict['reverse_logprobs'].cpu().detach().numpy()
# forward_logperp_ending = -fwd_scores.sum(1) / ending_lengths_float
# reverse_logperp_ending = -rev_scores[:, context_len:].sum(1) / ending_lengths_float
# forward_logperp_begin = -ctx_scores.mean()
# reverse_logperp_begin = -rev_scores[:, :context_len].mean(1)
# eos_logperp = -fwd_scores[np.arange(fwd_scores.shape[0]), ending_lengths - 1]
# # print("Time elapsed {:.3f}".format(time() - tic), flush=True)
# scores = np.exp(np.column_stack((
# forward_logperp_ending,
# reverse_logperp_ending,
# reverse_logperp_begin,
# eos_logperp,
# np.ones(forward_logperp_ending.shape[0], dtype=np.float32) * forward_logperp_begin,
# )))
# PRINTOUT
# low2high = scores[:, 2].argsort()
# print("\n\n Dataset={} ctx: {} (perp={:.3f})\n~~~\n".format(item['dataset'], ' '.join(context),
# np.exp(forward_logperp_begin)), flush=True)
# for i, ind in enumerate(low2high.tolist()):
# gen_i = ' '.join(gens0[ind])
# if (ind == 0) or (i < 128):
# print("{:3d}/{:4d}) ({}, end|ctx:{:5.1f} end:{:5.1f} ctx|end:{:5.1f} EOS|(ctx, end):{:5.1f}) {}".format(
# i, len(gens0), 'GOLD' if ind == 0 else ' ', *scores[ind][:-1], gen_i), flush=True)
# gt_score = low2high.argsort()[0]
# item_full = deepcopy(item)
# item_full['sent1'] = first_sentence
# item_full['startphrase'] = startphrase
# item_full['context'] = context
# item_full['generations'] = gens0
# item_full['postags'] = [ # parse real fast
# [x.orth_.lower() if pos_vocab.get_token_index(x.orth_.lower()) != 1 else x.pos_ for x in y]
# for y in spacy_model.pipe([startphrase + gen for gen in gens0], batch_size=BATCH_SIZE)]
# item_full['scores'] = pd.DataFrame(data=scores, index=np.arange(scores.shape[0]),
# columns=['end-from-ctx', 'end', 'ctx-from-end', 'eos-from-ctxend', 'ctx'])
# generated_examples.append(gens0)
# if len(generated_examples) > 0:
# yield generated_examples
# generated_examples = []
# from adversarialnlp.common.file_utils import FIXTURES_ROOT
# generator = SwagGenerator()
# test_instances = ActivityNetCaptionsDatasetReader().read(FIXTURES_ROOT / 'activitynet_captions.json')
# batches = list(generator(test_instances, num_epochs=1))
# assert len(batches) != 0
| adversarialnlp-master | adversarialnlp/generators/swag/swag_generator.py |
from typing import Dict
import json
import logging
from overrides import overrides
from unidecode import unidecode
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from adversarialnlp.generators.swag.utils import pairwise, postprocess
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("activitynet_captions")
class ActivityNetCaptionsDatasetReader(DatasetReader):
r""" Reads ActivityNet Captions JSON files and creates a dataset suitable for crafting
adversarial examples with swag using these captions.
Expected format:
JSON dict[video_id, video_obj] where
video_id: str,
video_obj: {
"duration": float,
"timestamps": list of pairs of float,
"sentences": list of strings
}
The output of ``read`` is a list of ``Instance`` s with the fields:
video_id: ``MetadataField``
first_sentence: ``TextField``
second_sentence: ``TextField``
The instances are created from all consecutive pair of sentences
associated to each video.
    Ex: if a video has three associated sentences s1, s2, s3, ``read`` will
    generate two instances:
1. Instance("first_sentence" = s1, "second_sentence" = s2)
2. Instance("first_sentence" = s2, "second_sentence" = s3)
Args:
lazy : If True, training will start sooner, but will take
longer per batch. This allows training with datasets that
are too large to fit in memory. Passed to DatasetReader.
tokenizer : Tokenizer to use to split the title and abstract
into words or other kinds of tokens.
token_indexers : Indexers used to define input token
representations.
"""
def __init__(self,
lazy: bool = False,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None) -> None:
super().__init__(lazy)
self._tokenizer = tokenizer or WordTokenizer()
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path):
with open(cached_path(file_path), "r") as data_file:
logger.info("Reading instances from: %s", file_path)
json_data = json.load(data_file)
for video_id, value in json_data.items():
sentences = [postprocess(unidecode(x.strip()))
for x in value['sentences']]
for first_sentence, second_sentence in pairwise(sentences):
yield self.text_to_instance(video_id, first_sentence, second_sentence)
@overrides
def text_to_instance(self,
video_id: str,
first_sentence: str,
second_sentence: str) -> Instance: # type: ignore
# pylint: disable=arguments-differ
tokenized_first_sentence = self._tokenizer.tokenize(first_sentence)
tokenized_second_sentence = self._tokenizer.tokenize(second_sentence)
first_sentence_field = TextField(tokenized_first_sentence, self._token_indexers)
second_sentence_field = TextField(tokenized_second_sentence, self._token_indexers)
fields = {'video_id': MetadataField(video_id),
'first_sentence': first_sentence_field,
'second_sentence': second_sentence_field}
return Instance(fields)
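# --- Hedged usage sketch (not part of the original file) ---
# Shows how a single caption pair becomes an AllenNLP Instance. The video id and
# sentences are invented; this assumes the spaCy model used by AllenNLP's default
# WordTokenizer is available locally.
if __name__ == '__main__':
    reader = ActivityNetCaptionsDatasetReader()
    instance = reader.text_to_instance('v_demo123',
                                       'A man stands on a diving board.',
                                       'He jumps into the pool.')
    print(instance.fields['video_id'].metadata)      # -> 'v_demo123'
    print(instance.fields['first_sentence'].tokens)  # tokenized first caption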
| adversarialnlp-master | adversarialnlp/generators/swag/activitynet_captions_reader.py |
"""
A wrapper around AI2's ELMo LM to allow for an LM objective...
"""
from typing import Optional, Tuple
from typing import Union, List, Dict
import numpy as np
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.data import Token, Vocabulary, Instance
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.seq2seq_encoders.pytorch_seq2seq_wrapper import PytorchSeq2SeqWrapper
from allennlp.nn.util import sequence_cross_entropy_with_logits
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.utils.rnn import PackedSequence
def _de_duplicate_generations(generations):
"""
    Given a list of lists of strings, filter out the duplicates and return the indices
    corresponding to the generations that are kept.
:param generations:
:return:
"""
dup_set = set()
unique_idx = []
for i, gen_i in enumerate(generations):
gen_i_str = ' '.join(gen_i)
if gen_i_str not in dup_set:
unique_idx.append(i)
dup_set.add(gen_i_str)
return [generations[i] for i in unique_idx], np.array(unique_idx)
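# Hedged illustration (not part of the original file):
# _de_duplicate_generations([['a', 'cat'], ['a', 'dog'], ['a', 'cat']])
# keeps the first occurrence of each generation and returns
# ([['a', 'cat'], ['a', 'dog']], array([0, 1])).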
class StackedLstm(torch.nn.Module):
"""
A stacked LSTM.
Parameters
----------
input_size : int, required
The dimension of the inputs to the LSTM.
hidden_size : int, required
The dimension of the outputs of the LSTM.
num_layers : int, required
The number of stacked LSTMs to use.
recurrent_dropout_probability: float, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
`A Theoretically Grounded Application of Dropout in Recurrent Neural Networks
<https://arxiv.org/abs/1512.05287>`_ .
use_input_projection_bias : bool, optional (default = True)
Whether or not to use a bias on the input projection layer. This is mainly here
for backwards compatibility reasons and will be removed (and set to False)
in future releases.
Returns
-------
output_accumulator : PackedSequence
The outputs of the interleaved LSTMs per timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
"""
def __init__(self,
input_size: int,
hidden_size: int,
num_layers: int,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True,
go_forward: bool = True) -> None:
super(StackedLstm, self).__init__()
# Required to be wrapped with a :class:`PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
layers = []
lstm_input_size = input_size
for layer_index in range(num_layers):
layer = AugmentedLstm(lstm_input_size, hidden_size, go_forward,
recurrent_dropout_probability=recurrent_dropout_probability,
use_highway=use_highway,
use_input_projection_bias=use_input_projection_bias)
lstm_input_size = hidden_size
self.add_module('layer_{}'.format(layer_index), layer)
layers.append(layer)
self.lstm_layers = layers
def forward(self, # pylint: disable=arguments-differ
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):
"""
Parameters
----------
inputs : ``PackedSequence``, required.
A batch first ``PackedSequence`` to run the stacked LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
Returns
-------
output_sequence : PackedSequence
The encoded sequence of shape (batch_size, sequence_length, hidden_size)
final_states: torch.Tensor
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers, batch_size, hidden_size).
"""
if not initial_state:
hidden_states = [None] * len(self.lstm_layers)
elif initial_state[0].size()[0] != len(self.lstm_layers):
raise ConfigurationError("Initial states were passed to forward() but the number of "
"initial states does not match the number of layers.")
else:
hidden_states = list(zip(initial_state[0].split(1, 0),
initial_state[1].split(1, 0)))
output_sequence = inputs
final_states = []
for i, state in enumerate(hidden_states):
layer = getattr(self, 'layer_{}'.format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
output_sequence, final_state = layer(output_sequence, state)
final_states.append(final_state)
final_state_tuple = tuple(torch.cat(state_list, 0) for state_list in zip(*final_states))
return output_sequence, final_state_tuple
class SimpleBiLM(torch.nn.Module):
def __init__(self,
vocab: Vocabulary,
recurrent_dropout_probability: float = 0.0,
embedding_dropout_probability: float = 0.0,
input_size=512,
hidden_size=512) -> None:
"""
        :param vocab: vocabulary used to size the (tied) embedding/decoder matrix
        :param recurrent_dropout_probability: recurrent dropout to add to the LSTM layers
        :param embedding_dropout_probability: dropout to apply to the word embeddings
        :param input_size: dimensionality of the LSTM inputs
        :param hidden_size: dimensionality of the LSTM hidden state
"""
super(SimpleBiLM, self).__init__()
self.forward_lm = PytorchSeq2SeqWrapper(StackedLstm(
input_size=input_size, hidden_size=hidden_size, num_layers=2, go_forward=True,
recurrent_dropout_probability=recurrent_dropout_probability,
use_input_projection_bias=False, use_highway=True), stateful=True)
self.reverse_lm = PytorchSeq2SeqWrapper(StackedLstm(
input_size=input_size, hidden_size=hidden_size, num_layers=2, go_forward=False,
recurrent_dropout_probability=recurrent_dropout_probability,
use_input_projection_bias=False, use_highway=True), stateful=True)
# This will also be the encoder
self.decoder = torch.nn.Linear(512, vocab.get_vocab_size(namespace='tokens'))
self.vocab = vocab
self.register_buffer('eos_tokens', torch.LongTensor([vocab.get_token_index(tok) for tok in
['.', '!', '?', '@@UNKNOWN@@', '@@PADDING@@', '@@bos@@',
'@@eos@@']]))
self.register_buffer('invalid_tokens', torch.LongTensor([vocab.get_token_index(tok) for tok in
['@@UNKNOWN@@', '@@PADDING@@', '@@bos@@', '@@eos@@',
'@@NEWLINE@@']]))
self.embedding_dropout_probability = embedding_dropout_probability
def embed_words(self, words):
# assert words.dim() == 2
return F.embedding(words, self.decoder.weight)
# if not self.training:
# return F.embedding(words, self.decoder.weight)
# Embedding dropout
# vocab_size = self.decoder.weight.size(0)
# mask = Variable(
# self.decoder.weight.data.new(vocab_size, 1).bernoulli_(1 - self.embedding_dropout_probability).expand_as(
# self.decoder.weight) / (1 - self.embedding_dropout_probability))
# padding_idx = 0
# embeds = self.decoder._backend.Embedding.apply(words, mask * self.decoder.weight, padding_idx, None,
# 2, False, False)
# return embeds
def timestep_to_ids(self, timestep_tokenized: List[str]):
""" Just a single timestep (so dont add BOS or EOS"""
return torch.tensor([self.vocab.get_token_index(x) for x in timestep_tokenized])[:, None]
def batch_to_ids(self, stories_tokenized: List[List[str]]):
"""
Simple wrapper around _elmo_batch_to_ids
:param batch: A list of tokenized sentences.
:return: A tensor of padded character ids.
"""
batch = Batch([Instance(
{'story': TextField([Token('@@bos@@')] + [Token(x) for x in story] + [Token('@@eos@@')],
token_indexers={
'tokens': SingleIdTokenIndexer(namespace='tokens', lowercase_tokens=True)})})
for story in stories_tokenized])
batch.index_instances(self.vocab)
words = {k: v['tokens'] for k, v in batch.as_tensor_dict().items()}['story']
return words
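    # Hedged note (not part of the original code): batch_to_ids wraps each story
    # with @@bos@@ / @@eos@@ markers and returns a padded LongTensor of shape
    # (batch_size, longest_story_length + 2) holding vocabulary ids.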
def conditional_generation(self, context: List[str], gt_completion: List[str],
batch_size: int = 128, max_gen_length: int = 25,
same_length_as_gt: bool = False, first_is_gold: bool = False):
"""
        Generate conditioned on the context. While we're at it we'll score the GT going forwards.
        :param context: List of tokens to condition on. We'll add the BOS marker to it
        :param gt_completion: The ground-truth completion
        :param batch_size: Number of sentences to generate
        :param max_gen_length: Max length for generated sentences (irrelevant if same_length_as_gt=True)
:param same_length_as_gt: set to True if you want all the sents to have the same length as the gt_completion
:param first_is_gold: set to True if you want the first sample to be the gt_completion
:return:
"""
# Forward condition on context, then repeat to be the right batch size:
# (layer_index, batch_size, fwd hidden dim)
log_probs = self(self.batch_to_ids([context]), use_forward=True,
use_reverse=False, compute_logprobs=True)
forward_logprobs = log_probs['forward_logprobs']
self.forward_lm._states = tuple(x.repeat(1, batch_size, 1).contiguous() for x in self.forward_lm._states)
# Each item will be (token, score)
generations = [[(context[-1], 0.0)] for i in range(batch_size)]
mask = forward_logprobs.new(batch_size).long().fill_(1)
gt_completion_padded = [self.vocab.get_token_index(gt_token) for gt_token in
[x.lower() for x in gt_completion] + ['@@PADDING@@'] * (
max_gen_length - len(gt_completion))]
for index, gt_token_ind in enumerate(gt_completion_padded):
embeds = self.embed_words(self.timestep_to_ids([gen[-1][0] for gen in generations]))
next_dists = F.softmax(self.decoder(self.forward_lm(embeds, mask[:, None]))[:, 0], dim=1)
            # Perform hacky stuff on the distribution (disallowing BOS, EOS, that sort of thing)
sampling_probs = next_dists.clone()
sampling_probs[:, self.invalid_tokens] = 0.0
if first_is_gold:
# Gold truth is first row
sampling_probs[0].zero_()
sampling_probs[0, gt_token_ind] = 1
if same_length_as_gt:
if index == (len(gt_completion) - 1):
sampling_probs.zero_()
sampling_probs[:, gt_token_ind] = 1
else:
sampling_probs[:, self.eos_tokens] = 0.0
sampling_probs = sampling_probs / sampling_probs.sum(1, keepdim=True)
next_preds = torch.multinomial(sampling_probs, 1).squeeze(1)
next_scores = np.log(next_dists[
torch.arange(0, next_dists.size(0),
out=mask.data.new(next_dists.size(0))),
next_preds,
].cpu().detach().numpy())
for i, (gen_list, pred_id, score_i, mask_i) in enumerate(
zip(generations, next_preds.cpu().detach().numpy(), next_scores, mask.data.cpu().detach().numpy())):
if mask_i:
gen_list.append((self.vocab.get_token_from_index(pred_id), score_i))
is_eos = (next_preds[:, None] == self.eos_tokens[None]).max(1)[0]
mask[is_eos] = 0
if mask.sum().item() == 0:
break
generation_scores = np.zeros((len(generations), max([len(g) - 1 for g in generations])), dtype=np.float32)
for i, gen in enumerate(generations):
for j, (_, v) in enumerate(gen[1:]):
generation_scores[i, j] = v
generation_toks, idx = _de_duplicate_generations([[tok for (tok, score) in gen[1:]] for gen in generations])
return generation_toks, generation_scores[idx], forward_logprobs.cpu().detach().numpy()
def _chunked_logsoftmaxes(self, activation, word_targets, chunk_size=256):
"""
        do the softmax in chunks so memory doesn't explode
        :param activation: [batch, T, dim]
        :param word_targets: [batch, T] indices
:param chunk_size: you might need to tune this based on GPU specs
:return:
"""
all_logprobs = []
num_chunks = (activation.size(0) - 1) // chunk_size + 1
for activation_chunk, target_chunk in zip(torch.chunk(activation, num_chunks, dim=0),
torch.chunk(word_targets, num_chunks, dim=0)):
assert activation_chunk.size()[:2] == target_chunk.size()[:2]
targets_flat = target_chunk.view(-1)
time_indexer = torch.arange(0, targets_flat.size(0),
out=target_chunk.data.new(targets_flat.size(0))) % target_chunk.size(1)
batch_indexer = torch.arange(0, targets_flat.size(0),
                                         out=target_chunk.data.new(targets_flat.size(0))) // target_chunk.size(1)
all_logprobs.append(F.log_softmax(self.decoder(activation_chunk), 2)[
batch_indexer, time_indexer, targets_flat].view(*target_chunk.size()))
return torch.cat(all_logprobs, 0)
def forward(self, words: torch.Tensor, use_forward: bool = True, use_reverse: bool = True,
compute_logprobs: bool = False) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
"""
use this for training the LM
:param words: [batch_size, N] words. assuming you're starting with BOS and ending with EOS here
:return:
"""
encoded_inputs = self.embed_words(words)
mask = (words != 0).long()[:, 2:]
word_targets = words[:, 1:-1].contiguous()
result_dict = {
'mask': mask,
'word_targets': word_targets,
}
# TODO: try to reduce duplicate code here
if use_forward:
self.forward_lm.reset_states()
forward_activation = self.forward_lm(encoded_inputs[:, :-2], mask)
if compute_logprobs:
# being memory efficient here is critical if the input tensors are large
result_dict['forward_logprobs'] = self._chunked_logsoftmaxes(forward_activation,
word_targets) * mask.float()
else:
result_dict['forward_logits'] = self.decoder(forward_activation)
result_dict['forward_loss'] = sequence_cross_entropy_with_logits(result_dict['forward_logits'],
word_targets,
mask)
if use_reverse:
self.reverse_lm.reset_states()
reverse_activation = self.reverse_lm(encoded_inputs[:, 2:], mask)
if compute_logprobs:
result_dict['reverse_logprobs'] = self._chunked_logsoftmaxes(reverse_activation,
word_targets) * mask.float()
else:
result_dict['reverse_logits'] = self.decoder(reverse_activation)
result_dict['reverse_loss'] = sequence_cross_entropy_with_logits(result_dict['reverse_logits'],
word_targets,
mask)
return result_dict
| adversarialnlp-master | adversarialnlp/generators/swag/simple_bilm.py |
from typing import Dict, List, Tuple, Union, Optional
import torch
import numpy as np
from allennlp.common.checks import ConfigurationError
from allennlp.data.token_indexers import OpenaiTransformerBytePairIndexer
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.openai_transformer import OpenaiTransformer
from allennlp.modules.token_embedders import OpenaiTransformerEmbedder
from allennlp.nn.util import get_text_field_mask, remove_sentence_boundaries
@Model.register('openai-transformer-language-model')
class OpenAITransformerLanguageModel(Model):
"""
    The ``OpenAITransformerLanguageModel`` is a wrapper around the ``OpenaiTransformer`` module.
Parameters
----------
vocab: ``Vocabulary``
remove_bos_eos: ``bool``, optional (default: True)
Typically the provided token indexes will be augmented with
begin-sentence and end-sentence tokens. If this flag is True
the corresponding embeddings will be removed from the return values.
"""
def __init__(self,
vocab: Vocabulary,
openai_token_embedder: OpenaiTransformerEmbedder,
remove_bos_eos: bool = True) -> None:
super().__init__(vocab)
model_path = "https://s3-us-west-2.amazonaws.com/allennlp/models/openai-transformer-lm-2018.07.23.tar.gz"
indexer = OpenaiTransformerBytePairIndexer(model_path=model_path)
transformer = OpenaiTransformer(model_path=model_path)
self._token_embedders = OpenaiTransformerEmbedder(transformer=transformer, top_layer_only=True)
self._remove_bos_eos = remove_bos_eos
def _get_target_token_embedding(self,
token_embeddings: torch.Tensor,
mask: torch.Tensor,
direction: int) -> torch.Tensor:
# Need to shift the mask in the correct direction
zero_col = token_embeddings.new_zeros(mask.size(0), 1).byte()
if direction == 0:
# forward direction, get token to right
shifted_mask = torch.cat([zero_col, mask[:, 0:-1]], dim=1)
else:
shifted_mask = torch.cat([mask[:, 1:], zero_col], dim=1)
return token_embeddings.masked_select(shifted_mask.unsqueeze(-1)).view(-1, self._forward_dim)
def _compute_loss(self,
lm_embeddings: torch.Tensor,
token_embeddings: torch.Tensor,
forward_targets: torch.Tensor,
backward_targets: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# lm_embeddings is shape (batch_size, timesteps, dim * 2)
# forward_targets, backward_targets are shape (batch_size, timesteps)
# masked with 0
forward_embeddings, backward_embeddings = lm_embeddings.chunk(2, -1)
losses: List[torch.Tensor] = []
for idx, embedding, targets in ((0, forward_embeddings, forward_targets),
(1, backward_embeddings, backward_targets)):
mask = targets > 0
# we need to subtract 1 to undo the padding id since the softmax
# does not include a padding dimension
non_masked_targets = targets.masked_select(mask) - 1
non_masked_embedding = embedding.masked_select(
mask.unsqueeze(-1)
).view(-1, self._forward_dim)
# note: need to return average loss across forward and backward
# directions, but total sum loss across all batches.
# Assuming batches include full sentences, forward and backward
# directions have the same number of samples, so sum up loss
# here then divide by 2 just below
if not self._softmax_loss.tie_embeddings or not self._use_character_inputs:
losses.append(self._softmax_loss(non_masked_embedding, non_masked_targets))
else:
# we also need the token embeddings corresponding to the
# the targets
raise NotImplementedError("This requires SampledSoftmaxLoss, which isn't implemented yet.")
# pylint: disable=unreachable
non_masked_token_embedding = self._get_target_token_embedding(token_embeddings, mask, idx)
losses.append(self._softmax(non_masked_embedding,
non_masked_targets,
non_masked_token_embedding))
return losses[0], losses[1]
def forward(self, # type: ignore
source: Dict[str, torch.LongTensor]) -> Dict[str, torch.Tensor]:
"""
Computes the averaged forward and backward LM loss from the batch.
By convention, the input dict is required to have at least a ``"tokens"``
entry that's the output of a ``SingleIdTokenIndexer``, which is used
to compute the language model targets.
        If the model was instantiated with ``remove_bos_eos=True``,
then it is expected that each of the input sentences was augmented with
begin-sentence and end-sentence tokens.
Parameters
----------
tokens: ``torch.Tensor``, required.
The output of ``Batch.as_tensor_dict()`` for a batch of sentences.
Returns
-------
Dict with keys:
``'loss'``: ``torch.Tensor``
averaged forward/backward negative log likelihood
``'forward_loss'``: ``torch.Tensor``
forward direction negative log likelihood
``'backward_loss'``: ``torch.Tensor``
backward direction negative log likelihood
``'lm_embeddings'``: ``torch.Tensor``
(batch_size, timesteps, embed_dim) tensor of top layer contextual representations
``'mask'``: ``torch.Tensor``
(batch_size, timesteps) mask for the embeddings
"""
# pylint: disable=arguments-differ
mask = get_text_field_mask(source)
# We must have token_ids so that we can compute targets
token_ids = source.get("tokens")
if token_ids is None:
raise ConfigurationError("Your data must have a 'tokens': SingleIdTokenIndexer() "
"in order to use the BidirectionalLM")
# Use token_ids to compute targets
forward_targets = torch.zeros_like(token_ids)
backward_targets = torch.zeros_like(token_ids)
forward_targets[:, 0:-1] = token_ids[:, 1:]
backward_targets[:, 1:] = token_ids[:, 0:-1]
embeddings = self._text_field_embedder(source)
# Apply LayerNorm if appropriate.
embeddings = self._layer_norm(embeddings)
contextual_embeddings = self._contextualizer(embeddings, mask)
# add dropout
contextual_embeddings = self._dropout(contextual_embeddings)
# compute softmax loss
forward_loss, backward_loss = self._compute_loss(contextual_embeddings,
embeddings,
forward_targets,
backward_targets)
num_targets = torch.sum((forward_targets > 0).long())
if num_targets > 0:
average_loss = 0.5 * (forward_loss + backward_loss) / num_targets.float()
else:
average_loss = torch.tensor(0.0).to(forward_targets.device) # pylint: disable=not-callable
# this is stored to compute perplexity if needed
self._last_average_loss[0] = average_loss.detach().item()
if num_targets > 0:
# loss is directly minimized
if self._loss_scale == 'n_samples':
scale_factor = num_targets.float()
else:
scale_factor = self._loss_scale
return_dict = {
'loss': average_loss * scale_factor,
'forward_loss': forward_loss * scale_factor / num_targets.float(),
'backward_loss': backward_loss * scale_factor / num_targets.float()
}
else:
# average_loss zero tensor, return it for all
return_dict = {
'loss': average_loss,
'forward_loss': average_loss,
'backward_loss': average_loss
}
if self._remove_bos_eos:
contextual_embeddings, mask = remove_sentence_boundaries(contextual_embeddings, mask)
return_dict.update({
'lm_embeddings': contextual_embeddings,
'mask': mask
})
return return_dict
| adversarialnlp-master | adversarialnlp/generators/swag/openai_transformer_model.py |
# coding=utf-8
from pytorch_pretrained_bert import BertForMaskedLM,tokenization
import torch
import sys
import csv
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = 'bert-large-uncased'
if 'base' in sys.argv: model_name = 'bert-base-uncased'
print("using model:",model_name,file=sys.stderr)
only_prefix = False
if 'only_prefix' in sys.argv:
only_prefix = True
print("We take only the prefix", file=sys.stderr)
bert=BertForMaskedLM.from_pretrained(model_name)
tokenizer=tokenization.BertTokenizer.from_pretrained(model_name)
bert.eval()
bert.to(device)
def get_probs_for_words(sent,w1,w2):
pre,target,post=sent.split('***')
if 'mask' in target.lower():
target=['[MASK]']
else:
target=tokenizer.tokenize(target)
tokens=['[CLS]']+tokenizer.tokenize(pre)
target_idx=len(tokens)
#print(target_idx)
tokens+=target
if not only_prefix:
tokens+=tokenizer.tokenize(post)
tokens+=['[SEP]']
input_ids=tokenizer.convert_tokens_to_ids(tokens)
try:
word_ids=tokenizer.convert_tokens_to_ids([w1,w2])
except KeyError:
print("skipping",w1,w2,"bad wins")
return None
tens=torch.LongTensor(input_ids).unsqueeze(0).to(device)
with torch.no_grad():
res=bert(tens)[0,target_idx]
#res=torch.nn.functional.softmax(res,-1)
scores = res[word_ids]
return [float(x.item()) for x in scores]
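# Hedged usage sketch (not part of the original script): the sentence below is
# invented for illustration. get_probs_for_words returns the raw BERT scores of
# the two candidate fillers at the ***mask*** position (or None on failure), e.g.:
# scores = get_probs_for_words("the keys to the cabinet ***mask*** on the table", "are", "is")
# if scores is not None:
#     are_score, is_score = scores
#     print(are_score > is_score)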
from collections import Counter
def load_marvin():
cc = Counter()
# note: I edited the LM_Syneval/src/make_templates.py script, and run "python LM_Syneval/src/make_templates.py LM_Syneval/data/templates/ > marvin_linzen_dataset.tsv"
out = []
for line in open("marvin_linzen_dataset.tsv"):
case = line.strip().split("\t")
cc[case[1]]+=1
g,ug = case[-2],case[-1]
g = g.split()
ug = ug.split()
assert(len(g)==len(ug)),(g,ug)
diffs = [i for i,pair in enumerate(zip(g,ug)) if pair[0]!=pair[1]]
if (len(diffs)!=1):
#print(diffs)
#print(g,ug)
continue
assert(len(diffs)==1),diffs
gv=g[diffs[0]] # good
ugv=ug[diffs[0]] # bad
g[diffs[0]]="***mask***"
g.append(".")
out.append((case[0],case[1]," ".join(g),gv,ugv))
return out
def eval_marvin():
o = load_marvin()
print(len(o),file=sys.stderr)
from collections import defaultdict
import time
rc = defaultdict(Counter)
tc = Counter()
start = time.time()
for i,(case,tp,s,g,b) in enumerate(o):
ps = get_probs_for_words(s,g,b)
if ps is None: ps = [0,1]
gp = ps[0]
bp = ps[1]
print(gp>bp,case,tp,g,b,s)
if i % 100==0:
print(i,time.time()-start,file=sys.stderr)
start=time.time()
sys.stdout.flush()
def eval_lgd():
for i,line in enumerate(open("lgd_dataset.tsv",encoding="utf8")):
# for i,line in enumerate(open("lgd_dataset_with_is_are.tsv",encoding="utf8")):
na,_,masked,good,bad = line.strip().split("\t")
ps = get_probs_for_words(masked,good,bad)
if ps is None: continue
gp = ps[0]
bp = ps[1]
print(str(gp>bp),na,good,gp,bad,bp,masked.encode("utf8"),sep=u"\t")
if i%100 == 0:
print(i,file=sys.stderr)
sys.stdout.flush()
def read_gulordava():
rows = csv.DictReader(open("generated.tab",encoding="utf8"),delimiter="\t")
data=[]
for row in rows:
row2=next(rows)
assert(row['sent']==row2['sent'])
assert(row['class']=='correct')
assert(row2['class']=='wrong')
sent = row['sent'].lower().split()[:-1] # dump the <eos> token.
good_form = row['form']
bad_form = row2['form']
sent[int(row['len_prefix'])]="***mask***"
sent = " ".join(sent)
data.append((sent,row['n_attr'],good_form,bad_form))
return data
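# Hedged note (not part of the original script): each item returned by
# read_gulordava is a tuple (masked_sentence, n_attr, good_form, bad_form),
# where the target position has been replaced by the ***mask*** marker.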
def eval_gulordava():
for i,(masked,natt,good,bad) in enumerate(read_gulordava()):
if good in ["is","are"]:
print("skipping is/are")
continue
ps = get_probs_for_words(masked,good,bad)
if ps is None: continue
gp = ps[0]
bp = ps[1]
print(str(gp>bp),natt,good,gp,bad,bp,masked.encode("utf8"),sep=u"\t")
if i%100 == 0:
print(i,file=sys.stderr)
sys.stdout.flush()
if 'marvin' in sys.argv:
eval_marvin()
elif 'gul' in sys.argv:
eval_gulordava()
else:
eval_lgd()
| bert-syntax-master | eval_bert.py |
import sys
from collections import *
files=[("base","results/marvin_results_base.txt"),("large","results/marvin_results_large.txt")]
if "with_only_prefix" in sys.argv:
files += [("base_only_prefix","results/marvin_results_base_only_prefix.txt"),("large_only_prefix","results/marvin_results_large_only_prefix.txt")]
if "no_split" in sys.argv:
files.append(("openai_gpt", "results/marvin_results_openai_gpt_no_split.txt"))
elif "use_postfix" in sys.argv:
files.append(("openai_gpt", "results/marvin_results_openai_gpt_use_postfix.txt"))
else:
files.append(("openai_gpt", "results/marvin_results_openai_gpt.txt"))
by_model={}
conditions=set()
for title,fname in files:
lines = open(fname)
results=defaultdict(Counter)
by_model[title]=results
skipped = set()
for line in lines:
if line.startswith("Better speed"): continue
if line.startswith("skipping"):
skipped.add(line.split()[1])
next(lines)
continue
res,c1,c2,w1,w2,s = line.split(None, 5)
c1 = c1.replace("inanim","anim")
conditions.add(c1)
results[c1][res]+=1
print("skipped:",skipped)
if "with_only_prefix" in sys.argv:
print("condition & base & large & base_only_prefix & large_only_prefix & openai_gpt & count bert & count openai_gpt \\\\")
else:
print("condition & base & large & openai_gpt & count bert & count openai_gpt \\\\")
for cond in conditions:
rb = by_model['base'][cond]
rl = by_model['large'][cond]
ro = by_model['openai_gpt'][cond]
if sum(ro.values())==0:
so = "-"
else:
so = "%.2f" % (ro['True']/(ro['True']+ro['False']))
sb = "%.2f" % (rb['True']/(rb['True']+rb['False']))
sl = "%.2f" % (rl['True']/(rl['True']+rl['False']))
if "with_only_prefix" in sys.argv:
rbp = by_model['base_only_prefix'][cond]
rlp = by_model['large_only_prefix'][cond]
sbp = "%.2f" % (rbp['True']/(rbp['True']+rbp['False']))
slp = "%.2f" % (rlp['True']/(rlp['True']+rlp['False']))
print(" & ".join(map(str,[cond, sb, sl, sbp, slp, so, sum(rb.values()), sum(ro.values())])),"\\\\")
else:
print(" & ".join(map(str,[cond, sb, sl, so, sum(rb.values()), sum(ro.values())])),"\\\\")
| bert-syntax-master | gen_marvin_tbl_openai_gpt.py |
import sys
from collections import *
files=[("base","results/gulordava_results_base.txt"),("large","results/gulordava_results_large.txt")]
by_model={}
conditions=set()
nskipped=0
for title,fname in files:
lines = open(fname)
results=defaultdict(Counter)
by_model[title]=results
skipped = set()
for line in lines:
if line.startswith("Better speed"): continue
if line.startswith("skipping"):
skipped.add(line.split()[1])
#next(lines) # no need to skip, skipped in testing
nskipped += 1
continue
assert (line.strip().split()[0] in ['True','False']),line
res,c1,_ = line.split(None, 2)
conditions.add(c1)
conditions.add('all')
results[c1][res]+=1
print("adding",res,"to",c1)
results['all'][res]+=1
print("skipped:",nskipped,len(skipped),skipped)
print("condition & base & large & count \\\\")
for cond in conditions:
rb = by_model['base'][cond]
rl = by_model['large'][cond]
if sum(rb.values())==0: continue
sb = "%.2f" % (rb['True']/(rb['True']+rb['False']))
sl = "%.2f" % (rl['True']/(rl['True']+rl['False']))
print(" & ".join(map(str,[cond, sb, sl, sum(rb.values())])),"\\\\")
| bert-syntax-master | gen_gul_tbl.py |
import sys
from collections import *
files=[("base","results/lgd_results_base.txt"),("large","results/lgd_results_large.txt")]
if "with_only_prefix" in sys.argv:
files+=[("base_only_prefix","results/lgd_results_base_only_prefix.txt"),("large_only_prefix","results/lgd_results_large_only_prefix.txt")]
if "no_split" in sys.argv:
files.append(("openai_gpt", "results/lgd_results_openai_gpt_no_split.txt"))
elif "use_postfix" in sys.argv:
print("Let's use postfix")
files.append(("openai_gpt", "results/lgd_results_openai_gpt_use_postfix.txt"))
else:
files.append(("openai_gpt", "results/lgd_results_openai_gpt.txt"))
by_model={}
conditions=set()
nskipped=0
for title,fname in files:
lines = open(fname)
results=defaultdict(Counter)
by_model[title]=results
skipped = set()
for line in lines:
if line.startswith("Better speed"): continue
if line.startswith("skipping"):
skipped.add(line.split()[1])
#next(lines) # no need to skip, skipped in testing
nskipped += 1
continue
assert (line.strip().split()[0] in ['True','False']),line
res,c1,_ = line.split(None, 2)
conditions.add(c1)
results[c1][res]+=1
print("skipped:",nskipped,len(skipped),skipped)
if "with_only_prefix" in sys.argv:
print("condition & base & large & base_only_prefix & large_only_prefix & openai_gpt & count bert & count openai_gpt \\\\")
else:
print("condition & base & large & openai_gpt & count bert & count openai_gpt \\\\")
for cond in conditions:
rb = by_model['base'][cond]
rl = by_model['large'][cond]
ro = by_model['openai_gpt'][cond]
sb = "%.2f" % (rb['True']/(rb['True']+rb['False']))
sl = "%.2f" % (rl['True']/(rl['True']+rl['False']))
so = "%.2f" % (ro['True']/(ro['True']+ro['False']))
if "with_only_prefix" in sys.argv:
rbp = by_model['base_only_prefix'][cond]
rlp = by_model['large_only_prefix'][cond]
sbp = "%.2f" % (rbp['True']/(rbp['True']+rbp['False']))
slp = "%.2f" % (rlp['True']/(rlp['True']+rlp['False']))
print(" & ".join(map(str,[cond, sb, sl, sbp, slp, so, sum(rb.values()), sum(ro.values())])),"\\\\")
else:
print(" & ".join(map(str,[cond, sb, sl, so, sum(rb.values()), sum(ro.values())])),"\\\\")
| bert-syntax-master | gen_lgd_tbl_openai_gpt.py |
'''
inflect.py: correctly generate plurals, ordinals, indefinite articles;
convert numbers to words
Copyright (C) 2010 Paul Dyson
Based upon the Perl module Lingua::EN::Inflect by Damian Conway.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The original Perl module Lingua::EN::Inflect by Damian Conway is
available from http://search.cpan.org/~dconway/
This module can be downloaded at http://pypi.python.org/pypi/inflect
methods:
classical inflect
plural plural_noun plural_verb plural_adj singular_noun no num a an
compare compare_nouns compare_verbs compare_adjs
present_participle
ordinal
number_to_words
join
defnoun defverb defadj defa defan
INFLECTIONS: classical inflect
plural plural_noun plural_verb plural_adj singular_noun compare
no num a an present_participle
PLURALS: classical inflect
plural plural_noun plural_verb plural_adj singular_noun no num
compare compare_nouns compare_verbs compare_adjs
COMPARISONS: classical
compare compare_nouns compare_verbs compare_adjs
ARTICLES: classical inflect num a an
NUMERICAL: ordinal number_to_words
USER_DEFINED: defnoun defverb defadj defa defan
Exceptions:
UnknownClassicalModeError
BadNumValueError
BadChunkingOptionError
NumOutOfRangeError
BadUserDefinedPatternError
BadRcFileError
BadGenderError
'''
from re import match, search, subn, IGNORECASE, VERBOSE
from re import split as splitre
from re import error as reerror
from re import sub as resub
class UnknownClassicalModeError(Exception):
pass
class BadNumValueError(Exception):
pass
class BadChunkingOptionError(Exception):
pass
class NumOutOfRangeError(Exception):
pass
class BadUserDefinedPatternError(Exception):
pass
class BadRcFileError(Exception):
pass
class BadGenderError(Exception):
pass
__ver_major__ = 0
__ver_minor__ = 2
__ver_patch__ = 5
__ver_sub__ = ""
__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__,
__ver_patch__, __ver_sub__)
STDOUT_ON = False
def print3(txt):
if STDOUT_ON:
print(txt)
def enclose(s):
return "(?:%s)" % s
def joinstem(cutpoint=0, words=''):
'''
join stem of each word in words into a string for regex
each word is truncated at cutpoint
cutpoint is usually negative indicating the number of letters to remove
from the end of each word
e.g.
joinstem(-2, ["ephemeris", "iris", ".*itis"]) returns
(?:ephemer|ir|.*it)
'''
return enclose('|'.join(w[:cutpoint] for w in words))
def bysize(words):
'''
take a list of words and return a dict of sets sorted by word length
e.g.
ret[3]=set(['ant', 'cat', 'dog', 'pig'])
ret[4]=set(['frog', 'goat'])
ret[5]=set(['horse'])
ret[8]=set(['elephant'])
'''
ret = {}
for w in words:
if len(w) not in ret:
ret[len(w)] = set()
ret[len(w)].add(w)
return ret
def make_pl_si_lists(lst, plending, siendingsize, dojoinstem=True):
'''
given a list of singular words: lst
an ending to append to make the plural: plending
the number of characters to remove from the singular before appending plending: siendingsize
a flag whether to create a joinstem: dojoinstem
return:
a list of pluralised words: si_list (called si because this is what you need to
look for to make the singular)
the pluralised words as a dict of sets sorted by word length: si_bysize
the singular words as a dict of sets sorted by word length: pl_bysize
if dojoinstem is True: a regular expression that matches any of the stems: stem
'''
if siendingsize is not None:
siendingsize = -siendingsize
si_list = [w[:siendingsize] + plending for w in lst]
pl_bysize = bysize(lst)
si_bysize = bysize(si_list)
if dojoinstem:
stem = joinstem(siendingsize, lst)
return si_list, si_bysize, pl_bysize, stem
else:
return si_list, si_bysize, pl_bysize
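# Illustrative example (editorial, not in the original source):
#   make_pl_si_lists(['focus', 'cactus'], 'i', 2)
#   # -> (['foci', 'cacti'],              # si_list: the plural spellings
#   #     {4: {'foci'}, 5: {'cacti'}},    # si_bysize
#   #     {5: {'focus'}, 6: {'cactus'}},  # pl_bysize
#   #     '(?:foc|cact)')                 # stem regex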
# 1. PLURALS
pl_sb_irregular_s = {
"corpus": "corpuses|corpora",
"opus": "opuses|opera",
"genus": "genera",
"mythos": "mythoi",
"penis": "penises|penes",
"testis": "testes",
"atlas": "atlases|atlantes",
"yes": "yeses",
}
pl_sb_irregular = {
"child": "children",
"brother": "brothers|brethren",
"loaf": "loaves",
"hoof": "hoofs|hooves",
"beef": "beefs|beeves",
"thief": "thiefs|thieves",
"money": "monies",
"mongoose": "mongooses",
"ox": "oxen",
"cow": "cows|kine",
"graffito": "graffiti",
"octopus": "octopuses|octopodes",
"genie": "genies|genii",
"ganglion": "ganglions|ganglia",
"trilby": "trilbys",
"turf": "turfs|turves",
"numen": "numina",
"atman": "atmas",
"occiput": "occiputs|occipita",
"sabretooth": "sabretooths",
"sabertooth": "sabertooths",
"lowlife": "lowlifes",
"flatfoot": "flatfoots",
"tenderfoot": "tenderfoots",
"romany": "romanies",
"jerry": "jerries",
"mary": "maries",
"talouse": "talouses",
"blouse": "blouses",
"rom": "roma",
"carmen": "carmina",
}
pl_sb_irregular.update(pl_sb_irregular_s)
# pl_sb_irregular_keys = enclose('|'.join(pl_sb_irregular.keys()))
pl_sb_irregular_caps = {
'Romany': 'Romanies',
'Jerry': 'Jerrys',
'Mary': 'Marys',
'Rom': 'Roma',
}
pl_sb_irregular_compound = {
"prima donna": "prima donnas|prime donne",
}
si_sb_irregular = dict([(v, k) for (k, v) in pl_sb_irregular.items()])
keys = list(si_sb_irregular.keys())
for k in keys:
if '|' in k:
k1, k2 = k.split('|')
si_sb_irregular[k1] = si_sb_irregular[k2] = si_sb_irregular[k]
del si_sb_irregular[k]
si_sb_irregular_caps = dict([(v, k) for (k, v) in pl_sb_irregular_caps.items()])
si_sb_irregular_compound = dict([(v, k) for (k, v) in pl_sb_irregular_compound.items()])
keys = list(si_sb_irregular_compound.keys())
for k in keys:
if '|' in k:
k1, k2 = k.split('|')
si_sb_irregular_compound[k1] = si_sb_irregular_compound[k2] = si_sb_irregular_compound[k]
del si_sb_irregular_compound[k]
# si_sb_irregular_keys = enclose('|'.join(si_sb_irregular.keys()))
# Z's that don't double
pl_sb_z_zes_list = (
"quartz", "topaz",
)
pl_sb_z_zes_bysize = bysize(pl_sb_z_zes_list)
pl_sb_ze_zes_list = ('snooze',)
pl_sb_ze_zes_bysize = bysize(pl_sb_ze_zes_list)
# CLASSICAL "..is" -> "..ides"
pl_sb_C_is_ides_complete = [
# GENERAL WORDS...
"ephemeris", "iris", "clitoris",
"chrysalis", "epididymis",
]
pl_sb_C_is_ides_endings = [
    # INFLAMMATIONS...
"itis",
]
pl_sb_C_is_ides = joinstem(-2, pl_sb_C_is_ides_complete + ['.*%s' % w for w in pl_sb_C_is_ides_endings])
pl_sb_C_is_ides_list = pl_sb_C_is_ides_complete + pl_sb_C_is_ides_endings
(si_sb_C_is_ides_list, si_sb_C_is_ides_bysize,
pl_sb_C_is_ides_bysize) = make_pl_si_lists(pl_sb_C_is_ides_list, 'ides', 2, dojoinstem=False)
# CLASSICAL "..a" -> "..ata"
pl_sb_C_a_ata_list = (
"anathema", "bema", "carcinoma", "charisma", "diploma",
"dogma", "drama", "edema", "enema", "enigma", "lemma",
"lymphoma", "magma", "melisma", "miasma", "oedema",
"sarcoma", "schema", "soma", "stigma", "stoma", "trauma",
"gumma", "pragma",
)
(si_sb_C_a_ata_list, si_sb_C_a_ata_bysize,
pl_sb_C_a_ata_bysize, pl_sb_C_a_ata) = make_pl_si_lists(pl_sb_C_a_ata_list, 'ata', 1)
# UNCONDITIONAL "..a" -> "..ae"
pl_sb_U_a_ae_list = (
"alumna", "alga", "vertebra", "persona"
)
(si_sb_U_a_ae_list, si_sb_U_a_ae_bysize,
pl_sb_U_a_ae_bysize, pl_sb_U_a_ae) = make_pl_si_lists(pl_sb_U_a_ae_list, 'e', None)
# CLASSICAL "..a" -> "..ae"
pl_sb_C_a_ae_list = (
"amoeba", "antenna", "formula", "hyperbola",
"medusa", "nebula", "parabola", "abscissa",
"hydra", "nova", "lacuna", "aurora", "umbra",
"flora", "fauna",
)
(si_sb_C_a_ae_list, si_sb_C_a_ae_bysize,
pl_sb_C_a_ae_bysize, pl_sb_C_a_ae) = make_pl_si_lists(pl_sb_C_a_ae_list, 'e', None)
# CLASSICAL "..en" -> "..ina"
pl_sb_C_en_ina_list = (
"stamen", "foramen", "lumen",
)
(si_sb_C_en_ina_list, si_sb_C_en_ina_bysize,
pl_sb_C_en_ina_bysize, pl_sb_C_en_ina) = make_pl_si_lists(pl_sb_C_en_ina_list, 'ina', 2)
# UNCONDITIONAL "..um" -> "..a"
pl_sb_U_um_a_list = (
"bacterium", "agendum", "desideratum", "erratum",
"stratum", "datum", "ovum", "extremum",
"candelabrum",
)
(si_sb_U_um_a_list, si_sb_U_um_a_bysize,
pl_sb_U_um_a_bysize, pl_sb_U_um_a) = make_pl_si_lists(pl_sb_U_um_a_list, 'a', 2)
# CLASSICAL "..um" -> "..a"
pl_sb_C_um_a_list = (
"maximum", "minimum", "momentum", "optimum",
"quantum", "cranium", "curriculum", "dictum",
"phylum", "aquarium", "compendium", "emporium",
"enconium", "gymnasium", "honorarium", "interregnum",
"lustrum", "memorandum", "millennium", "rostrum",
"spectrum", "speculum", "stadium", "trapezium",
"ultimatum", "medium", "vacuum", "velum",
"consortium", "arboretum",
)
(si_sb_C_um_a_list, si_sb_C_um_a_bysize,
pl_sb_C_um_a_bysize, pl_sb_C_um_a) = make_pl_si_lists(pl_sb_C_um_a_list, 'a', 2)
# UNCONDITIONAL "..us" -> "i"
pl_sb_U_us_i_list = (
"alumnus", "alveolus", "bacillus", "bronchus",
"locus", "nucleus", "stimulus", "meniscus",
"sarcophagus",
)
(si_sb_U_us_i_list, si_sb_U_us_i_bysize,
pl_sb_U_us_i_bysize, pl_sb_U_us_i) = make_pl_si_lists(pl_sb_U_us_i_list, 'i', 2)
# CLASSICAL "..us" -> "..i"
pl_sb_C_us_i_list = (
"focus", "radius", "genius",
"incubus", "succubus", "nimbus",
"fungus", "nucleolus", "stylus",
"torus", "umbilicus", "uterus",
"hippopotamus", "cactus",
)
(si_sb_C_us_i_list, si_sb_C_us_i_bysize,
pl_sb_C_us_i_bysize, pl_sb_C_us_i) = make_pl_si_lists(pl_sb_C_us_i_list, 'i', 2)
# CLASSICAL "..us" -> "..us" (ASSIMILATED 4TH DECLENSION LATIN NOUNS)
pl_sb_C_us_us = (
"status", "apparatus", "prospectus", "sinus",
"hiatus", "impetus", "plexus",
)
pl_sb_C_us_us_bysize = bysize(pl_sb_C_us_us)
# UNCONDITIONAL "..on" -> "a"
pl_sb_U_on_a_list = (
"criterion", "perihelion", "aphelion",
"phenomenon", "prolegomenon", "noumenon",
"organon", "asyndeton", "hyperbaton",
)
(si_sb_U_on_a_list, si_sb_U_on_a_bysize,
pl_sb_U_on_a_bysize, pl_sb_U_on_a) = make_pl_si_lists(pl_sb_U_on_a_list, 'a', 2)
# CLASSICAL "..on" -> "..a"
pl_sb_C_on_a_list = (
"oxymoron",
)
(si_sb_C_on_a_list, si_sb_C_on_a_bysize,
pl_sb_C_on_a_bysize, pl_sb_C_on_a) = make_pl_si_lists(pl_sb_C_on_a_list, 'a', 2)
# CLASSICAL "..o" -> "..i" (BUT NORMALLY -> "..os")
pl_sb_C_o_i = [
"solo", "soprano", "basso", "alto",
"contralto", "tempo", "piano", "virtuoso",
] # list not tuple so can concat for pl_sb_U_o_os
pl_sb_C_o_i_bysize = bysize(pl_sb_C_o_i)
si_sb_C_o_i_bysize = bysize(['%si' % w[:-1] for w in pl_sb_C_o_i])
pl_sb_C_o_i_stems = joinstem(-1, pl_sb_C_o_i)
# ALWAYS "..o" -> "..os"
pl_sb_U_o_os_complete = set((
"ado", "ISO", "NATO", "NCO", "NGO", "oto",
))
si_sb_U_o_os_complete = set('%ss' % w for w in pl_sb_U_o_os_complete)
pl_sb_U_o_os_endings = [
"aficionado", "aggro",
"albino", "allegro", "ammo",
"Antananarivo", "archipelago", "armadillo",
"auto", "avocado", "Bamako",
"Barquisimeto", "bimbo", "bingo",
"Biro", "bolero", "Bolzano",
"bongo", "Boto", "burro",
"Cairo", "canto", "cappuccino",
"casino", "cello", "Chicago",
"Chimango", "cilantro", "cochito",
"coco", "Colombo", "Colorado",
"commando", "concertino", "contango",
"credo", "crescendo", "cyano",
"demo", "ditto", "Draco",
"dynamo", "embryo", "Esperanto",
"espresso", "euro", "falsetto",
"Faro", "fiasco", "Filipino",
"flamenco", "furioso", "generalissimo",
"Gestapo", "ghetto", "gigolo",
"gizmo", "Greensboro", "gringo",
"Guaiabero", "guano", "gumbo",
"gyro", "hairdo", "hippo",
"Idaho", "impetigo", "inferno",
"info", "intermezzo", "intertrigo",
"Iquico", "jumbo",
"junto", "Kakapo", "kilo",
"Kinkimavo", "Kokako", "Kosovo",
"Lesotho", "libero", "libido",
"libretto", "lido", "Lilo",
"limbo", "limo", "lineno",
"lingo", "lino", "livedo",
"loco", "logo", "lumbago",
"macho", "macro", "mafioso",
"magneto", "magnifico", "Majuro",
"Malabo", "manifesto", "Maputo",
"Maracaibo", "medico", "memo",
"metro", "Mexico", "micro",
"Milano", "Monaco", "mono",
"Montenegro", "Morocco", "Muqdisho",
"myo",
"neutrino", "Ningbo",
"octavo", "oregano", "Orinoco",
"Orlando", "Oslo",
"panto", "Paramaribo", "Pardusco",
"pedalo", "photo", "pimento",
"pinto", "pleco", "Pluto",
"pogo", "polo", "poncho",
"Porto-Novo", "Porto", "pro",
"psycho", "pueblo", "quarto",
"Quito", "rhino", "risotto",
"rococo", "rondo", "Sacramento",
"saddo", "sago", "salvo",
"Santiago", "Sapporo", "Sarajevo",
"scherzando", "scherzo", "silo",
"sirocco", "sombrero", "staccato",
"sterno", "stucco", "stylo",
"sumo", "Taiko", "techno",
"terrazzo", "testudo", "timpano",
"tiro", "tobacco", "Togo",
"Tokyo", "torero", "Torino",
"Toronto", "torso", "tremolo",
"typo", "tyro", "ufo",
"UNESCO", "vaquero", "vermicello",
"verso", "vibrato", "violoncello",
"Virgo", "weirdo", "WHO",
"WTO", "Yamoussoukro", "yo-yo",
"zero", "Zibo",
] + pl_sb_C_o_i
pl_sb_U_o_os_bysize = bysize(pl_sb_U_o_os_endings)
si_sb_U_o_os_bysize = bysize(['%ss' % w for w in pl_sb_U_o_os_endings])
# UNCONDITIONAL "..ch" -> "..chs"
pl_sb_U_ch_chs_list = (
"czech", "eunuch", "stomach"
)
(si_sb_U_ch_chs_list, si_sb_U_ch_chs_bysize,
pl_sb_U_ch_chs_bysize, pl_sb_U_ch_chs) = make_pl_si_lists(pl_sb_U_ch_chs_list, 's', None)
# UNCONDITIONAL "..[ei]x" -> "..ices"
pl_sb_U_ex_ices_list = (
"codex", "murex", "silex",
)
(si_sb_U_ex_ices_list, si_sb_U_ex_ices_bysize,
pl_sb_U_ex_ices_bysize, pl_sb_U_ex_ices) = make_pl_si_lists(pl_sb_U_ex_ices_list, 'ices', 2)
pl_sb_U_ix_ices_list = (
"radix", "helix",
)
(si_sb_U_ix_ices_list, si_sb_U_ix_ices_bysize,
pl_sb_U_ix_ices_bysize, pl_sb_U_ix_ices) = make_pl_si_lists(pl_sb_U_ix_ices_list, 'ices', 2)
# CLASSICAL "..[ei]x" -> "..ices"
pl_sb_C_ex_ices_list = (
"vortex", "vertex", "cortex", "latex",
"pontifex", "apex", "index", "simplex",
)
(si_sb_C_ex_ices_list, si_sb_C_ex_ices_bysize,
pl_sb_C_ex_ices_bysize, pl_sb_C_ex_ices) = make_pl_si_lists(pl_sb_C_ex_ices_list, 'ices', 2)
pl_sb_C_ix_ices_list = (
"appendix",
)
(si_sb_C_ix_ices_list, si_sb_C_ix_ices_bysize,
pl_sb_C_ix_ices_bysize, pl_sb_C_ix_ices) = make_pl_si_lists(pl_sb_C_ix_ices_list, 'ices', 2)
# ARABIC: ".." -> "..i"
pl_sb_C_i_list = (
"afrit", "afreet", "efreet",
)
(si_sb_C_i_list, si_sb_C_i_bysize,
pl_sb_C_i_bysize, pl_sb_C_i) = make_pl_si_lists(pl_sb_C_i_list, 'i', None)
# HEBREW: ".." -> "..im"
pl_sb_C_im_list = (
"goy", "seraph", "cherub",
)
(si_sb_C_im_list, si_sb_C_im_bysize,
pl_sb_C_im_bysize, pl_sb_C_im) = make_pl_si_lists(pl_sb_C_im_list, 'im', None)
# UNCONDITIONAL "..man" -> "..mans"
pl_sb_U_man_mans_list = """
ataman caiman cayman ceriman
desman dolman farman harman hetman
human leman ottoman shaman talisman
""".split()
pl_sb_U_man_mans_caps_list = """
Alabaman Bahaman Burman German
Hiroshiman Liman Nakayaman Norman Oklahoman
Panaman Roman Selman Sonaman Tacoman Yakiman
Yokohaman Yuman
""".split()
(si_sb_U_man_mans_list, si_sb_U_man_mans_bysize,
pl_sb_U_man_mans_bysize) = make_pl_si_lists(pl_sb_U_man_mans_list, 's', None, dojoinstem=False)
(si_sb_U_man_mans_caps_list, si_sb_U_man_mans_caps_bysize,
pl_sb_U_man_mans_caps_bysize) = make_pl_si_lists(pl_sb_U_man_mans_caps_list, 's', None, dojoinstem=False)
pl_sb_uninflected_s_complete = [
# PAIRS OR GROUPS SUBSUMED TO A SINGULAR...
"breeches", "britches", "pajamas", "pyjamas", "clippers", "gallows",
"hijinks", "headquarters", "pliers", "scissors", "testes", "herpes",
"pincers", "shears", "proceedings", "trousers",
# UNASSIMILATED LATIN 4th DECLENSION
"cantus", "coitus", "nexus",
# RECENT IMPORTS...
"contretemps", "corps", "debris",
"siemens",
# DISEASES
"mumps",
# MISCELLANEOUS OTHERS...
"diabetes", "jackanapes", "series", "species", "subspecies", "rabies",
"chassis", "innings", "news", "mews", "haggis",
]
pl_sb_uninflected_s_endings = [
# RECENT IMPORTS...
"ois",
# DISEASES
"measles",
]
pl_sb_uninflected_s = pl_sb_uninflected_s_complete + ['.*%s' % w for w in pl_sb_uninflected_s_endings]
pl_sb_uninflected_herd = (
# DON'T INFLECT IN CLASSICAL MODE, OTHERWISE NORMAL INFLECTION
"wildebeest", "swine", "eland", "bison", "buffalo",
"elk", "rhinoceros", 'zucchini',
'caribou', 'dace', 'grouse', 'guinea fowl', 'guinea-fowl',
'haddock', 'hake', 'halibut', 'herring', 'mackerel',
'pickerel', 'pike', 'roe', 'seed', 'shad',
'snipe', 'teal', 'turbot', 'water fowl', 'water-fowl',
)
pl_sb_uninflected_complete = [
# SOME FISH AND HERD ANIMALS
"tuna", "salmon", "mackerel", "trout",
"bream", "sea-bass", "sea bass", "carp", "cod", "flounder", "whiting",
"moose",
# OTHER ODDITIES
"graffiti", "djinn", 'samuri',
'offspring', 'pence', 'quid', 'hertz',
] + pl_sb_uninflected_s_complete
# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE)
pl_sb_uninflected_caps = [
# ALL NATIONALS ENDING IN -ese
"Portuguese", "Amoyese", "Borghese", "Congoese", "Faroese",
"Foochowese", "Genevese", "Genoese", "Gilbertese", "Hottentotese",
"Kiplingese", "Kongoese", "Lucchese", "Maltese", "Nankingese",
"Niasese", "Pekingese", "Piedmontese", "Pistoiese", "Sarawakese",
"Shavese", "Vermontese", "Wenchowese", "Yengeese",
]
pl_sb_uninflected_endings = [
# SOME FISH AND HERD ANIMALS
"fish",
"deer", "sheep",
# ALL NATIONALS ENDING IN -ese
"nese", "rese", "lese", "mese",
# DISEASES
"pox",
# OTHER ODDITIES
'craft',
] + pl_sb_uninflected_s_endings
# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE)
pl_sb_uninflected_bysize = bysize(pl_sb_uninflected_endings)
# SINGULAR WORDS ENDING IN ...s (ALL INFLECT WITH ...es)
pl_sb_singular_s_complete = [
"acropolis", "aegis", "alias", "asbestos", "bathos", "bias",
"bronchitis", "bursitis", "caddis", "cannabis",
"canvas", "chaos", "cosmos", "dais", "digitalis",
"epidermis", "ethos", "eyas", "gas", "glottis",
"hubris", "ibis", "lens", "mantis", "marquis", "metropolis",
"pathos", "pelvis", "polis", "rhinoceros",
"sassafras", "trellis",
] + pl_sb_C_is_ides_complete
pl_sb_singular_s_endings = [
"ss", "us",
] + pl_sb_C_is_ides_endings
pl_sb_singular_s_bysize = bysize(pl_sb_singular_s_endings)
si_sb_singular_s_complete = ['%ses' % w for w in pl_sb_singular_s_complete]
si_sb_singular_s_endings = ['%ses' % w for w in pl_sb_singular_s_endings]
si_sb_singular_s_bysize = bysize(si_sb_singular_s_endings)
pl_sb_singular_s_es = [
"[A-Z].*es",
]
pl_sb_singular_s = enclose('|'.join(pl_sb_singular_s_complete +
['.*%s' % w for w in pl_sb_singular_s_endings] +
pl_sb_singular_s_es))
# PLURALS ENDING IN uses -> use
si_sb_ois_oi_case = (
'Bolshois', 'Hanois'
)
si_sb_uses_use_case = (
'Betelgeuses', 'Duses', 'Meuses', 'Syracuses', 'Toulouses',
)
si_sb_uses_use = (
'abuses', 'applauses', 'blouses',
'carouses', 'causes', 'chartreuses', 'clauses',
'contuses', 'douses', 'excuses', 'fuses',
'grouses', 'hypotenuses', 'masseuses',
'menopauses', 'misuses', 'muses', 'overuses', 'pauses',
'peruses', 'profuses', 'recluses', 'reuses',
'ruses', 'souses', 'spouses', 'suffuses', 'transfuses', 'uses',
)
si_sb_ies_ie_case = (
'Addies', 'Aggies', 'Allies', 'Amies', 'Angies', 'Annies',
'Annmaries', 'Archies', 'Arties', 'Aussies', 'Barbies',
'Barries', 'Basies', 'Bennies', 'Bernies', 'Berties', 'Bessies',
'Betties', 'Billies', 'Blondies', 'Bobbies', 'Bonnies',
'Bowies', 'Brandies', 'Bries', 'Brownies', 'Callies',
'Carnegies', 'Carries', 'Cassies', 'Charlies', 'Cheries',
'Christies', 'Connies', 'Curies', 'Dannies', 'Debbies', 'Dixies',
'Dollies', 'Donnies', 'Drambuies', 'Eddies', 'Effies', 'Ellies',
'Elsies', 'Eries', 'Ernies', 'Essies', 'Eugenies', 'Fannies',
'Flossies', 'Frankies', 'Freddies', 'Gillespies', 'Goldies',
'Gracies', 'Guthries', 'Hallies', 'Hatties', 'Hetties',
'Hollies', 'Jackies', 'Jamies', 'Janies', 'Jannies', 'Jeanies',
'Jeannies', 'Jennies', 'Jessies', 'Jimmies', 'Jodies', 'Johnies',
'Johnnies', 'Josies', 'Julies', 'Kalgoorlies', 'Kathies', 'Katies',
'Kellies', 'Kewpies', 'Kristies', 'Laramies', 'Lassies', 'Lauries',
'Leslies', 'Lessies', 'Lillies', 'Lizzies', 'Lonnies', 'Lories',
'Lorries', 'Lotties', 'Louies', 'Mackenzies', 'Maggies', 'Maisies',
'Mamies', 'Marcies', 'Margies', 'Maries', 'Marjories', 'Matties',
'McKenzies', 'Melanies', 'Mickies', 'Millies', 'Minnies', 'Mollies',
'Mounties', 'Nannies', 'Natalies', 'Nellies', 'Netties', 'Ollies',
'Ozzies', 'Pearlies', 'Pottawatomies', 'Reggies', 'Richies', 'Rickies',
'Robbies', 'Ronnies', 'Rosalies', 'Rosemaries', 'Rosies', 'Roxies',
'Rushdies', 'Ruthies', 'Sadies', 'Sallies', 'Sammies', 'Scotties',
'Selassies', 'Sherries', 'Sophies', 'Stacies', 'Stefanies', 'Stephanies',
'Stevies', 'Susies', 'Sylvies', 'Tammies', 'Terries', 'Tessies',
'Tommies', 'Tracies', 'Trekkies', 'Valaries', 'Valeries', 'Valkyries',
'Vickies', 'Virgies', 'Willies', 'Winnies', 'Wylies', 'Yorkies',
)
si_sb_ies_ie = (
'aeries', 'baggies', 'belies', 'biggies', 'birdies', 'bogies',
'bonnies', 'boogies', 'bookies', 'bourgeoisies', 'brownies',
'budgies', 'caddies', 'calories', 'camaraderies', 'cockamamies',
'collies', 'cookies', 'coolies', 'cooties', 'coteries', 'crappies',
'curies', 'cutesies', 'dogies', 'eyrie', 'floozies', 'footsies',
'freebies', 'genies', 'goalies', 'groupies',
'hies', 'jalousies', 'junkies',
'kiddies', 'laddies', 'lassies', 'lies',
'lingeries', 'magpies', 'menageries', 'mommies', 'movies', 'neckties',
'newbies', 'nighties', 'oldies', 'organdies', 'overlies',
'pies', 'pinkies', 'pixies', 'potpies', 'prairies',
'quickies', 'reveries', 'rookies', 'rotisseries', 'softies', 'sorties',
'species', 'stymies', 'sweeties', 'ties', 'underlies', 'unties',
'veggies', 'vies', 'yuppies', 'zombies',
)
si_sb_oes_oe_case = (
'Chloes', 'Crusoes', 'Defoes', 'Faeroes', 'Ivanhoes', 'Joes',
'McEnroes', 'Moes', 'Monroes', 'Noes', 'Poes', 'Roscoes',
'Tahoes', 'Tippecanoes', 'Zoes',
)
si_sb_oes_oe = (
'aloes', 'backhoes', 'canoes',
'does', 'floes', 'foes', 'hoes', 'mistletoes',
'oboes', 'pekoes', 'roes', 'sloes',
'throes', 'tiptoes', 'toes', 'woes',
)
si_sb_z_zes = (
"quartzes", "topazes",
)
si_sb_zzes_zz = (
'buzzes', 'fizzes', 'frizzes', 'razzes'
)
si_sb_ches_che_case = (
'Andromaches', 'Apaches', 'Blanches', 'Comanches',
'Nietzsches', 'Porsches', 'Roches',
)
si_sb_ches_che = (
'aches', 'avalanches', 'backaches', 'bellyaches', 'caches',
'cloches', 'creches', 'douches', 'earaches', 'fiches',
'headaches', 'heartaches', 'microfiches',
'niches', 'pastiches', 'psyches', 'quiches',
'stomachaches', 'toothaches',
)
si_sb_xes_xe = (
'annexes', 'axes', 'deluxes', 'pickaxes',
)
si_sb_sses_sse_case = (
'Hesses', 'Jesses', 'Larousses', 'Matisses',
)
si_sb_sses_sse = (
'bouillabaisses', 'crevasses', 'demitasses', 'impasses',
'mousses', 'posses',
)
si_sb_ves_ve_case = (
# *[nwl]ives -> [nwl]live
'Clives', 'Palmolives',
)
si_sb_ves_ve = (
# *[^d]eaves -> eave
'interweaves', 'weaves',
# *[nwl]ives -> [nwl]live
'olives',
# *[eoa]lves -> [eoa]lve
'bivalves', 'dissolves', 'resolves', 'salves', 'twelves', 'valves',
)
plverb_special_s = enclose('|'.join(
[pl_sb_singular_s] +
pl_sb_uninflected_s +
list(pl_sb_irregular_s.keys()) + [
'(.*[csx])is',
'(.*)ceps',
'[A-Z].*s',
]
))
pl_sb_postfix_adj = {
    'general': [r'(?!major|lieutenant|brigadier|adjutant|.*star)\S+'],
'martial': ['court'],
}
for k in list(pl_sb_postfix_adj.keys()):
pl_sb_postfix_adj[k] = enclose(
enclose('|'.join(pl_sb_postfix_adj[k])) +
"(?=(?:-|\\s+)%s)" % k)
pl_sb_postfix_adj_stems = '(' + '|'.join(list(pl_sb_postfix_adj.values())) + ')(.*)'
# PLURAL WORDS ENDING IS es GO TO SINGULAR is
si_sb_es_is = (
'amanuenses', 'amniocenteses', 'analyses', 'antitheses',
'apotheoses', 'arterioscleroses', 'atheroscleroses', 'axes',
# 'bases', # bases -> basis
'catalyses', 'catharses', 'chasses', 'cirrhoses',
'cocces', 'crises', 'diagnoses', 'dialyses', 'diereses',
'electrolyses', 'emphases', 'exegeses', 'geneses',
'halitoses', 'hydrolyses', 'hypnoses', 'hypotheses', 'hystereses',
'metamorphoses', 'metastases', 'misdiagnoses', 'mitoses',
'mononucleoses', 'narcoses', 'necroses', 'nemeses', 'neuroses',
'oases', 'osmoses', 'osteoporoses', 'paralyses', 'parentheses',
'parthenogeneses', 'periphrases', 'photosyntheses', 'probosces',
'prognoses', 'prophylaxes', 'prostheses', 'preces', 'psoriases',
'psychoanalyses', 'psychokineses', 'psychoses', 'scleroses',
'scolioses', 'sepses', 'silicoses', 'symbioses', 'synopses',
'syntheses', 'taxes', 'telekineses', 'theses', 'thromboses',
'tuberculoses', 'urinalyses',
)
pl_prep_list = """
about above across after among around at athwart before behind
below beneath beside besides between betwixt beyond but by
during except for from in into near of off on onto out over
since till to under until unto upon with""".split()
pl_prep_list_da = pl_prep_list + ['de', 'du', 'da']
pl_prep_bysize = bysize(pl_prep_list_da)
pl_prep = enclose('|'.join(pl_prep_list_da))
pl_sb_prep_dual_compound = r'(.*?)((?:-|\s+)(?:' + pl_prep + r')(?:-|\s+))a(?:-|\s+)(.*)'
singular_pronoun_genders = set(['neuter',
'feminine',
'masculine',
'gender-neutral',
'feminine or masculine',
'masculine or feminine'])
pl_pron_nom = {
# NOMINATIVE REFLEXIVE
"i": "we", "myself": "ourselves",
"you": "you", "yourself": "yourselves",
"she": "they", "herself": "themselves",
"he": "they", "himself": "themselves",
"it": "they", "itself": "themselves",
"they": "they", "themself": "themselves",
# POSSESSIVE
"mine": "ours",
"yours": "yours",
"hers": "theirs",
"his": "theirs",
"its": "theirs",
"theirs": "theirs",
}
si_pron = {}
si_pron['nom'] = dict([(v, k) for (k, v) in pl_pron_nom.items()])
si_pron['nom']['we'] = 'I'
pl_pron_acc = {
# ACCUSATIVE REFLEXIVE
"me": "us", "myself": "ourselves",
"you": "you", "yourself": "yourselves",
"her": "them", "herself": "themselves",
"him": "them", "himself": "themselves",
"it": "them", "itself": "themselves",
"them": "them", "themself": "themselves",
}
pl_pron_acc_keys = enclose('|'.join(list(pl_pron_acc.keys())))
pl_pron_acc_keys_bysize = bysize(list(pl_pron_acc.keys()))
si_pron['acc'] = dict([(v, k) for (k, v) in pl_pron_acc.items()])
for thecase, plur, gend, sing in (
('nom', 'they', 'neuter', 'it'),
('nom', 'they', 'feminine', 'she'),
('nom', 'they', 'masculine', 'he'),
('nom', 'they', 'gender-neutral', 'they'),
('nom', 'they', 'feminine or masculine', 'she or he'),
('nom', 'they', 'masculine or feminine', 'he or she'),
('nom', 'themselves', 'neuter', 'itself'),
('nom', 'themselves', 'feminine', 'herself'),
('nom', 'themselves', 'masculine', 'himself'),
('nom', 'themselves', 'gender-neutral', 'themself'),
('nom', 'themselves', 'feminine or masculine', 'herself or himself'),
('nom', 'themselves', 'masculine or feminine', 'himself or herself'),
('nom', 'theirs', 'neuter', 'its'),
('nom', 'theirs', 'feminine', 'hers'),
('nom', 'theirs', 'masculine', 'his'),
('nom', 'theirs', 'gender-neutral', 'theirs'),
('nom', 'theirs', 'feminine or masculine', 'hers or his'),
('nom', 'theirs', 'masculine or feminine', 'his or hers'),
('acc', 'them', 'neuter', 'it'),
('acc', 'them', 'feminine', 'her'),
('acc', 'them', 'masculine', 'him'),
('acc', 'them', 'gender-neutral', 'them'),
('acc', 'them', 'feminine or masculine', 'her or him'),
('acc', 'them', 'masculine or feminine', 'him or her'),
('acc', 'themselves', 'neuter', 'itself'),
('acc', 'themselves', 'feminine', 'herself'),
('acc', 'themselves', 'masculine', 'himself'),
('acc', 'themselves', 'gender-neutral', 'themself'),
('acc', 'themselves', 'feminine or masculine', 'herself or himself'),
('acc', 'themselves', 'masculine or feminine', 'himself or herself'),
):
try:
si_pron[thecase][plur][gend] = sing
except TypeError:
si_pron[thecase][plur] = {}
si_pron[thecase][plur][gend] = sing
si_pron_acc_keys = enclose('|'.join(list(si_pron['acc'].keys())))
si_pron_acc_keys_bysize = bysize(list(si_pron['acc'].keys()))
def get_si_pron(thecase, word, gender):
try:
sing = si_pron[thecase][word]
except KeyError:
raise # not a pronoun
try:
return sing[gender] # has several types due to gender
except TypeError:
return sing # answer independent of gender
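# Illustrative examples (editorial, not in the original source):
#   get_si_pron('nom', 'they', 'feminine')  # -> 'she'
#   get_si_pron('acc', 'them', 'neuter')    # -> 'it'
#   get_si_pron('nom', 'yours', 'neuter')   # -> 'yours' (gender-independent)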
plverb_irregular_pres = {
# 1st PERS. SING. 2ND PERS. SING. 3RD PERS. SINGULAR
# 3RD PERS. (INDET.)
"am": "are", "are": "are", "is": "are",
"was": "were", "were": "were", "was": "were",
"have": "have", "have": "have", "has": "have",
"do": "do", "do": "do", "does": "do",
}
plverb_ambiguous_pres = {
# 1st PERS. SING. 2ND PERS. SING. 3RD PERS. SINGULAR
# 3RD PERS. (INDET.)
"act": "act", "act": "act", "acts": "act",
"blame": "blame", "blame": "blame", "blames": "blame",
"can": "can", "can": "can", "can": "can",
"must": "must", "must": "must", "must": "must",
"fly": "fly", "fly": "fly", "flies": "fly",
"copy": "copy", "copy": "copy", "copies": "copy",
"drink": "drink", "drink": "drink", "drinks": "drink",
"fight": "fight", "fight": "fight", "fights": "fight",
"fire": "fire", "fire": "fire", "fires": "fire",
"like": "like", "like": "like", "likes": "like",
"look": "look", "look": "look", "looks": "look",
"make": "make", "make": "make", "makes": "make",
"reach": "reach", "reach": "reach", "reaches": "reach",
"run": "run", "run": "run", "runs": "run",
"sink": "sink", "sink": "sink", "sinks": "sink",
"sleep": "sleep", "sleep": "sleep", "sleeps": "sleep",
"view": "view", "view": "view", "views": "view",
}
plverb_ambiguous_pres_keys = enclose('|'.join(list(plverb_ambiguous_pres.keys())))
plverb_irregular_non_pres = (
"did", "had", "ate", "made", "put",
"spent", "fought", "sank", "gave", "sought",
"shall", "could", "ought", "should",
)
plverb_ambiguous_non_pres = enclose('|'.join((
"thought", "saw", "bent", "will", "might", "cut",
)))
# "..oes" -> "..oe" (the rest are "..oes" -> "o")
pl_v_oes_oe = ('canoes', 'floes', 'oboes', 'roes', 'throes', 'woes')
pl_v_oes_oe_endings_size4 = ('hoes', 'toes')
pl_v_oes_oe_endings_size5 = ('shoes',)  # one-element tuple, not a bare string
pl_count_zero = (
"0", "no", "zero", "nil"
)
pl_count_one = (
"1", "a", "an", "one", "each", "every", "this", "that",
)
pl_adj_special = {
"a": "some", "an": "some",
"this": "these", "that": "those",
}
pl_adj_special_keys = enclose('|'.join(list(pl_adj_special.keys())))
pl_adj_poss = {
"my": "our",
"your": "your",
"its": "their",
"her": "their",
"his": "their",
"their": "their",
}
pl_adj_poss_keys = enclose('|'.join(list(pl_adj_poss.keys())))
# 2. INDEFINITE ARTICLES
# THIS PATTERN MATCHES STRINGS OF CAPITALS STARTING WITH A "VOWEL-SOUND"
# CONSONANT FOLLOWED BY ANOTHER CONSONANT, AND WHICH ARE NOT LIKELY
# TO BE REAL WORDS (OH, ALL RIGHT THEN, IT'S JUST MAGIC!)
A_abbrev = r"""
(?! FJO | [HLMNS]Y. | RY[EO] | SQU
| ( F[LR]? | [HL] | MN? | N | RH? | S[CHKLMNPTVW]? | X(YL)?) [AEIOU])
[FHLMNRSX][A-Z]
"""
# THIS PATTERN CODES THE BEGINNINGS OF ALL ENGLISH WORDS BEGINNING WITH A
# 'y' FOLLOWED BY A CONSONANT. ANY OTHER Y-CONSONANT PREFIX THEREFORE
# IMPLIES AN ABBREVIATION.
A_y_cons = 'y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)'
# EXCEPTIONS TO EXCEPTIONS
A_explicit_a = enclose('|'.join((
"unabomber", "unanimous", "US",
)))
A_explicit_an = enclose('|'.join((
"euler",
"hour(?!i)", "heir", "honest", "hono[ur]",
"mpeg",
)))
A_ordinal_an = enclose('|'.join((
"[aefhilmnorsx]-?th",
)))
A_ordinal_a = enclose('|'.join((
"[bcdgjkpqtuvwyz]-?th",
)))
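# Illustrative effect of the patterns above (editorial sketch, not in the
# original source; a()/an() are defined further down in this module):
#   engine().a('hour')       # -> 'an hour'      (A_explicit_an)
#   engine().a('unanimous')  # -> 'a unanimous'  (A_explicit_a)
#   engine().a('FBI')        # -> 'an FBI'       (A_abbrev: first letter spoken with a vowel sound)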
# NUMERICAL INFLECTIONS
nth = {
0: 'th',
1: 'st',
2: 'nd',
3: 'rd',
4: 'th',
5: 'th',
6: 'th',
7: 'th',
8: 'th',
9: 'th',
11: 'th',
12: 'th',
13: 'th',
}
ordinal = dict(ty='tieth',
one='first',
two='second',
three='third',
five='fifth',
eight='eighth',
nine='ninth',
twelve='twelfth')
ordinal_suff = '|'.join(list(ordinal.keys()))
# NUMBERS
unit = ['', 'one', 'two', 'three', 'four', 'five',
'six', 'seven', 'eight', 'nine']
teen = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']
ten = ['', '', 'twenty', 'thirty', 'forty',
'fifty', 'sixty', 'seventy', 'eighty', 'ninety']
mill = [' ', ' thousand', ' million', ' billion', ' trillion', ' quadrillion',
' quintillion', ' sextillion', ' septillion', ' octillion',
' nonillion', ' decillion']
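# Editorial note (not in the original source): number_to_words(), defined later
# in this module, assembles its output from the unit/teen/ten/mill tables above,
# e.g. 1234 -> 'one thousand, two hundred and thirty-four'.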
# SUPPORT CLASSICAL PLURALIZATIONS
def_classical = dict(
all=False,
zero=False,
herd=False,
names=True,
persons=False,
ancient=False,
)
all_classical = dict((k, True) for k in list(def_classical.keys()))
no_classical = dict((k, False) for k in list(def_classical.keys()))
# TODO: .inflectrc file does not work
# can't just execute methods from another file like this
# for rcfile in (pathjoin(dirname(__file__), '.inflectrc'),
# expanduser(pathjoin(('~'), '.inflectrc'))):
# if isfile(rcfile):
# try:
# execfile(rcfile)
# except:
# print3("\nBad .inflectrc file (%s):\n" % rcfile)
# raise BadRcFileError
class engine:
def __init__(self):
self.classical_dict = def_classical.copy()
self.persistent_count = None
self.mill_count = 0
self.pl_sb_user_defined = []
self.pl_v_user_defined = []
self.pl_adj_user_defined = []
self.si_sb_user_defined = []
self.A_a_user_defined = []
self.thegender = 'neuter'
deprecated_methods = dict(pl='plural',
plnoun='plural_noun',
plverb='plural_verb',
pladj='plural_adj',
sinoun='single_noun',
prespart='present_participle',
numwords='number_to_words',
plequal='compare',
plnounequal='compare_nouns',
plverbequal='compare_verbs',
pladjequal='compare_adjs',
wordlist='join',
)
def __getattr__(self, meth):
if meth in self.deprecated_methods:
print3('%s() deprecated, use %s()' % (meth, self.deprecated_methods[meth]))
raise DeprecationWarning
raise AttributeError
def defnoun(self, singular, plural):
'''
Set the noun plural of singular to plural.
'''
self.checkpat(singular)
self.checkpatplural(plural)
self.pl_sb_user_defined.extend((singular, plural))
self.si_sb_user_defined.extend((plural, singular))
return 1
def defverb(self, s1, p1, s2, p2, s3, p3):
'''
Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively.
Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb.
'''
self.checkpat(s1)
self.checkpat(s2)
self.checkpat(s3)
self.checkpatplural(p1)
self.checkpatplural(p2)
self.checkpatplural(p3)
self.pl_v_user_defined.extend((s1, p1, s2, p2, s3, p3))
return 1
def defadj(self, singular, plural):
'''
Set the adjective plural of singular to plural.
'''
self.checkpat(singular)
self.checkpatplural(plural)
self.pl_adj_user_defined.extend((singular, plural))
return 1
def defa(self, pattern):
'''
        Define the indefinite article as 'a' for words matching pattern.
'''
self.checkpat(pattern)
self.A_a_user_defined.extend((pattern, 'a'))
return 1
def defan(self, pattern):
'''
        Define the indefinite article as 'an' for words matching pattern.
'''
self.checkpat(pattern)
self.A_a_user_defined.extend((pattern, 'an'))
return 1
def checkpat(self, pattern):
'''
check for errors in a regex pattern
'''
if pattern is None:
return
try:
match(pattern, '')
except reerror:
print3("\nBad user-defined singular pattern:\n\t%s\n" % pattern)
raise BadUserDefinedPatternError
def checkpatplural(self, pattern):
'''
check for errors in a regex replace pattern
'''
return
# can't find a pattern that doesn't pass the following test:
# if pattern is None:
# return
# try:
# resub('', pattern, '')
# except reerror:
# print3("\nBad user-defined plural pattern:\n\t%s\n" % pattern)
# raise BadUserDefinedPatternError
def ud_match(self, word, wordlist):
for i in range(len(wordlist) - 2, -2, -2): # backwards through even elements
mo = search(r'^%s$' % wordlist[i], word, IGNORECASE)
if mo:
if wordlist[i + 1] is None:
return None
pl = resub(r'\$(\d+)', r'\\1', wordlist[i + 1]) # change $n to \n for expand
return mo.expand(pl)
return None
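    # Illustrative example (editorial, not in the original source): user-defined
    # word lists are stored as [pattern, replacement, pattern, replacement, ...];
    # '$1' in a replacement is rewritten to '\1' and expanded against the match:
    #   self.ud_match('vortex', ['(.*)ex', '$1ices'])  # -> 'vortices'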
def classical(self, **kwargs):
"""
turn classical mode on and off for various categories
turn on all classical modes:
classical()
classical(all=True)
        turn on or off specific classical modes:
e.g.
classical(herd=True)
classical(names=False)
By default all classical modes are off except names.
        an unknown value in args or key in kwargs raises the exception UnknownClassicalModeError
"""
classical_mode = list(def_classical.keys())
if not kwargs:
self.classical_dict = all_classical.copy()
return
if 'all' in kwargs:
if kwargs['all']:
self.classical_dict = all_classical.copy()
else:
self.classical_dict = no_classical.copy()
for k, v in list(kwargs.items()):
if k in classical_mode:
self.classical_dict[k] = v
else:
raise UnknownClassicalModeError
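    # Illustrative usage (editorial, not in the original source):
    #   p = engine()
    #   p.plural_noun('formula')  # -> 'formulas'
    #   p.classical(all=True)
    #   p.plural_noun('formula')  # -> 'formulae'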
def num(self, count=None, show=None): # (;$count,$show)
'''
Set the number to be used in other method calls.
Returns count.
Set show to False to return '' instead.
'''
if count is not None:
try:
self.persistent_count = int(count)
except ValueError:
raise BadNumValueError
if (show is None) or show:
return str(count)
else:
self.persistent_count = None
return ''
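    # Illustrative usage (editorial, not in the original source): a persistent
    # count set here affects later calls until it is cleared:
    #   p = engine()
    #   p.num(1); p.plural('cat')  # -> 'cat'
    #   p.num();  p.plural('cat')  # -> 'cats'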
def gender(self, gender):
'''
        set the gender used when returning the singular of plural pronouns
can be one of:
'neuter' ('they' -> 'it')
'feminine' ('they' -> 'she')
'masculine' ('they' -> 'he')
'gender-neutral' ('they' -> 'they')
'feminine or masculine' ('they' -> 'she or he')
'masculine or feminine' ('they' -> 'he or she')
'''
if gender in singular_pronoun_genders:
self.thegender = gender
else:
raise BadGenderError
def nummo(self, matchobject):
'''
num but take a matchobject
use groups 1 and 2 in matchobject
'''
return self.num(matchobject.group(1), matchobject.group(2))
def plmo(self, matchobject):
'''
plural but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.plural(matchobject.group(1), matchobject.group(3))
def plnounmo(self, matchobject):
'''
plural_noun but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.plural_noun(matchobject.group(1), matchobject.group(3))
def plverbmo(self, matchobject):
'''
plural_verb but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.plural_verb(matchobject.group(1), matchobject.group(3))
def pladjmo(self, matchobject):
'''
plural_adj but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.plural_adj(matchobject.group(1), matchobject.group(3))
def sinounmo(self, matchobject):
'''
singular_noun but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.singular_noun(matchobject.group(1), matchobject.group(3))
def amo(self, matchobject):
'''
A but take a matchobject
use groups 1 and 3 in matchobject
'''
if matchobject.group(3) is None:
return self.a(matchobject.group(1))
return self.a(matchobject.group(1), matchobject.group(3))
def nomo(self, matchobject):
'''
NO but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.no(matchobject.group(1), matchobject.group(3))
def ordinalmo(self, matchobject):
'''
ordinal but take a matchobject
use group 1
'''
return self.ordinal(matchobject.group(1))
def numwordsmo(self, matchobject):
'''
number_to_words but take a matchobject
use group 1
'''
return self.number_to_words(matchobject.group(1))
def prespartmo(self, matchobject):
'''
prespart but take a matchobject
use group 1
'''
return self.present_participle(matchobject.group(1))
# 0. PERFORM GENERAL INFLECTIONS IN A STRING
def inflect(self, text):
'''
Perform inflections in a string.
e.g. inflect('The plural of cat is plural(cat)') returns
'The plural of cat is cats'
can use plural, plural_noun, plural_verb, plural_adj, singular_noun, a, an, no, ordinal,
number_to_words and prespart
'''
save_persistent_count = self.persistent_count
sections = splitre(r"(num\([^)]*\))", text)
inflection = []
for section in sections:
(section, count) = subn(r"num\(\s*?(?:([^),]*)(?:,([^)]*))?)?\)", self.nummo, section)
if not count:
total = -1
while total:
(section, total) = subn(
r"(?x)\bplural \( ([^),]*) (, ([^)]*) )? \) ",
self.plmo, section)
(section, count) = subn(
r"(?x)\bplural_noun \( ([^),]*) (, ([^)]*) )? \) ",
self.plnounmo, section)
total += count
(section, count) = subn(
r"(?x)\bplural_verb \( ([^),]*) (, ([^)]*) )? \) ",
self.plverbmo, section)
total += count
(section, count) = subn(
r"(?x)\bplural_adj \( ([^),]*) (, ([^)]*) )? \) ",
self.pladjmo, section)
total += count
(section, count) = subn(
r"(?x)\bsingular_noun \( ([^),]*) (, ([^)]*) )? \) ",
self.sinounmo, section)
total += count
(section, count) = subn(
r"(?x)\ban? \( ([^),]*) (, ([^)]*) )? \) ",
self.amo, section)
total += count
(section, count) = subn(
r"(?x)\bno \( ([^),]*) (, ([^)]*) )? \) ",
self.nomo, section)
total += count
(section, count) = subn(
r"(?x)\bordinal \( ([^)]*) \) ",
self.ordinalmo, section)
total += count
(section, count) = subn(
r"(?x)\bnumber_to_words \( ([^)]*) \) ",
self.numwordsmo, section)
total += count
(section, count) = subn(
r"(?x)\bpresent_participle \( ([^)]*) \) ",
self.prespartmo, section)
total += count
inflection.append(section)
self.persistent_count = save_persistent_count
return "".join(inflection)
# ## PLURAL SUBROUTINES
def postprocess(self, orig, inflected):
"""
FIX PEDANTRY AND CAPITALIZATION :-)
"""
if '|' in inflected:
inflected = inflected.split('|')[self.classical_dict['all']]
if orig == "I":
return inflected
if orig == orig.upper():
return inflected.upper()
if orig[0] == orig[0].upper():
return '%s%s' % (inflected[0].upper(),
inflected[1:])
return inflected
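    # Illustrative behaviour (editorial, not in the original source):
    # capitalisation follows the original word, and 'modern|classical'
    # alternatives are selected by the classical 'all' flag:
    #   postprocess('Cat', 'cats')       # -> 'Cats'
    #   postprocess('CAT', 'cats')       # -> 'CATS'
    #   postprocess('cow', 'cows|kine')  # -> 'cows' (or 'kine' in classical mode)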
def partition_word(self, text):
mo = search(r'\A(\s*)(.+?)(\s*)\Z', text)
try:
return mo.group(1), mo.group(2), mo.group(3)
except AttributeError: # empty string
return '', '', ''
# def pl(self, *args, **kwds):
# print 'pl() deprecated, use plural()'
# raise DeprecationWarning
# return self.plural(*args, **kwds)
#
# def plnoun(self, *args, **kwds):
# print 'plnoun() deprecated, use plural_noun()'
# raise DeprecationWarning
# return self.plural_noun(*args, **kwds)
#
# def plverb(self, *args, **kwds):
# print 'plverb() deprecated, use plural_verb()'
# raise DeprecationWarning
# return self.plural_verb(*args, **kwds)
#
# def pladj(self, *args, **kwds):
# print 'pladj() deprecated, use plural_adj()'
# raise DeprecationWarning
# return self.plural_adj(*args, **kwds)
#
# def sinoun(self, *args, **kwds):
# print 'sinoun() deprecated, use singular_noun()'
# raise DeprecationWarning
# return self.singular_noun(*args, **kwds)
#
# def prespart(self, *args, **kwds):
# print 'prespart() deprecated, use present_participle()'
# raise DeprecationWarning
# return self.present_participle(*args, **kwds)
#
# def numwords(self, *args, **kwds):
# print 'numwords() deprecated, use number_to_words()'
# raise DeprecationWarning
# return self.number_to_words(*args, **kwds)
def plural(self, text, count=None):
'''
Return the plural of text.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
'''
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(
word,
self._pl_special_adjective(word, count) or
self._pl_special_verb(word, count) or
self._plnoun(word, count))
return "%s%s%s" % (pre, plural, post)
def plural_noun(self, text, count=None):
'''
Return the plural of text, where text is a noun.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
'''
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._plnoun(word, count))
return "%s%s%s" % (pre, plural, post)
def plural_verb(self, text, count=None):
'''
Return the plural of text, where text is a verb.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
'''
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._pl_special_verb(word, count) or
self._pl_general_verb(word, count))
return "%s%s%s" % (pre, plural, post)
def plural_adj(self, text, count=None):
'''
Return the plural of text, where text is an adjective.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
'''
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._pl_special_adjective(word, count) or word)
return "%s%s%s" % (pre, plural, post)
def compare(self, word1, word2):
'''
compare word1 and word2 for equality regardless of plurality
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
'''
return (
self._plequal(word1, word2, self.plural_noun) or
self._plequal(word1, word2, self.plural_verb) or
self._plequal(word1, word2, self.plural_adj))
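    # Illustrative return values (editorial, not in the original source):
    #   p = engine()
    #   p.compare('cat', 'cats')         # -> 's:p'
    #   p.compare('cats', 'cat')         # -> 'p:s'
    #   p.compare('indexes', 'indices')  # -> 'p:p' (two plurals of the same word)
    #   p.compare('cat', 'dog')          # -> False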
def compare_nouns(self, word1, word2):
'''
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as nouns
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
'''
return self._plequal(word1, word2, self.plural_noun)
def compare_verbs(self, word1, word2):
'''
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as verbs
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
'''
return self._plequal(word1, word2, self.plural_verb)
def compare_adjs(self, word1, word2):
'''
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as adjectives
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
'''
return self._plequal(word1, word2, self.plural_adj)
def singular_noun(self, text, count=None, gender=None):
'''
Return the singular of text, where text is a plural noun.
If count supplied, then return the singular if count is one of:
1, a, an, one, each, every, this, that or if count is None
otherwise return text unchanged.
Whitespace at the start and end is preserved.
'''
pre, word, post = self.partition_word(text)
if not word:
return text
sing = self._sinoun(word, count=count, gender=gender)
if sing is not False:
plural = self.postprocess(word, self._sinoun(word, count=count, gender=gender))
return "%s%s%s" % (pre, plural, post)
return False
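    # Illustrative usage (editorial, not in the original source):
    #   p = engine()
    #   p.singular_noun('geese')                    # -> 'goose'
    #   p.singular_noun('children')                 # -> 'child'
    #   p.singular_noun('them', gender='feminine')  # -> 'her'
    #   p.singular_noun('cat')                      # -> False (already singular)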
def _plequal(self, word1, word2, pl):
classval = self.classical_dict.copy()
self.classical_dict = all_classical.copy()
if word1 == word2:
return "eq"
if word1 == pl(word2):
return "p:s"
if pl(word1) == word2:
return "s:p"
self.classical_dict = no_classical.copy()
if word1 == pl(word2):
return "p:s"
if pl(word1) == word2:
return "s:p"
self.classical_dict = classval.copy()
if pl == self.plural or pl == self.plural_noun:
if self._pl_check_plurals_N(word1, word2):
return "p:p"
if self._pl_check_plurals_N(word2, word1):
return "p:p"
if pl == self.plural or pl == self.plural_adj:
if self._pl_check_plurals_adj(word1, word2):
return "p:p"
return False
def _pl_reg_plurals(self, pair, stems, end1, end2):
if search(r"(%s)(%s\|\1%s|%s\|\1%s)" % (stems, end1, end2, end2, end1), pair):
return True
return False
def _pl_check_plurals_N(self, word1, word2):
pair = "%s|%s" % (word1, word2)
if pair in list(pl_sb_irregular_s.values()):
return True
if pair in list(pl_sb_irregular.values()):
return True
if pair in list(pl_sb_irregular_caps.values()):
return True
for (stems, end1, end2) in (
(pl_sb_C_a_ata, "as", "ata"),
(pl_sb_C_is_ides, "is", "ides"),
(pl_sb_C_a_ae, "s", "e"),
(pl_sb_C_en_ina, "ens", "ina"),
(pl_sb_C_um_a, "ums", "a"),
(pl_sb_C_us_i, "uses", "i"),
(pl_sb_C_on_a, "ons", "a"),
(pl_sb_C_o_i_stems, "os", "i"),
(pl_sb_C_ex_ices, "exes", "ices"),
(pl_sb_C_ix_ices, "ixes", "ices"),
(pl_sb_C_i, "s", "i"),
(pl_sb_C_im, "s", "im"),
('.*eau', "s", "x"),
('.*ieu', "s", "x"),
('.*tri', "xes", "ces"),
('.{2,}[yia]n', "xes", "ges")
):
if self._pl_reg_plurals(pair, stems, end1, end2):
return True
return False
def _pl_check_plurals_adj(self, word1, word2):
# VERSION: tuple in endswith requires python 2.5
word1a = word1[:word1.rfind("'")] if word1.endswith(("'s", "'")) else ''
word2a = word2[:word2.rfind("'")] if word2.endswith(("'s", "'")) else ''
# TODO: BUG? report upstream. I don't think you should chop off the s'
# word1b = word1[:-2] if word1.endswith("s'") else ''
# word2b = word2[:-2] if word2.endswith("s'") else ''
# TODO: dresses', dresses's -> dresses, dresses when chop off letters
# then they return False because they are the same. Need to fix this.
if word1a:
if word2a and (self._pl_check_plurals_N(word1a, word2a)
or self._pl_check_plurals_N(word2a, word1a)):
return True
# if word2b and ( self._pl_check_plurals_N(word1a, word2b)
# or self._pl_check_plurals_N(word2b, word1a) ):
# return True
# if word1b:
# if word2a and ( self._pl_check_plurals_N(word1b, word2a)
# or self._pl_check_plurals_N(word2a, word1b) ):
# return True
# if word2b and ( self._pl_check_plurals_N(word1b, word2b)
# or self._pl_check_plurals_N(word2b, word1b) ):
# return True
return False
def get_count(self, count=None):
if count is None and self.persistent_count is not None:
count = self.persistent_count
if count is not None:
count = 1 if ((str(count) in pl_count_one) or
(self.classical_dict['zero'] and str(count).lower() in pl_count_zero)) else 2
else:
count = ''
return count
# @profile
def _plnoun(self, word, count=None):
count = self.get_count(count)
# DEFAULT TO PLURAL
if count == 1:
return word
# HANDLE USER-DEFINED NOUNS
value = self.ud_match(word, self.pl_sb_user_defined)
if value is not None:
return value
# HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS
if word == '':
return word
lowerword = word.lower()
if lowerword in pl_sb_uninflected_complete:
return word
if word in pl_sb_uninflected_caps:
return word
for k, v in pl_sb_uninflected_bysize.items():
if lowerword[-k:] in v:
return word
if (self.classical_dict['herd'] and lowerword in pl_sb_uninflected_herd):
return word
# HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.)
mo = search(r"^(?:%s)$" % pl_sb_postfix_adj_stems, word, IGNORECASE)
if mo and mo.group(2) != '':
return "%s%s" % (self._plnoun(mo.group(1), 2), mo.group(2))
if ' a ' in lowerword or '-a-' in lowerword:
mo = search(r"^(?:%s)$" % pl_sb_prep_dual_compound, word, IGNORECASE)
if mo and mo.group(2) != '' and mo.group(3) != '':
return "%s%s%s" % (self._plnoun(mo.group(1), 2),
mo.group(2),
self._plnoun(mo.group(3)))
lowersplit = lowerword.split(' ')
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return ' '.join(
lowersplit[:numword - 1] +
[self._plnoun(lowersplit[numword - 1], 2)] + lowersplit[numword:])
lowersplit = lowerword.split('-')
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return ' '.join(
lowersplit[:numword - 1] +
[self._plnoun(lowersplit[numword - 1], 2) +
'-' + lowersplit[numword] + '-']) + ' '.join(lowersplit[(numword + 1):])
# HANDLE PRONOUNS
for k, v in pl_pron_acc_keys_bysize.items():
            if lowerword[-k:] in v:  # ends with an accusative pronoun
for pk, pv in pl_prep_bysize.items():
if lowerword[:pk] in pv: # starts with a prep
if lowerword.split() == [lowerword[:pk], lowerword[-k:]]: # only whitespace in between
return lowerword[:-k] + pl_pron_acc[lowerword[-k:]]
try:
return pl_pron_nom[word.lower()]
except KeyError:
pass
try:
return pl_pron_acc[word.lower()]
except KeyError:
pass
# HANDLE ISOLATED IRREGULAR PLURALS
wordsplit = word.split()
wordlast = wordsplit[-1]
lowerwordlast = wordlast.lower()
if wordlast in list(pl_sb_irregular_caps.keys()):
llen = len(wordlast)
return '%s%s' % (word[:-llen],
pl_sb_irregular_caps[wordlast])
if lowerwordlast in list(pl_sb_irregular.keys()):
llen = len(lowerwordlast)
return '%s%s' % (word[:-llen],
pl_sb_irregular[lowerwordlast])
if (' '.join(wordsplit[-2:])).lower() in list(pl_sb_irregular_compound.keys()):
llen = len(' '.join(wordsplit[-2:])) # TODO: what if 2 spaces between these words?
return '%s%s' % (word[:-llen],
pl_sb_irregular_compound[(' '.join(wordsplit[-2:])).lower()])
if lowerword[-3:] == 'quy':
return word[:-1] + 'ies'
if lowerword[-6:] == 'person':
if self.classical_dict['persons']:
return word + 's'
else:
return word[:-4] + 'ople'
# HANDLE FAMILIES OF IRREGULAR PLURALS
if lowerword[-3:] == 'man':
for k, v in pl_sb_U_man_mans_bysize.items():
if lowerword[-k:] in v:
return word + 's'
for k, v in pl_sb_U_man_mans_caps_bysize.items():
if word[-k:] in v:
return word + 's'
return word[:-3] + 'men'
if lowerword[-5:] == 'mouse':
return word[:-5] + 'mice'
if lowerword[-5:] == 'louse':
return word[:-5] + 'lice'
if lowerword[-5:] == 'goose':
return word[:-5] + 'geese'
if lowerword[-5:] == 'tooth':
return word[:-5] + 'teeth'
if lowerword[-4:] == 'foot':
return word[:-4] + 'feet'
if lowerword == 'die':
return 'dice'
# HANDLE UNASSIMILATED IMPORTS
if lowerword[-4:] == 'ceps':
return word
if lowerword[-4:] == 'zoon':
return word[:-2] + 'a'
if lowerword[-3:] in ('cis', 'sis', 'xis'):
return word[:-2] + 'es'
for lastlet, d, numend, post in (
('h', pl_sb_U_ch_chs_bysize, None, 's'),
('x', pl_sb_U_ex_ices_bysize, -2, 'ices'),
('x', pl_sb_U_ix_ices_bysize, -2, 'ices'),
('m', pl_sb_U_um_a_bysize, -2, 'a'),
('s', pl_sb_U_us_i_bysize, -2, 'i'),
('n', pl_sb_U_on_a_bysize, -2, 'a'),
('a', pl_sb_U_a_ae_bysize, None, 'e'),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
# HANDLE INCOMPLETELY ASSIMILATED IMPORTS
if (self.classical_dict['ancient']):
if lowerword[-4:] == 'trix':
return word[:-1] + 'ces'
if lowerword[-3:] in ('eau', 'ieu'):
return word + 'x'
if lowerword[-3:] in ('ynx', 'inx', 'anx') and len(word) > 4:
return word[:-1] + 'ges'
for lastlet, d, numend, post in (
('n', pl_sb_C_en_ina_bysize, -2, 'ina'),
('x', pl_sb_C_ex_ices_bysize, -2, 'ices'),
('x', pl_sb_C_ix_ices_bysize, -2, 'ices'),
('m', pl_sb_C_um_a_bysize, -2, 'a'),
('s', pl_sb_C_us_i_bysize, -2, 'i'),
('s', pl_sb_C_us_us_bysize, None, ''),
('a', pl_sb_C_a_ae_bysize, None, 'e'),
('a', pl_sb_C_a_ata_bysize, None, 'ta'),
('s', pl_sb_C_is_ides_bysize, -1, 'des'),
('o', pl_sb_C_o_i_bysize, -1, 'i'),
('n', pl_sb_C_on_a_bysize, -2, 'a'),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
for d, numend, post in (
(pl_sb_C_i_bysize, None, 'i'),
(pl_sb_C_im_bysize, None, 'im'),
):
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
# HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SILIBANTS
if lowerword in pl_sb_singular_s_complete:
return word + 'es'
for k, v in pl_sb_singular_s_bysize.items():
if lowerword[-k:] in v:
return word + 'es'
if lowerword[-2:] == 'es' and word[0] == word[0].upper():
return word + 'es'
        # Wouldn't special words ending with 's' always have been caught already,
        # regardless of whether they start with a capital letter (i.e. are names)?
        # It makes sense below to do this for words ending in 'y', so that
        # Sally -> Sallys, but it is less clear it makes sense here: where is the
        # case of a word ending in 's' that is caught here and would otherwise
        # have been caught below?
#
# removing it as I can't find a case that executes it
# TODO: check this again
#
# if (self.classical_dict['names']):
# mo = search(r"([A-Z].*s)$", word)
# if mo:
# return "%ses" % mo.group(1)
if lowerword[-1] == 'z':
for k, v in pl_sb_z_zes_bysize.items():
if lowerword[-k:] in v:
return word + 'es'
if lowerword[-2:-1] != 'z':
return word + 'zes'
if lowerword[-2:] == 'ze':
for k, v in pl_sb_ze_zes_bysize.items():
if lowerword[-k:] in v:
return word + 's'
if lowerword[-2:] in ('ch', 'sh', 'zz', 'ss') or lowerword[-1] == 'x':
return word + 'es'
# ## (r"(.*)(us)$", "%s%ses"), TODO: why is this commented?
# HANDLE ...f -> ...ves
if lowerword[-3:] in ('elf', 'alf', 'olf'):
return word[:-1] + 'ves'
if lowerword[-3:] == 'eaf' and lowerword[-4:-3] != 'd':
return word[:-1] + 'ves'
if lowerword[-4:] in ('nife', 'life', 'wife'):
return word[:-2] + 'ves'
if lowerword[-3:] == 'arf':
return word[:-1] + 'ves'
# HANDLE ...y
if lowerword[-1] == 'y':
if lowerword[-2:-1] in 'aeiou' or len(word) == 1:
return word + 's'
if (self.classical_dict['names']):
if lowerword[-1] == 'y' and word[0] == word[0].upper():
return word + 's'
return word[:-1] + 'ies'
# HANDLE ...o
if lowerword in pl_sb_U_o_os_complete:
return word + 's'
for k, v in pl_sb_U_o_os_bysize.items():
if lowerword[-k:] in v:
return word + 's'
if lowerword[-2:] in ('ao', 'eo', 'io', 'oo', 'uo'):
return word + 's'
if lowerword[-1] == 'o':
return word + 'es'
# OTHERWISE JUST ADD ...s
return "%ss" % word
def _pl_special_verb(self, word, count=None):
if (self.classical_dict['zero'] and
str(count).lower() in pl_count_zero):
return False
count = self.get_count(count)
if count == 1:
return word
# HANDLE USER-DEFINED VERBS
value = self.ud_match(word, self.pl_v_user_defined)
if value is not None:
return value
# HANDLE IRREGULAR PRESENT TENSE (SIMPLE AND COMPOUND)
lowerword = word.lower()
try:
firstword = lowerword.split()[0]
except IndexError:
return False # word is ''
if firstword in list(plverb_irregular_pres.keys()):
return "%s%s" % (plverb_irregular_pres[firstword], word[len(firstword):])
# HANDLE IRREGULAR FUTURE, PRETERITE AND PERFECT TENSES
if firstword in plverb_irregular_non_pres:
return word
# HANDLE PRESENT NEGATIONS (SIMPLE AND COMPOUND)
if firstword.endswith("n't") and firstword[:-3] in list(plverb_irregular_pres.keys()):
return "%sn't%s" % (plverb_irregular_pres[firstword[:-3]], word[len(firstword):])
if firstword.endswith("n't"):
return word
# HANDLE SPECIAL CASES
mo = search(r"^(%s)$" % plverb_special_s, word)
if mo:
return False
if search(r"\s", word):
return False
if lowerword == 'quizzes':
return 'quiz'
# HANDLE STANDARD 3RD PERSON (CHOP THE ...(e)s OFF SINGLE WORDS)
if lowerword[-4:] in ('ches', 'shes', 'zzes', 'sses') or \
lowerword[-3:] == 'xes':
return word[:-2]
# # mo = search(r"^(.*)([cs]h|[x]|zz|ss)es$",
# # word, IGNORECASE)
# # if mo:
# # return "%s%s" % (mo.group(1), mo.group(2))
if lowerword[-3:] == 'ies' and len(word) > 3:
return lowerword[:-3] + 'y'
if (lowerword in pl_v_oes_oe or
lowerword[-4:] in pl_v_oes_oe_endings_size4 or
lowerword[-5:] in pl_v_oes_oe_endings_size5):
return word[:-1]
if lowerword.endswith('oes') and len(word) > 3:
return lowerword[:-2]
mo = search(r"^(.*[^s])s$", word, IGNORECASE)
if mo:
return mo.group(1)
# OTHERWISE, A REGULAR VERB (HANDLE ELSEWHERE)
return False
def _pl_general_verb(self, word, count=None):
count = self.get_count(count)
if count == 1:
return word
# HANDLE AMBIGUOUS PRESENT TENSES (SIMPLE AND COMPOUND)
mo = search(r"^(%s)((\s.*)?)$" % plverb_ambiguous_pres_keys, word, IGNORECASE)
if mo:
return "%s%s" % (plverb_ambiguous_pres[mo.group(1).lower()], mo.group(2))
# HANDLE AMBIGUOUS PRETERITE AND PERFECT TENSES
mo = search(r"^(%s)((\s.*)?)$" % plverb_ambiguous_non_pres, word, IGNORECASE)
if mo:
return word
# OTHERWISE, 1st OR 2ND PERSON IS UNINFLECTED
return word
def _pl_special_adjective(self, word, count=None):
count = self.get_count(count)
if count == 1:
return word
# HANDLE USER-DEFINED ADJECTIVES
value = self.ud_match(word, self.pl_adj_user_defined)
if value is not None:
return value
# HANDLE KNOWN CASES
mo = search(r"^(%s)$" % pl_adj_special_keys,
word, IGNORECASE)
if mo:
return "%s" % (pl_adj_special[mo.group(1).lower()])
# HANDLE POSSESSIVES
mo = search(r"^(%s)$" % pl_adj_poss_keys,
word, IGNORECASE)
if mo:
return "%s" % (pl_adj_poss[mo.group(1).lower()])
mo = search(r"^(.*)'s?$",
word)
if mo:
pl = self.plural_noun(mo.group(1))
trailing_s = "" if pl[-1] == 's' else "s"
return "%s'%s" % (pl, trailing_s)
# OTHERWISE, NO IDEA
return False
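    # Illustrative behaviour (editorial, not in the original source):
    #   self._pl_special_adjective('my')     # -> 'our'
    #   self._pl_special_adjective('this')   # -> 'these'
    #   self._pl_special_adjective("cat's")  # -> "cats'" (possessives are re-pluralised)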
# @profile
def _sinoun(self, word, count=None, gender=None):
count = self.get_count(count)
# DEFAULT TO PLURAL
if count == 2:
return word
# SET THE GENDER
try:
if gender is None:
gender = self.thegender
elif gender not in singular_pronoun_genders:
raise BadGenderError
except (TypeError, IndexError):
raise BadGenderError
# HANDLE USER-DEFINED NOUNS
value = self.ud_match(word, self.si_sb_user_defined)
if value is not None:
return value
# HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS
if word == '':
return word
lowerword = word.lower()
if word in si_sb_ois_oi_case:
return word[:-1]
if lowerword in pl_sb_uninflected_complete:
return word
if word in pl_sb_uninflected_caps:
return word
for k, v in pl_sb_uninflected_bysize.items():
if lowerword[-k:] in v:
return word
if (self.classical_dict['herd'] and lowerword in pl_sb_uninflected_herd):
return word
# HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.)
mo = search(r"^(?:%s)$" % pl_sb_postfix_adj_stems, word, IGNORECASE)
if mo and mo.group(2) != '':
return "%s%s" % (self._sinoun(mo.group(1), 1, gender=gender), mo.group(2))
# how to reverse this one?
# mo = search(r"^(?:%s)$" % pl_sb_prep_dual_compound, word, IGNORECASE)
# if mo and mo.group(2) != '' and mo.group(3) != '':
# return "%s%s%s" % (self._sinoun(mo.group(1), 1),
# mo.group(2),
# self._sinoun(mo.group(3), 1))
lowersplit = lowerword.split(' ')
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return ' '.join(lowersplit[:numword - 1] +
[self._sinoun(lowersplit[numword - 1], 1, gender=gender) or
lowersplit[numword - 1]] + lowersplit[numword:])
lowersplit = lowerword.split('-')
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return ' '.join(
lowersplit[:numword - 1] +
[(self._sinoun(lowersplit[numword - 1], 1, gender=gender) or lowersplit[numword - 1]) +
'-' + lowersplit[numword] + '-']) + ' '.join(lowersplit[(numword + 1):])
# HANDLE PRONOUNS
for k, v in si_pron_acc_keys_bysize.items():
            if lowerword[-k:] in v: # ends with accusative pronoun
for pk, pv in pl_prep_bysize.items():
if lowerword[:pk] in pv: # starts with a prep
if lowerword.split() == [lowerword[:pk], lowerword[-k:]]: # only whitespace in between
return lowerword[:-k] + get_si_pron('acc', lowerword[-k:], gender)
try:
return get_si_pron('nom', word.lower(), gender)
except KeyError:
pass
try:
return get_si_pron('acc', word.lower(), gender)
except KeyError:
pass
# HANDLE ISOLATED IRREGULAR PLURALS
wordsplit = word.split()
wordlast = wordsplit[-1]
lowerwordlast = wordlast.lower()
if wordlast in list(si_sb_irregular_caps.keys()):
llen = len(wordlast)
return '%s%s' % (word[:-llen],
si_sb_irregular_caps[wordlast])
if lowerwordlast in list(si_sb_irregular.keys()):
llen = len(lowerwordlast)
return '%s%s' % (word[:-llen],
si_sb_irregular[lowerwordlast])
if (' '.join(wordsplit[-2:])).lower() in list(si_sb_irregular_compound.keys()):
llen = len(' '.join(wordsplit[-2:])) # TODO: what if 2 spaces between these words?
return '%s%s' % (word[:-llen],
si_sb_irregular_compound[(' '.join(wordsplit[-2:])).lower()])
if lowerword[-5:] == 'quies':
return word[:-3] + 'y'
if lowerword[-7:] == 'persons':
return word[:-1]
if lowerword[-6:] == 'people':
return word[:-4] + 'rson'
# HANDLE FAMILIES OF IRREGULAR PLURALS
if lowerword[-4:] == 'mans':
for k, v in si_sb_U_man_mans_bysize.items():
if lowerword[-k:] in v:
return word[:-1]
for k, v in si_sb_U_man_mans_caps_bysize.items():
if word[-k:] in v:
return word[:-1]
if lowerword[-3:] == 'men':
return word[:-3] + 'man'
if lowerword[-4:] == 'mice':
return word[:-4] + 'mouse'
if lowerword[-4:] == 'lice':
return word[:-4] + 'louse'
if lowerword[-5:] == 'geese':
return word[:-5] + 'goose'
if lowerword[-5:] == 'teeth':
return word[:-5] + 'tooth'
if lowerword[-4:] == 'feet':
return word[:-4] + 'foot'
if lowerword == 'dice':
return 'die'
# HANDLE UNASSIMILATED IMPORTS
if lowerword[-4:] == 'ceps':
return word
if lowerword[-3:] == 'zoa':
return word[:-1] + 'on'
for lastlet, d, numend, post in (
('s', si_sb_U_ch_chs_bysize, -1, ''),
('s', si_sb_U_ex_ices_bysize, -4, 'ex'),
('s', si_sb_U_ix_ices_bysize, -4, 'ix'),
('a', si_sb_U_um_a_bysize, -1, 'um'),
('i', si_sb_U_us_i_bysize, -1, 'us'),
('a', si_sb_U_on_a_bysize, -1, 'on'),
('e', si_sb_U_a_ae_bysize, -1, ''),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
# HANDLE INCOMPLETELY ASSIMILATED IMPORTS
if (self.classical_dict['ancient']):
if lowerword[-6:] == 'trices':
return word[:-3] + 'x'
if lowerword[-4:] in ('eaux', 'ieux'):
return word[:-1]
if lowerword[-5:] in ('ynges', 'inges', 'anges') and len(word) > 6:
return word[:-3] + 'x'
for lastlet, d, numend, post in (
('a', si_sb_C_en_ina_bysize, -3, 'en'),
('s', si_sb_C_ex_ices_bysize, -4, 'ex'),
('s', si_sb_C_ix_ices_bysize, -4, 'ix'),
('a', si_sb_C_um_a_bysize, -1, 'um'),
('i', si_sb_C_us_i_bysize, -1, 'us'),
('s', pl_sb_C_us_us_bysize, None, ''),
('e', si_sb_C_a_ae_bysize, -1, ''),
('a', si_sb_C_a_ata_bysize, -2, ''),
('s', si_sb_C_is_ides_bysize, -3, 's'),
('i', si_sb_C_o_i_bysize, -1, 'o'),
('a', si_sb_C_on_a_bysize, -1, 'on'),
('m', si_sb_C_im_bysize, -2, ''),
('i', si_sb_C_i_bysize, -1, ''),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
        # HANDLE PLURALS ENDING IN uses -> use
if (lowerword[-6:] == 'houses' or
word in si_sb_uses_use_case or
lowerword in si_sb_uses_use):
return word[:-1]
        # HANDLE PLURALS ENDING IN ies -> ie
if word in si_sb_ies_ie_case or lowerword in si_sb_ies_ie:
return word[:-1]
        # HANDLE PLURALS ENDING IN oes -> oe
if (lowerword[-5:] == 'shoes' or
word in si_sb_oes_oe_case or
lowerword in si_sb_oes_oe):
return word[:-1]
        # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SIBILANTS
if (word in si_sb_sses_sse_case or
lowerword in si_sb_sses_sse):
return word[:-1]
if lowerword in si_sb_singular_s_complete:
return word[:-2]
for k, v in si_sb_singular_s_bysize.items():
if lowerword[-k:] in v:
return word[:-2]
if lowerword[-4:] == 'eses' and word[0] == word[0].upper():
return word[:-2]
# Wouldn't special words
# ending with 's' always have been caught, regardless of them starting
# with a capital letter (i.e. being names)
# It makes sense below to do this for words ending in 'y' so that
# Sally -> Sallys. But not sure it makes sense here. Where is the case
# of a word ending in s that is caught here and would otherwise have been
# caught below?
#
# removing it as I can't find a case that executes it
# TODO: check this again
#
# if (self.classical_dict['names']):
# mo = search(r"([A-Z].*ses)$", word)
# if mo:
# return "%s" % mo.group(1)
if lowerword in si_sb_z_zes:
return word[:-2]
if lowerword in si_sb_zzes_zz:
return word[:-2]
if lowerword[-4:] == 'zzes':
return word[:-3]
if (word in si_sb_ches_che_case or
lowerword in si_sb_ches_che):
return word[:-1]
if lowerword[-4:] in ('ches', 'shes'):
return word[:-2]
if lowerword in si_sb_xes_xe:
return word[:-1]
if lowerword[-3:] == 'xes':
return word[:-2]
# (r"(.*)(us)es$", "%s%s"), TODO: why is this commented?
# HANDLE ...f -> ...ves
if (word in si_sb_ves_ve_case or
lowerword in si_sb_ves_ve):
return word[:-1]
if lowerword[-3:] == 'ves':
if lowerword[-5:-3] in ('el', 'al', 'ol'):
return word[:-3] + 'f'
if lowerword[-5:-3] == 'ea' and word[-6:-5] != 'd':
return word[:-3] + 'f'
if lowerword[-5:-3] in ('ni', 'li', 'wi'):
return word[:-3] + 'fe'
if lowerword[-5:-3] == 'ar':
return word[:-3] + 'f'
# HANDLE ...y
if lowerword[-2:] == 'ys':
if len(lowerword) > 2 and lowerword[-3] in 'aeiou':
return word[:-1]
if (self.classical_dict['names']):
if lowerword[-2:] == 'ys' and word[0] == word[0].upper():
return word[:-1]
if lowerword[-3:] == 'ies':
return word[:-3] + 'y'
# HANDLE ...o
if lowerword[-2:] == 'os':
if lowerword in si_sb_U_o_os_complete:
return word[:-1]
for k, v in si_sb_U_o_os_bysize.items():
if lowerword[-k:] in v:
return word[:-1]
if lowerword[-3:] in ('aos', 'eos', 'ios', 'oos', 'uos'):
return word[:-1]
if lowerword[-3:] == 'oes':
return word[:-2]
# UNASSIMILATED IMPORTS FINAL RULE
if word in si_sb_es_is:
return word[:-2] + 'is'
# OTHERWISE JUST REMOVE ...s
if lowerword[-1] == 's':
return word[:-1]
# COULD NOT FIND SINGULAR
return False
# ADJECTIVES
def a(self, text, count=1):
'''
Return the appropriate indefinite article followed by text.
The indefinite article is either 'a' or 'an'.
If count is not one, then return count followed by text
instead of 'a' or 'an'.
Whitespace at the start and end is preserved.
'''
mo = search(r"\A(\s*)(?:an?\s+)?(.+?)(\s*)\Z",
text, IGNORECASE)
if mo:
word = mo.group(2)
if not word:
return text
pre = mo.group(1)
post = mo.group(3)
result = self._indef_article(word, count)
return "%s%s%s" % (pre, result, post)
return ''
an = a
def _indef_article(self, word, count):
mycount = self.get_count(count)
if mycount != 1:
return "%s %s" % (count, word)
# HANDLE USER-DEFINED VARIANTS
value = self.ud_match(word, self.A_a_user_defined)
if value is not None:
return "%s %s" % (value, word)
# HANDLE ORDINAL FORMS
for a in (
(r"^(%s)" % A_ordinal_a, "a"),
(r"^(%s)" % A_ordinal_an, "an"),
):
mo = search(a[0], word, IGNORECASE)
if mo:
return "%s %s" % (a[1], word)
# HANDLE SPECIAL CASES
for a in (
(r"^(%s)" % A_explicit_an, "an"),
(r"^[aefhilmnorsx]$", "an"),
(r"^[bcdgjkpqtuvwyz]$", "a"),
):
mo = search(a[0], word, IGNORECASE)
if mo:
return "%s %s" % (a[1], word)
# HANDLE ABBREVIATIONS
for a in (
(r"(%s)" % A_abbrev, "an", VERBOSE),
(r"^[aefhilmnorsx][.-]", "an", IGNORECASE),
(r"^[a-z][.-]", "a", IGNORECASE),
):
mo = search(a[0], word, a[2])
if mo:
return "%s %s" % (a[1], word)
# HANDLE CONSONANTS
mo = search(r"^[^aeiouy]", word, IGNORECASE)
if mo:
return "a %s" % word
# HANDLE SPECIAL VOWEL-FORMS
for a in (
(r"^e[uw]", "a"),
(r"^onc?e\b", "a"),
(r"^onetime\b", "a"),
(r"^uni([^nmd]|mo)", "a"),
(r"^u[bcfghjkqrst][aeiou]", "a"),
(r"^ukr", "a"),
(r"^(%s)" % A_explicit_a, "a"),
):
mo = search(a[0], word, IGNORECASE)
if mo:
return "%s %s" % (a[1], word)
# HANDLE SPECIAL CAPITALS
mo = search(r"^U[NK][AIEO]?", word)
if mo:
return "a %s" % word
# HANDLE VOWELS
mo = search(r"^[aeiou]", word, IGNORECASE)
if mo:
return "an %s" % word
# HANDLE y... (BEFORE CERTAIN CONSONANTS IMPLIES (UNNATURALIZED) "i.." SOUND)
mo = search(r"^(%s)" % A_y_cons, word, IGNORECASE)
if mo:
return "an %s" % word
# OTHERWISE, GUESS "a"
return "a %s" % word
# 2. TRANSLATE ZERO-QUANTIFIED $word TO "no plural($word)"
def no(self, text, count=None):
'''
If count is 0, no, zero or nil, return 'no' followed by the plural
of text.
If count is one of:
1, a, an, one, each, every, this, that
return count followed by text.
        Otherwise return count followed by the plural of text.
In the return value count is always followed by a space.
Whitespace at the start and end is preserved.
'''
if count is None and self.persistent_count is not None:
count = self.persistent_count
if count is None:
count = 0
mo = search(r"\A(\s*)(.+?)(\s*)\Z", text)
pre = mo.group(1)
word = mo.group(2)
post = mo.group(3)
if str(count).lower() in pl_count_zero:
return "%sno %s%s" % (pre, self.plural(word, 0), post)
else:
return "%s%s %s%s" % (pre, count, self.plural(word, count), post)
# PARTICIPLES
def present_participle(self, word):
'''
Return the present participle for word.
word is the 3rd person singular verb.
'''
plv = self.plural_verb(word, 2)
for pat, repl in (
(r"ie$", r"y"),
(r"ue$", r"u"), # TODO: isn't ue$ -> u encompassed in the following rule?
(r"([auy])e$", r"\g<1>"),
(r"ski$", r"ski"),
(r"[^b]i$", r""),
(r"^(are|were)$", r"be"),
(r"^(had)$", r"hav"),
(r"^(hoe)$", r"\g<1>"),
(r"([^e])e$", r"\g<1>"),
(r"er$", r"er"),
(r"([^aeiou][aeiouy]([bdgmnprst]))$", "\g<1>\g<2>"),
):
(ans, num) = subn(pat, repl, plv)
if num:
return "%sing" % ans
return "%sing" % ans
# NUMERICAL INFLECTIONS
def ordinal(self, num):
'''
Return the ordinal of num.
num can be an integer or text
e.g. ordinal(1) returns '1st'
ordinal('one') returns 'first'
'''
if match(r"\d", str(num)):
try:
num % 2
n = num
except TypeError:
if '.' in str(num):
try:
n = int(num[-1]) # numbers after decimal, so only need last one for ordinal
except ValueError: # ends with '.', so need to use whole string
n = int(num[:-1])
else:
n = int(num)
try:
post = nth[n % 100]
except KeyError:
post = nth[n % 10]
return "%s%s" % (num, post)
else:
mo = search(r"(%s)\Z" % ordinal_suff, num)
try:
post = ordinal[mo.group(1)]
return resub(r"(%s)\Z" % ordinal_suff, post, num)
except AttributeError:
return "%sth" % num
def millfn(self, ind=0):
if ind > len(mill) - 1:
print3("number out of range")
raise NumOutOfRangeError
return mill[ind]
def unitfn(self, units, mindex=0):
return "%s%s" % (unit[units], self.millfn(mindex))
def tenfn(self, tens, units, mindex=0):
if tens != 1:
return "%s%s%s%s" % (ten[tens],
'-' if tens and units else '',
unit[units],
self.millfn(mindex))
return "%s%s" % (teen[units], mill[mindex])
def hundfn(self, hundreds, tens, units, mindex):
if hundreds:
return "%s hundred%s%s%s, " % (unit[hundreds], # use unit not unitfn as simpler
" %s " % self.number_args['andword'] if tens or units else '',
self.tenfn(tens, units),
self.millfn(mindex))
if tens or units:
return "%s%s, " % (self.tenfn(tens, units), self.millfn(mindex))
return ''
def group1sub(self, mo):
units = int(mo.group(1))
if units == 1:
return " %s, " % self.number_args['one']
elif units:
# TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl
return "%s, " % unit[units]
else:
return " %s, " % self.number_args['zero']
def group1bsub(self, mo):
units = int(mo.group(1))
if units:
# TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl
return "%s, " % unit[units]
else:
return " %s, " % self.number_args['zero']
def group2sub(self, mo):
tens = int(mo.group(1))
units = int(mo.group(2))
if tens:
return "%s, " % self.tenfn(tens, units)
if units:
return " %s %s, " % (self.number_args['zero'], unit[units])
return " %s %s, " % (self.number_args['zero'], self.number_args['zero'])
def group3sub(self, mo):
hundreds = int(mo.group(1))
tens = int(mo.group(2))
units = int(mo.group(3))
if hundreds == 1:
hunword = " %s" % self.number_args['one']
elif hundreds:
hunword = "%s" % unit[hundreds]
# TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl
else:
hunword = " %s" % self.number_args['zero']
if tens:
tenword = self.tenfn(tens, units)
elif units:
tenword = " %s %s" % (self.number_args['zero'], unit[units])
else:
tenword = " %s %s" % (self.number_args['zero'], self.number_args['zero'])
return "%s %s, " % (hunword, tenword)
def hundsub(self, mo):
ret = self.hundfn(int(mo.group(1)), int(mo.group(2)), int(mo.group(3)), self.mill_count)
self.mill_count += 1
return ret
def tensub(self, mo):
return "%s, " % self.tenfn(int(mo.group(1)), int(mo.group(2)), self.mill_count)
def unitsub(self, mo):
return "%s, " % self.unitfn(int(mo.group(1)), self.mill_count)
def enword(self, num, group):
# import pdb
# pdb.set_trace()
if group == 1:
num = resub(r"(\d)", self.group1sub, num)
elif group == 2:
num = resub(r"(\d)(\d)", self.group2sub, num)
num = resub(r"(\d)", self.group1bsub, num, 1)
# group1bsub same as
# group1sub except it doesn't use the default word for one.
            # Is this required? i.e. is the default word not to be used when
# grouping in pairs?
#
# No. This is a bug. Fixed. TODO: report upstream.
elif group == 3:
num = resub(r"(\d)(\d)(\d)", self.group3sub, num)
num = resub(r"(\d)(\d)", self.group2sub, num, 1)
num = resub(r"(\d)", self.group1sub, num, 1)
elif int(num) == 0:
num = self.number_args['zero']
elif int(num) == 1:
num = self.number_args['one']
else:
num = num.lstrip().lstrip('0')
self.mill_count = 0
# surely there's a better way to do the next bit
mo = search(r"(\d)(\d)(\d)(?=\D*\Z)", num)
while mo:
num = resub(r"(\d)(\d)(\d)(?=\D*\Z)", self.hundsub, num, 1)
mo = search(r"(\d)(\d)(\d)(?=\D*\Z)", num)
num = resub(r"(\d)(\d)(?=\D*\Z)", self.tensub, num, 1)
num = resub(r"(\d)(?=\D*\Z)", self.unitsub, num, 1)
return num
def blankfn(self, mo):
''' do a global blank replace
TODO: surely this can be done with an option to resub
rather than this fn
'''
return ''
def commafn(self, mo):
''' do a global ',' replace
TODO: surely this can be done with an option to resub
rather than this fn
'''
return ','
def spacefn(self, mo):
''' do a global ' ' replace
TODO: surely this can be done with an option to resub
rather than this fn
'''
return ' '
def number_to_words(self, num, wantlist=False,
group=0, comma=',', andword='and',
zero='zero', one='one', decimal='point',
threshold=None):
'''
Return a number in words.
group = 1, 2 or 3 to group numbers before turning into words
comma: define comma
andword: word for 'and'. Can be set to ''.
e.g. "one hundred and one" vs "one hundred one"
zero: word for '0'
one: word for '1'
decimal: word for decimal point
threshold: numbers above threshold not turned into words
parameters not remembered from last call. Departure from Perl version.
'''
self.number_args = dict(andword=andword, zero=zero, one=one)
num = '%s' % num
# Handle "stylistic" conversions (up to a given threshold)...
if (threshold is not None and float(num) > threshold):
spnum = num.split('.', 1)
while (comma):
(spnum[0], n) = subn(r"(\d)(\d{3}(?:,|\Z))", r"\1,\2", spnum[0])
if n == 0:
break
try:
return "%s.%s" % (spnum[0], spnum[1])
except IndexError:
return "%s" % spnum[0]
if group < 0 or group > 3:
raise BadChunkingOptionError
nowhite = num.lstrip()
if nowhite[0] == '+':
sign = "plus"
elif nowhite[0] == '-':
sign = "minus"
else:
sign = ""
myord = (num[-2:] in ('st', 'nd', 'rd', 'th'))
if myord:
num = num[:-2]
finalpoint = False
if decimal:
if group != 0:
chunks = num.split('.')
else:
chunks = num.split('.', 1)
if chunks[-1] == '': # remove blank string if nothing after decimal
chunks = chunks[:-1]
finalpoint = True # add 'point' to end of output
else:
chunks = [num]
first = 1
loopstart = 0
if chunks[0] == '':
first = 0
if len(chunks) > 1:
loopstart = 1
for i in range(loopstart, len(chunks)):
chunk = chunks[i]
# remove all non numeric \D
chunk = resub(r"\D", self.blankfn, chunk)
if chunk == "":
chunk = "0"
if group == 0 and (first == 0 or first == ''):
chunk = self.enword(chunk, 1)
else:
chunk = self.enword(chunk, group)
if chunk[-2:] == ', ':
chunk = chunk[:-2]
chunk = resub(r"\s+,", self.commafn, chunk)
if group == 0 and first:
chunk = resub(r", (\S+)\s+\Z", " %s \\1" % andword, chunk)
chunk = resub(r"\s+", self.spacefn, chunk)
# chunk = resub(r"(\A\s|\s\Z)", self.blankfn, chunk)
chunk = chunk.strip()
if first:
first = ''
chunks[i] = chunk
numchunks = []
if first != 0:
numchunks = chunks[0].split("%s " % comma)
if myord and numchunks:
# TODO: can this be just one re as it is in perl?
mo = search(r"(%s)\Z" % ordinal_suff, numchunks[-1])
if mo:
numchunks[-1] = resub(r"(%s)\Z" % ordinal_suff, ordinal[mo.group(1)],
numchunks[-1])
else:
numchunks[-1] += 'th'
for chunk in chunks[1:]:
numchunks.append(decimal)
numchunks.extend(chunk.split("%s " % comma))
if finalpoint:
numchunks.append(decimal)
        # wantlist: Perl list context. can explicitly specify in Python
if wantlist:
if sign:
numchunks = [sign] + numchunks
return numchunks
elif group:
signout = "%s " % sign if sign else ''
return "%s%s" % (signout, ", ".join(numchunks))
else:
signout = "%s " % sign if sign else ''
num = "%s%s" % (signout, numchunks.pop(0))
if decimal is None:
first = True
else:
first = not num.endswith(decimal)
for nc in numchunks:
if nc == decimal:
num += " %s" % nc
first = 0
elif first:
num += "%s %s" % (comma, nc)
else:
num += " %s" % nc
return num
# Join words with commas and a trailing 'and' (when appropriate)...
def join(self, words, sep=None, sep_spaced=True,
final_sep=None, conj='and', conj_spaced=True):
'''
Join words into a list.
e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly'
options:
conj: replacement for 'and'
sep: separator. default ',', unless ',' is in the list then ';'
final_sep: final separator. default ',', unless ',' is in the list then ';'
conj_spaced: boolean. Should conj have spaces around it
'''
if not words:
return ""
if len(words) == 1:
return words[0]
if conj_spaced:
if conj == '':
conj = ' '
else:
conj = ' %s ' % conj
if len(words) == 2:
return "%s%s%s" % (words[0], conj, words[1])
if sep is None:
if ',' in ''.join(words):
sep = ';'
else:
sep = ','
if final_sep is None:
final_sep = sep
final_sep = "%s%s" % (final_sep, conj)
if sep_spaced:
sep += ' '
return "%s%s%s" % (sep.join(words[0:-1]), final_sep, words[-1])
| bert-syntax-master | inflect.py |
import sys
from collections import *
files=[("base","results/gulordava_results_base.txt"),("large","results/gulordava_results_large.txt")]
if "with_only_prefix" in sys.argv:
files+=[("base_only_prefix","results/gulordava_results_base_only_prefix.txt"),("large_only_prefix","results/gulordava_results_large_only_prefix.txt")]
if "no_split" in sys.argv:
files.append(("openai_gpt", "results/gulordava_results_openai_gpt_no_split.txt"))
elif "use_postfix" in sys.argv:
files.append(("openai_gpt", "results/gulordava_results_openai_gpt_use_postfix.txt"))
else:
files.append(("openai_gpt", "results/gulordava_results_openai_gpt.txt"))
by_model={}
conditions=set()
nskipped=0
for title,fname in files:
lines = open(fname)
results=defaultdict(Counter)
by_model[title]=results
skipped = set()
for line in lines:
if line.startswith("Better speed"): continue
if line.startswith("skipping"):
skipped.add(line.split()[1])
#next(lines) # no need to skip, skipped in testing
nskipped += 1
continue
assert (line.strip().split()[0] in ['True','False']),line
res,c1,_ = line.split(None, 2)
conditions.add(c1)
conditions.add('all')
results[c1][res]+=1
print("adding",res,"to",c1)
results['all'][res]+=1
print("skipped:",nskipped,len(skipped),skipped)
if "with_only_prefix" in sys.argv:
print("condition & base & large & base_only_prefix & large_only_prefix & openai_gpt & count bert & count openai_gpt \\\\")
else:
print("condition & base & large & openai_gpt & count bert & count openai_gpt \\\\")
for cond in conditions:
rb = by_model['base'][cond]
rl = by_model['large'][cond]
ro = by_model['openai_gpt'][cond]
sb = "%.2f" % (rb['True']/(rb['True']+rb['False']))
sl = "%.2f" % (rl['True']/(rl['True']+rl['False']))
so = "%.2f" % (ro['True']/(ro['True']+ro['False']))
if "with_only_prefix" in sys.argv:
rbp = by_model['base_only_prefix'][cond]
rlp = by_model['large_only_prefix'][cond]
sbp = "%.2f" % (rbp['True']/(rbp['True']+rbp['False']))
slp = "%.2f" % (rlp['True']/(rlp['True']+rlp['False']))
print(" & ".join(map(str,[cond, sb, sl, sbp, slp, so, sum(rb.values()), sum(ro.values())])),"\\\\")
else:
print(" & ".join(map(str,[cond, sb, sl, so, sum(rb.values()), sum(ro.values())])),"\\\\")
| bert-syntax-master | gen_gul_tbl_openai_gpt.py |
import csv
cases_we_care_about=['1','2','3','4']
from utils import vinfl
def inflect(verb):
return vinfl[verb]
for record in csv.DictReader(open('agr_50_mostcommon_10K.tsv','r'), delimiter='\t'):
orig = record['orig_sentence']
n_i = record['n_intervening']
n_di = record['n_diff_intervening']
vindex = int(record['verb_index'])-1
if n_i != n_di: continue
if n_di in cases_we_care_about:
sorig = orig.split()
verb = sorig[vindex]
iverb = inflect(verb)
#if verb in ['is','are']: continue # skip because of copular agreement
sorig[vindex] = "***mask***"
masked = " ".join(sorig)
print("\t".join([n_di,orig,masked,verb,iverb]))
| bert-syntax-master | make_linzen_goldberg_testset.py |
# coding=utf-8
from pytorch_pretrained_bert import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, BertTokenizer
import torch
import sys
import csv
import logging
import itertools
logging.basicConfig(level=logging.INFO)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = "openai-gpt"
print("using model: {}".format(model_name), file=sys.stderr)
split_words = True
if 'no_split' in sys.argv:
split_words = False
print("We don't split words", file=sys.stderr)
use_postfix = False
if 'use_postfix' in sys.argv:
use_postfix = True
print("We compute probabilities over the entire sentence", file=sys.stderr)
model = OpenAIGPTLMHeadModel.from_pretrained(model_name)
tokenizer = OpenAIGPTTokenizer.from_pretrained(model_name)
bert_tokenizer=BertTokenizer.from_pretrained('bert-base-uncased')
model.eval()
model.to(device)
def get_probs_for_words(sent, w1, w2):
pre, target, post = sent.split("***")
if "mask" in target.lower():
target = ["[MASK]"]
else:
target = tokenizer.tokenize(target)
tokens = tokenizer.tokenize(pre)
target_idx = len(tokens)
# Filter answers based on BERT wordpieces to align with BERT results
try:
word_ids=bert_tokenizer.convert_tokens_to_ids([w1,w2])
except KeyError:
print("skipping",w1,w2,"bad wins")
return None
tok_w1, tok_w2 = tokenizer.tokenize(w1), tokenizer.tokenize(w2)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
w1_ids = tokenizer.convert_tokens_to_ids(tok_w1)
w2_ids = tokenizer.convert_tokens_to_ids(tok_w2)
if len(input_ids) == 0:
print("skipping",pre,w1,w2,"empty beggingin")
return None
if not split_words and (len(tok_w1) > 1 or len(tok_w2) > 1):
print("skipping",pre,w1,w2,"splitted words")
return None
if use_postfix:
# Add post focus tokens
end_tokens = tokenizer.tokenize(post)
end_ids = tokenizer.convert_tokens_to_ids(end_tokens)
w1_ids += end_ids
w2_ids += end_ids
# Compute the score for w1 and w2
add_tok_w1 = []
add_tok_w2 = []
score_w1 = 0
score_w2 = 0
for ids_w1, ids_w2 in itertools.zip_longest(w1_ids, w2_ids):
tens = torch.LongTensor([input_ids + add_tok_w1, input_ids + add_tok_w2]).to(device)
with torch.no_grad():
res = model(tens)
res = res[..., 0:model.config.vocab_size] # Restrict to the vocabulary only
res = torch.nn.functional.log_softmax(res, dim=-1)
if ids_w1 is not None:
score_w1 = score_w1 + res[0, -1, ids_w1].item()
if ids_w2 is not None:
score_w2 = score_w2 + res[1, -1, ids_w2].item()
        # pad with token id 0 (an int) so both candidate sequences keep equal length
        add_tok_w1.append(ids_w1 if ids_w1 is not None else 0)
        add_tok_w2.append(ids_w2 if ids_w2 is not None else 0)
# Compute the score for w2
# add_tok = []
# score_w2 = 0
# for ids in w2_ids:
# tens = torch.LongTensor(input_ids + add_tok).unsqueeze(0).to(device)
# with torch.no_grad():
# res = model(tens)
# res = res[..., 0:model.config.vocab_size] # Restrict to the vocabulary only
# res = torch.nn.functional.log_softmax(res,dim=-1)
# score_w2 = score_w2 + res[0, -1, ids]
# add_tok.append(ids)
return [float(score_w1), float(score_w2)]
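# Usage sketch: the sentence carries a ***mask*** (or ***<target>***) span; w1 is the
# grammatical candidate, w2 the ungrammatical one, and the two returned summed
# log-probabilities are compared downstream (gp > bp). Illustrative call:
#   get_probs_for_words("the keys to the cabinet ***mask*** on the table .", "are", "is")
#   # -> [score_for_are, score_for_is]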
from collections import Counter
def load_marvin():
cc = Counter()
    # note: I edited the LM_Syneval/src/make_templates.py script, and ran "python LM_Syneval/src/make_templates.py LM_Syneval/data/templates/ > marvin_linzen_dataset.tsv"
out = []
for line in open("marvin_linzen_dataset.tsv"):
case = line.strip().split("\t")
cc[case[1]] += 1
g, ug = case[-2], case[-1]
g = g.split()
ug = ug.split()
assert len(g) == len(ug), (g, ug)
diffs = [i for i, pair in enumerate(zip(g, ug)) if pair[0] != pair[1]]
if len(diffs) != 1:
# print(diffs)
# print(g,ug)
continue
assert len(diffs) == 1, diffs
gv = g[diffs[0]] # good
ugv = ug[diffs[0]] # bad
g[diffs[0]] = "***mask***"
g.append(".")
out.append((case[0], case[1], " ".join(g), gv, ugv))
return out
def eval_marvin():
o = load_marvin()
print(len(o), file=sys.stderr)
from collections import defaultdict
import time
rc = defaultdict(Counter)
tc = Counter()
start = time.time()
for i, (case, tp, s, g, b) in enumerate(o):
ps = get_probs_for_words(s, g, b)
if ps is None:
ps = [0, 1]
gp = ps[0]
bp = ps[1]
print(gp > bp, case, tp, g, b, s)
if i % 100 == 0:
print(i, time.time() - start, file=sys.stderr)
start = time.time()
sys.stdout.flush()
def eval_lgd():
for i, line in enumerate(open("lgd_dataset.tsv", encoding="utf8")):
# for i,line in enumerate(open("lgd_dataset_with_is_are.tsv",encoding="utf8")):
na, _, masked, good, bad = line.strip().split("\t")
ps = get_probs_for_words(masked, good, bad)
if ps is None:
continue
gp = ps[0]
bp = ps[1]
print(str(gp > bp), na, good, gp, bad, bp, masked.encode("utf8"), sep=u"\t")
if i % 100 == 0:
print(i, file=sys.stderr)
sys.stdout.flush()
def read_gulordava():
rows = csv.DictReader(open("generated.tab", encoding="utf8"), delimiter="\t")
data = []
for row in rows:
row2 = next(rows)
assert row["sent"] == row2["sent"]
assert row["class"] == "correct"
assert row2["class"] == "wrong"
sent = row["sent"].lower().split()[:-1] # dump the <eos> token.
good_form = row["form"]
bad_form = row2["form"]
sent[int(row["len_prefix"])] = "***mask***"
sent = " ".join(sent)
data.append((sent, row["n_attr"], good_form, bad_form))
return data
def eval_gulordava():
for i, (masked, natt, good, bad) in enumerate(read_gulordava()):
if good in ["is", "are"]:
print("skipping is/are")
continue
ps = get_probs_for_words(masked, good, bad)
if ps is None:
continue
gp = ps[0]
bp = ps[1]
print(str(gp > bp), natt, good, gp, bad, bp, masked.encode("utf8"), sep=u"\t")
if i % 100 == 0:
print(i, file=sys.stderr)
sys.stdout.flush()
if "marvin" in sys.argv:
eval_marvin()
elif "gul" in sys.argv:
eval_gulordava()
else:
eval_lgd()
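# Invocation sketch (illustrative): the evaluation set is chosen via sys.argv and stdout
# is redirected to the results file that the gen_*_tbl scripts expect, e.g.:
#   python eval_openai_gpt.py gul > results/gulordava_results_openai_gpt.txt
# ("marvin" runs eval_marvin; with no argument the Linzen/Goldberg dataset is evaluated.)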
| bert-syntax-master | eval_openai_gpt.py |
# from Linzen's code repo
import inflect
infl_eng = inflect.engine()
def gen_inflect_from_vocab(vocab_file, freq_threshold=1000):
vbp = {}
vbz = {}
nn = {}
nns = {}
from_pos = {'NNS': nns, 'NN': nn, 'VBP': vbp, 'VBZ': vbz}
    for line in open(vocab_file):
if line.startswith(' '): # empty string token
continue
word, pos, count = line.strip().split()
count = int(count)
if len(word) > 1 and pos in from_pos and count >= freq_threshold:
from_pos[pos][word] = count
verb_infl = {'VBP': 'VBZ', 'VBZ': 'VBP'}
    for word, count in vbz.items():
candidate = infl_eng.plural_verb(word)
if candidate in vbp:
verb_infl[candidate] = word
verb_infl[word] = candidate
noun_infl = {'NN': 'NNS', 'NNS': 'NN'}
    for word, count in nn.items():
candidate = infl_eng.plural_noun(word)
if candidate in nns:
noun_infl[candidate] = word
noun_infl[word] = candidate
return verb_infl, noun_infl
vinfl, ninfl = gen_inflect_from_vocab('wiki.vocab')
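# vinfl maps each verb form to its opposite-number form and ninfl does the same for
# nouns, e.g. (illustrative, assuming both forms clear the frequency threshold in wiki.vocab):
#   vinfl["walks"] -> "walk"    vinfl["walk"] -> "walks"
#   ninfl["keys"]  -> "key"     ninfl["key"]  -> "keys"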
| bert-syntax-master | utils.py |
import sys
from collections import *
files=[("base","results/marvin_results_base.txt"),("large","results/marvin_results_large.txt")]
by_model={}
conditions=set()
for title,fname in files:
lines = open(fname)
results=defaultdict(Counter)
by_model[title]=results
skipped = set()
for line in lines:
if line.startswith("Better speed"): continue
if line.startswith("skipping"):
skipped.add(line.split()[1])
next(lines)
continue
res,c1,c2,w1,w2,s = line.split(None, 5)
c1 = c1.replace("inanim","anim")
conditions.add(c1)
results[c1][res]+=1
print("skipped:",skipped)
print("condition & base & large & count \\\\")
for cond in conditions:
rb = by_model['base'][cond]
rl = by_model['large'][cond]
sb = "%.2f" % (rb['True']/(rb['True']+rb['False']))
sl = "%.2f" % (rl['True']/(rl['True']+rl['False']))
print(" & ".join(map(str,[cond, sb, sl, sum(rb.values())])),"\\\\")
| bert-syntax-master | gen_marvin_tbl.py |
import sys
from collections import *
files=[("base","results/lgd_results_base.txt"),("large","results/lgd_results_large.txt")]
by_model={}
conditions=set()
nskipped=0
for title,fname in files:
lines = open(fname)
results=defaultdict(Counter)
by_model[title]=results
skipped = set()
for line in lines:
if line.startswith("Better speed"): continue
if line.startswith("skipping"):
skipped.add(line.split()[1])
#next(lines) # no need to skip, skipped in testing
nskipped += 1
continue
assert (line.strip().split()[0] in ['True','False']),line
res,c1,_ = line.split(None, 2)
conditions.add(c1)
results[c1][res]+=1
print("skipped:",nskipped,len(skipped),skipped)
print("condition & base & large & count \\\\")
for cond in conditions:
rb = by_model['base'][cond]
rl = by_model['large'][cond]
sb = "%.2f" % (rb['True']/(rb['True']+rb['False']))
sl = "%.2f" % (rl['True']/(rl['True']+rl['False']))
print(" & ".join(map(str,[cond, sb, sl, sum(rb.values())])),"\\\\")
| bert-syntax-master | gen_lgd_tbl.py |
# Lint as: python3
"""
HuggingFace / AutoTrain Advanced
"""
import os
from setuptools import find_packages, setup
DOCLINES = __doc__.split("\n")
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
LONG_DESCRIPTION = f.read()
# get INSTALL_REQUIRES from requirements.txt
with open(os.path.join(this_directory, "requirements.txt"), encoding="utf-8") as f:
INSTALL_REQUIRES = f.read().splitlines()
QUALITY_REQUIRE = [
"black",
"isort",
"flake8==3.7.9",
]
TESTS_REQUIRE = ["pytest"]
EXTRAS_REQUIRE = {
"dev": INSTALL_REQUIRES + QUALITY_REQUIRE + TESTS_REQUIRE,
"quality": INSTALL_REQUIRES + QUALITY_REQUIRE,
"docs": INSTALL_REQUIRES
+ [
"recommonmark",
"sphinx==3.1.2",
"sphinx-markdown-tables",
"sphinx-rtd-theme==0.4.3",
"sphinx-copybutton",
],
}
setup(
name="autotrain-advanced",
description=DOCLINES[0],
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author="HuggingFace Inc.",
author_email="autotrain@huggingface.co",
url="https://github.com/huggingface/autotrain-advanced",
download_url="https://github.com/huggingface/autotrain-advanced/tags",
license="Apache 2.0",
package_dir={"": "src"},
packages=find_packages("src"),
extras_require=EXTRAS_REQUIRE,
install_requires=INSTALL_REQUIRES,
entry_points={"console_scripts": ["autotrain=autotrain.cli.autotrain:main"]},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="automl autonlp autotrain huggingface",
)
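# Installation / CLI sketch (illustrative): installing the package registers the
# `autotrain` console script declared in entry_points above, e.g.:
#   pip install autotrain-advanced
#   autotrain --help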
| autotrain-advanced-main | setup.py |
import os
from uuid import uuid4
from datasets import load_dataset
from autotrain.dataset import AutoTrainDataset
from autotrain.project import Project
RANDOM_ID = str(uuid4())
DATASET = "amazon_reviews_multi"
PROJECT_NAME = f"amazon_reviews_multi_{RANDOM_ID}"
TASK = "text_multi_class_classification"
MODEL = "bert-base-uncased"
USERNAME = os.environ["AUTOTRAIN_USERNAME"]
TOKEN = os.environ["HF_TOKEN"]
if __name__ == "__main__":
dataset = load_dataset(DATASET, "en")
train = dataset["train"]
validation = dataset["test"]
# convert to pandas dataframe
train_df = train.to_pandas()
validation_df = validation.to_pandas()
# prepare dataset for AutoTrain
dset = AutoTrainDataset(
train_data=[train_df],
valid_data=[validation_df],
task=TASK,
token=TOKEN,
project_name=PROJECT_NAME,
username=USERNAME,
column_mapping={"text": "review_body", "label": "stars"},
percent_valid=None,
)
dset.prepare()
#
# How to get params for a task:
#
# from autotrain.params import Params
# params = Params(task=TASK, training_type="hub_model").get()
# print(params) to get full list of params for the task
# define params in proper format
job1 = {
"task": TASK,
"learning_rate": 1e-5,
"optimizer": "adamw_torch",
"scheduler": "linear",
"epochs": 5,
}
job2 = {
"task": TASK,
"learning_rate": 3e-5,
"optimizer": "adamw_torch",
"scheduler": "cosine",
"epochs": 5,
}
job3 = {
"task": TASK,
"learning_rate": 5e-5,
"optimizer": "sgd",
"scheduler": "cosine",
"epochs": 5,
}
jobs = [job1, job2, job3]
project = Project(dataset=dset, hub_model=MODEL, job_params=jobs)
project_id = project.create()
project.approve(project_id)
| autotrain-advanced-main | examples/text_classification_multiclass.py |
import os
from uuid import uuid4
from datasets import load_dataset
from autotrain.dataset import AutoTrainDataset
from autotrain.project import Project
RANDOM_ID = str(uuid4())
DATASET = "imdb"
PROJECT_NAME = f"imdb_{RANDOM_ID}"
TASK = "text_binary_classification"
MODEL = "bert-base-uncased"
USERNAME = os.environ["AUTOTRAIN_USERNAME"]
TOKEN = os.environ["HF_TOKEN"]
if __name__ == "__main__":
dataset = load_dataset(DATASET)
train = dataset["train"]
validation = dataset["test"]
# convert to pandas dataframe
train_df = train.to_pandas()
validation_df = validation.to_pandas()
# prepare dataset for AutoTrain
dset = AutoTrainDataset(
train_data=[train_df],
valid_data=[validation_df],
task=TASK,
token=TOKEN,
project_name=PROJECT_NAME,
username=USERNAME,
column_mapping={"text": "text", "label": "label"},
percent_valid=None,
)
dset.prepare()
#
# How to get params for a task:
#
# from autotrain.params import Params
# params = Params(task=TASK, training_type="hub_model").get()
# print(params) to get full list of params for the task
# define params in proper format
job1 = {
"task": TASK,
"learning_rate": 1e-5,
"optimizer": "adamw_torch",
"scheduler": "linear",
"epochs": 5,
}
job2 = {
"task": TASK,
"learning_rate": 3e-5,
"optimizer": "adamw_torch",
"scheduler": "cosine",
"epochs": 5,
}
job3 = {
"task": TASK,
"learning_rate": 5e-5,
"optimizer": "sgd",
"scheduler": "cosine",
"epochs": 5,
}
jobs = [job1, job2, job3]
project = Project(dataset=dset, hub_model=MODEL, job_params=jobs)
project_id = project.create()
project.approve(project_id)
| autotrain-advanced-main | examples/text_classification_binary.py |
import sys
from accelerate.state import PartialState
from loguru import logger
emojis = {
"TRACE": "🔍",
"DEBUG": "🐛",
"INFO": "🚀",
"SUCCESS": "✅",
"WARNING": "⚠️",
"ERROR": "❌",
"CRITICAL": "🚨",
}
def should_log(record):
return PartialState().is_main_process
def emoji_filter(record):
level = record["level"].name
record["level_emoji"] = emojis.get(level, "") + " " + level
return True
log_format = (
"<level>{level_emoji: <8}</level> | "
"<green>{time:YYYY-MM-DD HH:mm:ss}</green> | "
"<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - "
"<level>{message}</level>"
)
logger.remove()
if not hasattr(logger, "_is_customized") or not logger._is_customized:
logger.add(sys.stderr, format=log_format, filter=lambda x: should_log(x) and emoji_filter(x))
logger._is_customized = True
custom_logger = logger
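# Minimal usage sketch: the exported logger applies the emoji format above and only
# emits on the main process under accelerate, e.g.:
#   from autotrain.logging import custom_logger
#   custom_logger.info("training started")   # emitted with the "🚀 INFO" prefix defined above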
| autotrain-advanced-main | src/autotrain/logging.py |
from dataclasses import dataclass
from typing import Literal
import gradio as gr
from pydantic import BaseModel, Field
from autotrain.languages import SUPPORTED_LANGUAGES
from autotrain.tasks import TASKS
class LoraR:
TYPE = "int"
MIN_VALUE = 1
MAX_VALUE = 100
DEFAULT = 16
STEP = 1
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "LoRA R"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=STEP)
class LoraAlpha:
TYPE = "int"
MIN_VALUE = 1
MAX_VALUE = 256
DEFAULT = 32
STEP = 1
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "LoRA Alpha"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=STEP)
class LoraDropout:
TYPE = "float"
MIN_VALUE = 0.0
MAX_VALUE = 1.0
DEFAULT = 0.05
STEP = 0.01
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "LoRA Dropout"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=STEP)
class LearningRate:
TYPE = "float"
MIN_VALUE = 1e-7
MAX_VALUE = 1e-1
DEFAULT = 1e-3
STEP = 1e-6
FORMAT = "%.2E"
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Learning Rate"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=STEP)
class LMLearningRate(LearningRate):
DEFAULT = 5e-5
class Optimizer:
TYPE = "str"
DEFAULT = "adamw_torch"
CHOICES = ["adamw_torch", "adamw_hf", "sgd", "adafactor", "adagrad"]
STREAMLIT_INPUT = "selectbox"
PRETTY_NAME = "Optimizer"
GRADIO_INPUT = gr.Dropdown(CHOICES, value=DEFAULT)
class LMTrainingType:
TYPE = "str"
DEFAULT = "generic"
CHOICES = ["generic", "chat"]
STREAMLIT_INPUT = "selectbox"
PRETTY_NAME = "LM Training Type"
    GRADIO_INPUT = gr.Dropdown(CHOICES, value=DEFAULT)
class Scheduler:
TYPE = "str"
DEFAULT = "linear"
CHOICES = ["linear", "cosine"]
STREAMLIT_INPUT = "selectbox"
PRETTY_NAME = "Scheduler"
GRADIO_INPUT = gr.Dropdown(CHOICES, value=DEFAULT)
class TrainBatchSize:
TYPE = "int"
MIN_VALUE = 1
MAX_VALUE = 128
DEFAULT = 2
STEP = 2
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Train Batch Size"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=STEP)
class LMTrainBatchSize(TrainBatchSize):
DEFAULT = 4
class Epochs:
TYPE = "int"
MIN_VALUE = 1
MAX_VALUE = 1000
DEFAULT = 10
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Epochs"
GRADIO_INPUT = gr.Number(value=DEFAULT)
class LMEpochs(Epochs):
DEFAULT = 1
class PercentageWarmup:
TYPE = "float"
MIN_VALUE = 0.0
MAX_VALUE = 1.0
DEFAULT = 0.1
STEP = 0.01
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Percentage Warmup"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=STEP)
class GradientAccumulationSteps:
TYPE = "int"
MIN_VALUE = 1
MAX_VALUE = 100
DEFAULT = 1
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Gradient Accumulation Steps"
GRADIO_INPUT = gr.Number(value=DEFAULT)
class WeightDecay:
TYPE = "float"
MIN_VALUE = 0.0
MAX_VALUE = 1.0
DEFAULT = 0.0
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Weight Decay"
GRADIO_INPUT = gr.Number(value=DEFAULT)
class SourceLanguage:
TYPE = "str"
DEFAULT = "en"
CHOICES = SUPPORTED_LANGUAGES
STREAMLIT_INPUT = "selectbox"
PRETTY_NAME = "Source Language"
GRADIO_INPUT = gr.Dropdown(CHOICES, value=DEFAULT)
class TargetLanguage:
TYPE = "str"
DEFAULT = "en"
CHOICES = SUPPORTED_LANGUAGES
STREAMLIT_INPUT = "selectbox"
PRETTY_NAME = "Target Language"
GRADIO_INPUT = gr.Dropdown(CHOICES, value=DEFAULT)
class NumModels:
TYPE = "int"
MIN_VALUE = 1
MAX_VALUE = 25
DEFAULT = 1
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Number of Models"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=1)
class DBNumSteps:
TYPE = "int"
MIN_VALUE = 100
MAX_VALUE = 10000
DEFAULT = 1500
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Number of Steps"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=100)
class DBTextEncoderStepsPercentage:
TYPE = "int"
MIN_VALUE = 1
MAX_VALUE = 100
DEFAULT = 30
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Text encoder steps percentage"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=1)
class DBPriorPreservation:
TYPE = "bool"
DEFAULT = False
STREAMLIT_INPUT = "checkbox"
PRETTY_NAME = "Prior preservation"
GRADIO_INPUT = gr.Dropdown(["True", "False"], value="False")
class ImageSize:
TYPE = "int"
MIN_VALUE = 64
MAX_VALUE = 2048
DEFAULT = 512
STREAMLIT_INPUT = "number_input"
PRETTY_NAME = "Image Size"
GRADIO_INPUT = gr.Slider(minimum=MIN_VALUE, maximum=MAX_VALUE, value=DEFAULT, step=64)
class DreamboothConceptType:
TYPE = "str"
DEFAULT = "person"
CHOICES = ["person", "object"]
STREAMLIT_INPUT = "selectbox"
PRETTY_NAME = "Concept Type"
GRADIO_INPUT = gr.Dropdown(CHOICES, value=DEFAULT)
class SourceLanguageUnk:
TYPE = "str"
DEFAULT = "unk"
CHOICES = ["unk"]
STREAMLIT_INPUT = "selectbox"
PRETTY_NAME = "Source Language"
GRADIO_INPUT = gr.Dropdown(CHOICES, value=DEFAULT)
class HubModel:
TYPE = "str"
DEFAULT = "bert-base-uncased"
PRETTY_NAME = "Hub Model"
GRADIO_INPUT = gr.Textbox(lines=1, max_lines=1, label="Hub Model")
class TextBinaryClassificationParams(BaseModel):
task: Literal["text_binary_classification"]
learning_rate: float = Field(5e-5, title="Learning rate")
num_train_epochs: int = Field(3, title="Number of training epochs")
max_seq_length: int = Field(128, title="Max sequence length")
train_batch_size: int = Field(32, title="Training batch size")
warmup_ratio: float = Field(0.1, title="Warmup proportion")
gradient_accumulation_steps: int = Field(1, title="Gradient accumulation steps")
optimizer: str = Field("adamw_torch", title="Optimizer")
scheduler: str = Field("linear", title="Scheduler")
weight_decay: float = Field(0.0, title="Weight decay")
max_grad_norm: float = Field(1.0, title="Max gradient norm")
seed: int = Field(42, title="Seed")
class TextMultiClassClassificationParams(BaseModel):
task: Literal["text_multi_class_classification"]
learning_rate: float = Field(5e-5, title="Learning rate")
num_train_epochs: int = Field(3, title="Number of training epochs")
max_seq_length: int = Field(128, title="Max sequence length")
train_batch_size: int = Field(32, title="Training batch size")
warmup_ratio: float = Field(0.1, title="Warmup proportion")
gradient_accumulation_steps: int = Field(1, title="Gradient accumulation steps")
optimizer: str = Field("adamw_torch", title="Optimizer")
scheduler: str = Field("linear", title="Scheduler")
weight_decay: float = Field(0.0, title="Weight decay")
max_grad_norm: float = Field(1.0, title="Max gradient norm")
seed: int = Field(42, title="Seed")
class DreamboothParams(BaseModel):
task: Literal["dreambooth"]
num_steps: int = Field(1500, title="Number of steps")
image_size: int = Field(512, title="Image size")
text_encoder_steps_percentage: int = Field(30, title="Text encoder steps percentage")
prior_preservation: bool = Field(False, title="Prior preservation")
learning_rate: float = Field(2e-6, title="Learning rate")
train_batch_size: int = Field(1, title="Training batch size")
gradient_accumulation_steps: int = Field(1, title="Gradient accumulation steps")
class ImageBinaryClassificationParams(BaseModel):
task: Literal["image_binary_classification"]
learning_rate: float = Field(3e-5, title="Learning rate")
num_train_epochs: int = Field(3, title="Number of training epochs")
train_batch_size: int = Field(8, title="Training batch size")
warmup_ratio: float = Field(0.1, title="Warmup proportion")
gradient_accumulation_steps: int = Field(1, title="Gradient accumulation steps")
optimizer: str = Field("adamw_torch", title="Optimizer")
scheduler: str = Field("linear", title="Scheduler")
weight_decay: float = Field(0.0, title="Weight decay")
max_grad_norm: float = Field(1.0, title="Max gradient norm")
seed: int = Field(42, title="Seed")
class ImageMultiClassClassificationParams(BaseModel):
task: Literal["image_multi_class_classification"]
learning_rate: float = Field(3e-5, title="Learning rate")
num_train_epochs: int = Field(3, title="Number of training epochs")
train_batch_size: int = Field(8, title="Training batch size")
warmup_ratio: float = Field(0.1, title="Warmup proportion")
gradient_accumulation_steps: int = Field(1, title="Gradient accumulation steps")
optimizer: str = Field("adamw_torch", title="Optimizer")
scheduler: str = Field("linear", title="Scheduler")
weight_decay: float = Field(0.0, title="Weight decay")
max_grad_norm: float = Field(1.0, title="Max gradient norm")
seed: int = Field(42, title="Seed")
class LMTrainingParams(BaseModel):
task: Literal["lm_training"]
learning_rate: float = Field(3e-5, title="Learning rate")
num_train_epochs: int = Field(3, title="Number of training epochs")
train_batch_size: int = Field(8, title="Training batch size")
warmup_ratio: float = Field(0.1, title="Warmup proportion")
gradient_accumulation_steps: int = Field(1, title="Gradient accumulation steps")
optimizer: str = Field("adamw_torch", title="Optimizer")
scheduler: str = Field("linear", title="Scheduler")
weight_decay: float = Field(0.0, title="Weight decay")
max_grad_norm: float = Field(1.0, title="Max gradient norm")
seed: int = Field(42, title="Seed")
add_eos_token: bool = Field(True, title="Add EOS token")
block_size: int = Field(-1, title="Block size")
lora_r: int = Field(16, title="Lora r")
lora_alpha: int = Field(32, title="Lora alpha")
lora_dropout: float = Field(0.05, title="Lora dropout")
training_type: str = Field("generic", title="Training type")
train_on_inputs: bool = Field(False, title="Train on inputs")
@dataclass
class Params:
task: str
param_choice: str
model_choice: str
def __post_init__(self):
# task should be one of the keys in TASKS
if self.task not in TASKS:
raise ValueError(f"task must be one of {TASKS.keys()}")
self.task_id = TASKS[self.task]
if self.param_choice not in ("autotrain", "manual"):
raise ValueError("param_choice must be either autotrain or manual")
if self.model_choice not in ("autotrain", "hub_model"):
raise ValueError("model_choice must be either autotrain or hub_model")
def _dreambooth(self):
if self.param_choice == "manual":
return {
"hub_model": HubModel,
"image_size": ImageSize,
"learning_rate": LearningRate,
"train_batch_size": TrainBatchSize,
"num_steps": DBNumSteps,
"gradient_accumulation_steps": GradientAccumulationSteps,
}
if self.param_choice == "autotrain":
if self.model_choice == "hub_model":
return {
"hub_model": HubModel,
"image_size": ImageSize,
"num_models": NumModels,
}
else:
return {
"num_models": NumModels,
}
def _tabular_binary_classification(self):
return {
"num_models": NumModels,
}
def _lm_training(self):
if self.param_choice == "manual":
return {
"hub_model": HubModel,
"learning_rate": LMLearningRate,
"optimizer": Optimizer,
"scheduler": Scheduler,
"train_batch_size": LMTrainBatchSize,
"num_train_epochs": LMEpochs,
"percentage_warmup": PercentageWarmup,
"gradient_accumulation_steps": GradientAccumulationSteps,
"weight_decay": WeightDecay,
"lora_r": LoraR,
"lora_alpha": LoraAlpha,
"lora_dropout": LoraDropout,
"training_type": LMTrainingType,
}
if self.param_choice == "autotrain":
if self.model_choice == "autotrain":
return {
"num_models": NumModels,
"training_type": LMTrainingType,
}
else:
return {
"hub_model": HubModel,
"num_models": NumModels,
"training_type": LMTrainingType,
}
raise ValueError("param_choice must be either autotrain or manual")
def _tabular_multi_class_classification(self):
return self._tabular_binary_classification()
def _tabular_single_column_regression(self):
return self._tabular_binary_classification()
def tabular_multi_label_classification(self):
return self._tabular_binary_classification()
def _text_binary_classification(self):
if self.param_choice == "manual":
return {
"hub_model": HubModel,
"learning_rate": LearningRate,
"optimizer": Optimizer,
"scheduler": Scheduler,
"train_batch_size": TrainBatchSize,
"num_train_epochs": Epochs,
"percentage_warmup": PercentageWarmup,
"gradient_accumulation_steps": GradientAccumulationSteps,
"weight_decay": WeightDecay,
}
if self.param_choice == "autotrain":
if self.model_choice == "autotrain":
return {
"source_language": SourceLanguage,
"num_models": NumModels,
}
return {
"hub_model": HubModel,
"source_language": SourceLanguageUnk,
"num_models": NumModels,
}
raise ValueError("param_choice must be either autotrain or manual")
def _text_multi_class_classification(self):
return self._text_binary_classification()
def _text_entity_extraction(self):
return self._text_binary_classification()
def _text_single_column_regression(self):
return self._text_binary_classification()
def _text_natural_language_inference(self):
return self._text_binary_classification()
def _image_binary_classification(self):
if self.param_choice == "manual":
return {
"hub_model": HubModel,
"learning_rate": LearningRate,
"optimizer": Optimizer,
"scheduler": Scheduler,
"train_batch_size": TrainBatchSize,
"num_train_epochs": Epochs,
"percentage_warmup": PercentageWarmup,
"gradient_accumulation_steps": GradientAccumulationSteps,
"weight_decay": WeightDecay,
}
if self.param_choice == "autotrain":
if self.model_choice == "autotrain":
return {
"num_models": NumModels,
}
return {
"hub_model": HubModel,
"num_models": NumModels,
}
raise ValueError("param_choice must be either autotrain or manual")
def _image_multi_class_classification(self):
return self._image_binary_classification()
def get(self):
if self.task in ("text_binary_classification", "text_multi_class_classification"):
return self._text_binary_classification()
if self.task == "text_entity_extraction":
return self._text_entity_extraction()
if self.task == "text_single_column_regression":
return self._text_single_column_regression()
if self.task == "text_natural_language_inference":
return self._text_natural_language_inference()
if self.task == "tabular_binary_classification":
return self._tabular_binary_classification()
if self.task == "tabular_multi_class_classification":
return self._tabular_multi_class_classification()
if self.task == "tabular_single_column_regression":
return self._tabular_single_column_regression()
if self.task == "tabular_multi_label_classification":
return self.tabular_multi_label_classification()
if self.task in ("image_binary_classification", "image_multi_class_classification"):
return self._image_binary_classification()
if self.task == "dreambooth":
return self._dreambooth()
if self.task == "lm_training":
return self._lm_training()
raise ValueError(f"task {self.task} not supported")
| autotrain-advanced-main | src/autotrain/params.py |
import io
import json
import os
from dataclasses import dataclass
from typing import Union
import requests
from huggingface_hub import HfApi
from autotrain import logger
from autotrain.dataset import AutoTrainDataset, AutoTrainDreamboothDataset
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.generic.params import GenericParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
def _tabular_munge_data(params, username):
if isinstance(params.target_columns, str):
col_map_label = [params.target_columns]
else:
col_map_label = params.target_columns
task = params.task
if task == "classification" and len(col_map_label) > 1:
task = "tabular_multi_label_classification"
elif task == "classification" and len(col_map_label) == 1:
task = "tabular_multi_class_classification"
elif task == "regression" and len(col_map_label) > 1:
task = "tabular_multi_column_regression"
elif task == "regression" and len(col_map_label) == 1:
task = "tabular_single_column_regression"
else:
raise Exception("Please select a valid task.")
train_data_path = f"{params.data_path}/{params.train_split}.csv"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.csv"
else:
valid_data_path = []
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
task=task,
token=params.token,
project_name=params.project_name,
username=username,
column_mapping={"id": params.col_map_id, "label": col_map_label},
valid_data=valid_data_path,
percent_valid=None, # TODO: add to UI
)
dset.prepare()
return f"{username}/autotrain-data-{params.project_name}"
return params.data_path
def _llm_munge_data(params, username):
train_data_path = f"{params.data_path}/{params.train_split}.csv"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.csv"
else:
valid_data_path = []
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
task="lm_training",
token=params.token,
project_name=params.project_name,
username=username,
column_mapping={"text": params.text_column},
valid_data=valid_data_path,
percent_valid=None, # TODO: add to UI
)
dset.prepare()
return f"{username}/autotrain-data-{params.project_name}"
return params.data_path
def _text_clf_munge_data(params, username):
train_data_path = f"{params.data_path}/{params.train_split}.csv"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.csv"
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
valid_data=[valid_data_path] if valid_data_path is not None else None,
task="text_multi_class_classification",
token=params.token,
project_name=params.project_name,
username=username,
column_mapping={"text": params.text_column, "label": params.target_column},
percent_valid=None, # TODO: add to UI
)
dset.prepare()
return f"{username}/autotrain-data-{params.project_name}"
return params.data_path
def _dreambooth_munge_data(params, username):
# check if params.image_path is a directory
if os.path.isdir(params.image_path):
training_data = [os.path.join(params.image_path, f) for f in os.listdir(params.image_path)]
training_data = [io.BytesIO(open(f, "rb").read()) for f in training_data]
dset = AutoTrainDreamboothDataset(
concept_images=training_data,
concept_name=params.prompt,
token=params.token,
project_name=params.project_name,
username=username,
)
dset.prepare()
return f"{username}/autotrain-data-{params.project_name}"
return params.image_path
@dataclass
class EndpointsRunner:
params: Union[TextClassificationParams, ImageClassificationParams, LLMTrainingParams]
backend: str
def __post_init__(self):
self.endpoints_backends = {
"ep-aws-useast1-s": "aws_us-east-1_gpu_small_g4dn.xlarge",
"ep-aws-useast1-m": "aws_us-east-1_gpu_medium_g5.2xlarge",
"ep-aws-useast1-l": "aws_us-east-1_gpu_large_g4dn.12xlarge",
"ep-aws-useast1-xl": "aws_us-east-1_gpu_xlarge_p4de",
"ep-aws-useast1-2xl": "aws_us-east-1_gpu_2xlarge_p4de",
"ep-aws-useast1-4xl": "aws_us-east-1_gpu_4xlarge_p4de",
"ep-aws-useast1-8xl": "aws_us-east-1_gpu_8xlarge_p4de",
}
if self.params.repo_id is not None:
self.username = self.params.repo_id.split("/")[0]
elif self.params.username is not None:
self.username = self.params.username
else:
raise ValueError("Must provide either repo_id or username")
self.api_url = f"https://api.endpoints.huggingface.cloud/v2/endpoint/{self.username}"
if isinstance(self.params, LLMTrainingParams):
self.task_id = 9
def _create_endpoint(self):
hardware = self.endpoints_backends[self.backend]
accelerator = hardware.split("_")[2]
instance_size = hardware.split("_")[3]
region = hardware.split("_")[1]
vendor = hardware.split("_")[0]
instance_type = hardware.split("_")[4]
payload = {
"accountId": self.username,
"compute": {
"accelerator": accelerator,
"instanceSize": instance_size,
"instanceType": instance_type,
"scaling": {"maxReplica": 1, "minReplica": 1},
},
"model": {
"framework": "custom",
"image": {
"custom": {
"env": {
"HF_TOKEN": self.params.token,
"AUTOTRAIN_USERNAME": self.username,
"PROJECT_NAME": self.params.project_name,
"PARAMS": json.dumps(self.params.json()),
"DATA_PATH": self.params.data_path,
"TASK_ID": str(self.task_id),
"MODEL": self.params.model,
"OUTPUT_MODEL_REPO": self.params.repo_id,
"ENDPOINT_ID": f"{self.username}/{self.params.project_name}",
},
"health_route": "/",
"port": 7860,
"url": "huggingface/autotrain-advanced-api:latest",
}
},
"repository": "autotrain-projects/autotrain-advanced",
"revision": "main",
"task": "custom",
},
"name": self.params.project_name,
"provider": {"region": region, "vendor": vendor},
"type": "protected",
}
headers = {"Authorization": f"Bearer {self.params.token}"}
r = requests.post(self.api_url, json=payload, headers=headers)
logger.info(r.json())
return r.json()
def prepare(self):
if isinstance(self.params, LLMTrainingParams):
data_path = _llm_munge_data(self.params, self.username)
self.params.data_path = data_path
endpoint_id = self._create_endpoint()
return endpoint_id
if isinstance(self.params, TextClassificationParams):
data_path = _text_clf_munge_data(self.params, self.username)
self.params.data_path = data_path
endpoint_id = self._create_endpoint()
return endpoint_id
raise NotImplementedError
@dataclass
class SpaceRunner:
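    """Runs an AutoTrain job inside a private, Docker-based Hugging Face Space."""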
params: Union[TextClassificationParams, ImageClassificationParams, LLMTrainingParams, GenericParams, TabularParams]
backend: str
def __post_init__(self):
self.spaces_backends = {
"a10gl": "a10g-large",
"a10gs": "a10g-small",
"a100": "a100-large",
"t4m": "t4-medium",
"t4s": "t4-small",
"cpu": "cpu-upgrade",
"cpuf": "cpu-basic",
}
if not isinstance(self.params, GenericParams):
if self.params.repo_id is not None:
self.username = self.params.repo_id.split("/")[0]
elif self.params.username is not None:
self.username = self.params.username
else:
raise ValueError("Must provide either repo_id or username")
else:
self.username = self.params.username
if isinstance(self.params, LLMTrainingParams):
self.task_id = 9
elif isinstance(self.params, TextClassificationParams):
self.task_id = 2
elif isinstance(self.params, TabularParams):
self.task_id = 26
elif isinstance(self.params, GenericParams):
self.task_id = 27
elif isinstance(self.params, DreamBoothTrainingParams):
self.task_id = 25
else:
raise NotImplementedError
def prepare(self):
if isinstance(self.params, LLMTrainingParams):
self.task_id = 9
data_path = _llm_munge_data(self.params, self.username)
self.params.data_path = data_path
space_id = self._create_space()
return space_id
if isinstance(self.params, TextClassificationParams):
self.task_id = 2
data_path = _text_clf_munge_data(self.params, self.username)
self.params.data_path = data_path
space_id = self._create_space()
return space_id
if isinstance(self.params, TabularParams):
self.task_id = 26
data_path = _tabular_munge_data(self.params, self.username)
self.params.data_path = data_path
space_id = self._create_space()
return space_id
if isinstance(self.params, GenericParams):
self.task_id = 27
space_id = self._create_space()
return space_id
if isinstance(self.params, DreamBoothTrainingParams):
self.task_id = 25
            data_path = _dreambooth_munge_data(self.params, self.username)
            self.params.image_path = data_path
            space_id = self._create_space()
return space_id
raise NotImplementedError
def _create_readme(self):
_readme = "---\n"
_readme += f"title: {self.params.project_name}\n"
_readme += "emoji: 🚀\n"
_readme += "colorFrom: green\n"
_readme += "colorTo: indigo\n"
_readme += "sdk: docker\n"
_readme += "pinned: false\n"
_readme += "duplicated_from: autotrain-projects/autotrain-advanced\n"
_readme += "---\n"
_readme = io.BytesIO(_readme.encode())
return _readme
def _add_secrets(self, api, repo_id):
if isinstance(self.params, GenericParams):
for k, v in self.params.env.items():
api.add_space_secret(repo_id=repo_id, key=k, value=v)
self.params.env = {}
api.add_space_secret(repo_id=repo_id, key="HF_TOKEN", value=self.params.token)
api.add_space_secret(repo_id=repo_id, key="AUTOTRAIN_USERNAME", value=self.username)
api.add_space_secret(repo_id=repo_id, key="PROJECT_NAME", value=self.params.project_name)
api.add_space_secret(repo_id=repo_id, key="TASK_ID", value=str(self.task_id))
api.add_space_secret(repo_id=repo_id, key="PARAMS", value=json.dumps(self.params.json()))
if isinstance(self.params, DreamBoothTrainingParams):
api.add_space_secret(repo_id=repo_id, key="DATA_PATH", value=self.params.image_path)
else:
api.add_space_secret(repo_id=repo_id, key="DATA_PATH", value=self.params.data_path)
if not isinstance(self.params, GenericParams):
api.add_space_secret(repo_id=repo_id, key="MODEL", value=self.params.model)
api.add_space_secret(repo_id=repo_id, key="OUTPUT_MODEL_REPO", value=self.params.repo_id)
def _create_space(self):
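        """Create the private Docker Space, add the training secrets, and upload the README and Dockerfile."""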
api = HfApi(token=self.params.token)
repo_id = f"{self.username}/autotrain-{self.params.project_name}"
api.create_repo(
repo_id=repo_id,
repo_type="space",
space_sdk="docker",
space_hardware=self.spaces_backends[self.backend.split("-")[1].lower()],
private=True,
)
self._add_secrets(api, repo_id)
readme = self._create_readme()
api.upload_file(
path_or_fileobj=readme,
path_in_repo="README.md",
repo_id=repo_id,
repo_type="space",
)
_dockerfile = "FROM huggingface/autotrain-advanced:latest\nCMD autotrain api --port 7860 --host 0.0.0.0"
_dockerfile = io.BytesIO(_dockerfile.encode())
api.upload_file(
path_or_fileobj=_dockerfile,
path_in_repo="Dockerfile",
repo_id=repo_id,
repo_type="space",
)
return repo_id
| autotrain-advanced-main | src/autotrain/backend.py |
NLP_TASKS = {
"text_binary_classification": 1,
"text_multi_class_classification": 2,
"text_entity_extraction": 4,
"text_extractive_question_answering": 5,
"text_summarization": 8,
"text_single_column_regression": 10,
"speech_recognition": 11,
"natural_language_inference": 22,
"lm_training": 9,
}
VISION_TASKS = {
"image_binary_classification": 17,
"image_multi_class_classification": 18,
"image_single_column_regression": 24,
"dreambooth": 25,
}
TABULAR_TASKS = {
"tabular_binary_classification": 13,
"tabular_multi_class_classification": 14,
"tabular_multi_label_classification": 15,
"tabular_single_column_regression": 16,
"tabular": 26,
}
TASKS = {
**NLP_TASKS,
**VISION_TASKS,
**TABULAR_TASKS,
}
COLUMN_MAPPING = {
"text_binary_classification": ("text", "label"),
"text_multi_class_classification": ("text", "label"),
"text_entity_extraction": ("text", "tokens"),
"text_extractive_question_answering": ("text", "context", "question", "answer"),
"text_summarization": ("text", "summary"),
"text_single_column_regression": ("text", "label"),
"speech_recognition": ("audio", "text"),
"natural_language_inference": ("premise", "hypothesis", "label"),
"image_binary_classification": ("image", "label"),
"image_multi_class_classification": ("image", "label"),
"image_single_column_regression": ("image", "label"),
# "dreambooth": ("image", "label"),
"tabular_binary_classification": ("id", "label"),
"tabular_multi_class_classification": ("id", "label"),
"tabular_multi_label_classification": ("id", "label"),
"tabular_single_column_regression": ("id", "label"),
"lm_training": ("text", "prompt_start", "prompt", "context", "response"),
}
TASK_TYPE_MAPPING = {
"text_binary_classification": "Natural Language Processing",
"text_multi_class_classification": "Natural Language Processing",
"text_entity_extraction": "Natural Language Processing",
"text_extractive_question_answering": "Natural Language Processing",
"text_summarization": "Natural Language Processing",
"text_single_column_regression": "Natural Language Processing",
"lm_training": "Natural Language Processing",
"speech_recognition": "Natural Language Processing",
"natural_language_inference": "Natural Language Processing",
"image_binary_classification": "Computer Vision",
"image_multi_class_classification": "Computer Vision",
"image_single_column_regression": "Computer Vision",
"dreambooth": "Computer Vision",
"tabular_binary_classification": "Tabular",
"tabular_multi_class_classification": "Tabular",
"tabular_multi_label_classification": "Tabular",
"tabular_single_column_regression": "Tabular",
}
| autotrain-advanced-main | src/autotrain/tasks.py |
import os
import sys
from autotrain import logger
AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API", "https://api.autotrain.huggingface.co")
HF_API = os.getenv("HF_API", "https://huggingface.co")
logger.configure(handlers=[dict(sink=sys.stderr, format="> <level>{level:<7} {message}</level>")])
| autotrain-advanced-main | src/autotrain/config.py |
TEXT_CLASSIFICATION = [
".csv",
".jsonl",
]
| autotrain-advanced-main | src/autotrain/allowed_file_types.py |
# coding=utf-8
# Copyright 2020-2023 The HuggingFace AutoTrain Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
import os
os.environ["BITSANDBYTES_NOWELCOME"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import warnings
from autotrain.logging import custom_logger as logger
warnings.filterwarnings("ignore", category=UserWarning, module="tensorflow")
__version__ = "0.6.33.dev0"
| autotrain-advanced-main | src/autotrain/__init__.py |
import os
import uuid
import zipfile
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
import pandas as pd
from autotrain import logger
from autotrain.preprocessor.dreambooth import DreamboothPreprocessor
from autotrain.preprocessor.tabular import (
TabularBinaryClassificationPreprocessor,
TabularMultiClassClassificationPreprocessor,
TabularMultiColumnRegressionPreprocessor,
TabularMultiLabelClassificationPreprocessor,
TabularSingleColumnRegressionPreprocessor,
)
from autotrain.preprocessor.text import (
LLMPreprocessor,
TextBinaryClassificationPreprocessor,
TextMultiClassClassificationPreprocessor,
TextSingleColumnRegressionPreprocessor,
)
from autotrain.preprocessor.vision import ImageClassificationPreprocessor
def remove_non_image_files(folder):
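    """Recursively delete every file under ``folder`` whose extension is not an allowed image type."""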
# Define allowed image file extensions
allowed_extensions = {".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG"}
# Iterate through all files in the folder
for root, dirs, files in os.walk(folder):
for file in files:
# Get the file extension
file_extension = os.path.splitext(file)[1]
# If the file extension is not in the allowed list, remove the file
if file_extension.lower() not in allowed_extensions:
file_path = os.path.join(root, file)
os.remove(file_path)
print(f"Removed file: {file_path}")
# Recursively call the function on each subfolder
for subfolder in dirs:
remove_non_image_files(os.path.join(root, subfolder))
@dataclass
class AutoTrainDreamboothDataset:
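    """Wraps a list of concept images and a concept name for DreamBooth training."""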
concept_images: List[Any]
concept_name: str
token: str
project_name: str
username: str
def __str__(self) -> str:
info = f"Dataset: {self.project_name} ({self.task})\n"
return info
def __post_init__(self):
self.task = "dreambooth"
logger.info(self.__str__())
@property
def num_samples(self):
return len(self.concept_images)
def prepare(self):
preprocessor = DreamboothPreprocessor(
concept_images=self.concept_images,
concept_name=self.concept_name,
token=self.token,
project_name=self.project_name,
username=self.username,
)
preprocessor.prepare()
@dataclass
class AutoTrainImageClassificationDataset:
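    """Wraps zipped image-classification data; extracts the archives and runs the image preprocessor."""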
train_data: str
token: str
project_name: str
username: str
valid_data: Optional[str] = None
percent_valid: Optional[float] = None
def __str__(self) -> str:
info = f"Dataset: {self.project_name} ({self.task})\n"
info += f"Train data: {self.train_data}\n"
info += f"Valid data: {self.valid_data}\n"
return info
def __post_init__(self):
self.task = "image_multi_class_classification"
if not self.valid_data and self.percent_valid is None:
self.percent_valid = 0.2
elif self.valid_data and self.percent_valid is not None:
raise ValueError("You can only specify one of valid_data or percent_valid")
elif self.valid_data:
self.percent_valid = 0.0
logger.info(self.__str__())
self.num_files = self._count_files()
@property
def num_samples(self):
return self.num_files
def _count_files(self):
num_files = 0
zip_ref = zipfile.ZipFile(self.train_data, "r")
for _ in zip_ref.namelist():
num_files += 1
if self.valid_data:
zip_ref = zipfile.ZipFile(self.valid_data, "r")
for _ in zip_ref.namelist():
num_files += 1
return num_files
def prepare(self):
cache_dir = os.environ.get("HF_HOME")
if not cache_dir:
cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface")
random_uuid = uuid.uuid4()
train_dir = os.path.join(cache_dir, "autotrain", str(random_uuid))
os.makedirs(train_dir, exist_ok=True)
zip_ref = zipfile.ZipFile(self.train_data, "r")
zip_ref.extractall(train_dir)
# remove the __MACOSX directory
macosx_dir = os.path.join(train_dir, "__MACOSX")
if os.path.exists(macosx_dir):
os.system(f"rm -rf {macosx_dir}")
remove_non_image_files(train_dir)
valid_dir = None
if self.valid_data:
random_uuid = uuid.uuid4()
valid_dir = os.path.join(cache_dir, "autotrain", str(random_uuid))
os.makedirs(valid_dir, exist_ok=True)
zip_ref = zipfile.ZipFile(self.valid_data, "r")
zip_ref.extractall(valid_dir)
# remove the __MACOSX directory
macosx_dir = os.path.join(valid_dir, "__MACOSX")
if os.path.exists(macosx_dir):
os.system(f"rm -rf {macosx_dir}")
remove_non_image_files(valid_dir)
preprocessor = ImageClassificationPreprocessor(
train_data=train_dir,
valid_data=valid_dir,
token=self.token,
project_name=self.project_name,
username=self.username,
)
preprocessor.prepare()
@dataclass
class AutoTrainDataset:
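    """Generic dataset wrapper: loads CSV/DataFrame splits and dispatches to a task-specific preprocessor."""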
train_data: List[str]
task: str
token: str
project_name: str
username: str
column_mapping: Optional[Dict[str, str]] = None
valid_data: Optional[List[str]] = None
percent_valid: Optional[float] = None
convert_to_class_label: Optional[bool] = False
def __str__(self) -> str:
info = f"Dataset: {self.project_name} ({self.task})\n"
info += f"Train data: {self.train_data}\n"
info += f"Valid data: {self.valid_data}\n"
info += f"Column mapping: {self.column_mapping}\n"
return info
def __post_init__(self):
if not self.valid_data and self.percent_valid is None:
self.percent_valid = 0.2
elif self.valid_data and self.percent_valid is not None:
raise ValueError("You can only specify one of valid_data or percent_valid")
elif self.valid_data:
self.percent_valid = 0.0
self.train_df, self.valid_df = self._preprocess_data()
logger.info(self.__str__())
def _preprocess_data(self):
train_df = []
for file in self.train_data:
if isinstance(file, pd.DataFrame):
train_df.append(file)
else:
train_df.append(pd.read_csv(file))
if len(train_df) > 1:
train_df = pd.concat(train_df)
else:
train_df = train_df[0]
valid_df = None
        if self.valid_data:
valid_df = []
for file in self.valid_data:
if isinstance(file, pd.DataFrame):
valid_df.append(file)
else:
valid_df.append(pd.read_csv(file))
if len(valid_df) > 1:
valid_df = pd.concat(valid_df)
else:
valid_df = valid_df[0]
return train_df, valid_df
@property
def num_samples(self):
return len(self.train_df) + len(self.valid_df) if self.valid_df is not None else len(self.train_df)
def prepare(self):
if self.task == "text_binary_classification":
text_column = self.column_mapping["text"]
label_column = self.column_mapping["label"]
preprocessor = TextBinaryClassificationPreprocessor(
train_data=self.train_df,
text_column=text_column,
label_column=label_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
convert_to_class_label=self.convert_to_class_label,
)
preprocessor.prepare()
elif self.task == "text_multi_class_classification":
text_column = self.column_mapping["text"]
label_column = self.column_mapping["label"]
preprocessor = TextMultiClassClassificationPreprocessor(
train_data=self.train_df,
text_column=text_column,
label_column=label_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
convert_to_class_label=self.convert_to_class_label,
)
preprocessor.prepare()
elif self.task == "text_single_column_regression":
text_column = self.column_mapping["text"]
label_column = self.column_mapping["label"]
preprocessor = TextSingleColumnRegressionPreprocessor(
train_data=self.train_df,
text_column=text_column,
label_column=label_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
)
preprocessor.prepare()
elif self.task == "lm_training":
text_column = self.column_mapping.get("text", None)
if text_column is None:
prompt_column = self.column_mapping["prompt"]
response_column = self.column_mapping["response"]
else:
prompt_column = None
response_column = None
context_column = self.column_mapping.get("context", None)
prompt_start_column = self.column_mapping.get("prompt_start", None)
preprocessor = LLMPreprocessor(
train_data=self.train_df,
text_column=text_column,
prompt_column=prompt_column,
response_column=response_column,
context_column=context_column,
prompt_start_column=prompt_start_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
)
preprocessor.prepare()
elif self.task == "tabular_binary_classification":
id_column = self.column_mapping["id"]
label_column = self.column_mapping["label"][0]
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularBinaryClassificationPreprocessor(
train_data=self.train_df,
id_column=id_column,
label_column=label_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
)
preprocessor.prepare()
elif self.task == "tabular_multi_class_classification":
id_column = self.column_mapping["id"]
label_column = self.column_mapping["label"][0]
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularMultiClassClassificationPreprocessor(
train_data=self.train_df,
id_column=id_column,
label_column=label_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
)
preprocessor.prepare()
elif self.task == "tabular_single_column_regression":
id_column = self.column_mapping["id"]
label_column = self.column_mapping["label"][0]
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularSingleColumnRegressionPreprocessor(
train_data=self.train_df,
id_column=id_column,
label_column=label_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
)
preprocessor.prepare()
elif self.task == "tabular_multi_column_regression":
id_column = self.column_mapping["id"]
label_column = self.column_mapping["label"]
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularMultiColumnRegressionPreprocessor(
train_data=self.train_df,
id_column=id_column,
label_column=label_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
)
preprocessor.prepare()
elif self.task == "tabular_multi_label_classification":
id_column = self.column_mapping["id"]
label_column = self.column_mapping["label"]
if len(id_column.strip()) == 0:
id_column = None
preprocessor = TabularMultiLabelClassificationPreprocessor(
train_data=self.train_df,
id_column=id_column,
label_column=label_column,
username=self.username,
project_name=self.project_name,
valid_data=self.valid_df,
test_size=self.percent_valid,
token=self.token,
seed=42,
)
preprocessor.prepare()
else:
raise ValueError(f"Task {self.task} not supported")
| autotrain-advanced-main | src/autotrain/dataset.py |
import json
import os
import subprocess
import psutil
from fastapi import FastAPI
from autotrain import logger
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.generic.params import GenericParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
HF_TOKEN = os.environ.get("HF_TOKEN")
AUTOTRAIN_USERNAME = os.environ.get("AUTOTRAIN_USERNAME")
PROJECT_NAME = os.environ.get("PROJECT_NAME")
TASK_ID = int(os.environ.get("TASK_ID"))
PARAMS = os.environ.get("PARAMS")
DATA_PATH = os.environ.get("DATA_PATH")
MODEL = os.environ.get("MODEL")
OUTPUT_MODEL_REPO = os.environ.get("OUTPUT_MODEL_REPO")
PID = None
api = FastAPI()
logger.info(f"AUTOTRAIN_USERNAME: {AUTOTRAIN_USERNAME}")
logger.info(f"PROJECT_NAME: {PROJECT_NAME}")
logger.info(f"TASK_ID: {TASK_ID}")
logger.info(f"DATA_PATH: {DATA_PATH}")
logger.info(f"MODEL: {MODEL}")
logger.info(f"OUTPUT_MODEL_REPO: {OUTPUT_MODEL_REPO}")
def run_training():
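    """Parse the PARAMS env var for the current task, write the training config to /tmp/model, and
    launch the training command as a subprocess, returning its PID."""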
params = json.loads(PARAMS)
logger.info(params)
if TASK_ID == 9:
params = LLMTrainingParams.parse_raw(params)
params.project_name = "/tmp/model"
params.save(output_dir=params.project_name)
cmd = ["accelerate", "launch", "--num_machines", "1", "--num_processes", "1"]
cmd.append("--mixed_precision")
if params.fp16:
cmd.append("fp16")
else:
cmd.append("no")
cmd.extend(
[
"-m",
"autotrain.trainers.clm",
"--training_config",
os.path.join(params.project_name, "training_params.json"),
]
)
elif TASK_ID in (1, 2):
params = TextClassificationParams.parse_raw(params)
params.project_name = "/tmp/model"
params.save(output_dir=params.project_name)
cmd = ["accelerate", "launch", "--num_machines", "1", "--num_processes", "1"]
cmd.append("--mixed_precision")
if params.fp16:
cmd.append("fp16")
else:
cmd.append("no")
cmd.extend(
[
"-m",
"autotrain.trainers.text_classification",
"--training_config",
os.path.join(params.project_name, "training_params.json"),
]
)
elif TASK_ID in (13, 14, 15, 16, 26):
params = TabularParams.parse_raw(params)
params.project_name = "/tmp/model"
params.save(output_dir=params.project_name)
cmd = [
"python",
"-m",
"autotrain.trainers.tabular",
"--training_config",
os.path.join(params.project_name, "training_params.json"),
]
elif TASK_ID == 27:
params = GenericParams.parse_raw(params)
params.project_name = "/tmp/model"
params.save(output_dir=params.project_name)
cmd = [
"python",
"-m",
"autotrain.trainers.generic",
"--config",
os.path.join(params.project_name, "training_params.json"),
]
elif TASK_ID == 25:
params = DreamBoothTrainingParams.parse_raw(params)
params.project_name = "/tmp/model"
params.save(output_dir=params.project_name)
cmd = [
"python",
"-m",
"autotrain.trainers.dreambooth",
"--training_config",
os.path.join(params.project_name, "training_params.json"),
]
else:
raise NotImplementedError
cmd = [str(c) for c in cmd]
logger.info(cmd)
env = os.environ.copy()
process = subprocess.Popen(" ".join(cmd), shell=True, env=env)
return process.pid
def get_process_status(pid):
try:
process = psutil.Process(pid)
return process.status()
except psutil.NoSuchProcess:
return "No process found with PID: {}".format(pid)
def kill_process(pid):
try:
parent_process = psutil.Process(pid)
children = parent_process.children(recursive=True) # This will get all the child processes recursively
# First, terminate the child processes
for child in children:
child.terminate()
# Wait for the child processes to terminate, and kill them if they don't
gone, still_alive = psutil.wait_procs(children, timeout=3)
for child in still_alive:
child.kill()
# Now, terminate the parent process
parent_process.terminate()
parent_process.wait(timeout=5)
logger.info(f"Process with pid {pid} and its children have been killed")
return f"Process with pid {pid} and its children have been killed"
except psutil.NoSuchProcess:
logger.info(f"No process found with pid {pid}")
return f"No process found with pid {pid}"
except psutil.TimeoutExpired:
logger.info(f"Process {pid} or one of its children has not terminated in time")
return f"Process {pid} or one of its children has not terminated in time"
@api.on_event("startup")
async def startup_event():
process_pid = run_training()
logger.info(f"Started training with PID {process_pid}")
global PID
PID = process_pid
@api.get("/")
async def root():
return "Your model is being trained..."
@api.get("/status")
async def status():
return get_process_status(pid=PID)
@api.get("/kill")
async def kill():
return kill_process(pid=PID)
@api.get("/health")
async def health():
return "OK"
| autotrain-advanced-main | src/autotrain/api.py |
import glob
import json
import os
import re
import shutil
import subprocess
import traceback
from typing import Dict, Optional
import requests
from accelerate.state import PartialState
from huggingface_hub import HfApi, HfFolder
from huggingface_hub.repository import Repository
from transformers import AutoConfig
from autotrain import config, logger
from autotrain.tasks import TASKS
FORMAT_TAG = "\033[{code}m"
RESET_TAG = FORMAT_TAG.format(code=0)
BOLD_TAG = FORMAT_TAG.format(code=1)
RED_TAG = FORMAT_TAG.format(code=91)
GREEN_TAG = FORMAT_TAG.format(code=92)
YELLOW_TAG = FORMAT_TAG.format(code=93)
PURPLE_TAG = FORMAT_TAG.format(code=95)
CYAN_TAG = FORMAT_TAG.format(code=96)
LFS_PATTERNS = [
"*.bin.*",
"*.lfs.*",
"*.bin",
"*.h5",
"*.tflite",
"*.tar.gz",
"*.ot",
"*.onnx",
"*.pt",
"*.pkl",
"*.parquet",
"*.joblib",
"tokenizer.json",
]
class UnauthenticatedError(Exception):
pass
class UnreachableAPIError(Exception):
pass
def get_auth_headers(token: str, prefix: str = "Bearer"):
return {"Authorization": f"{prefix} {token}"}
def http_get(
path: str,
token: str,
domain: str = config.AUTOTRAIN_BACKEND_API,
token_prefix: str = "Bearer",
suppress_logs: bool = False,
**kwargs,
) -> requests.Response:
"""HTTP GET request to the AutoNLP API, raises UnreachableAPIError if the API cannot be reached"""
logger.info(f"Sending GET request to {domain + path}")
try:
response = requests.get(
url=domain + path, headers=get_auth_headers(token=token, prefix=token_prefix), **kwargs
)
except requests.exceptions.ConnectionError:
raise UnreachableAPIError("❌ Failed to reach AutoNLP API, check your internet connection")
response.raise_for_status()
return response
def http_post(
path: str,
token: str,
payload: Optional[Dict] = None,
domain: str = config.AUTOTRAIN_BACKEND_API,
suppress_logs: bool = False,
**kwargs,
) -> requests.Response:
"""HTTP POST request to the AutoNLP API, raises UnreachableAPIError if the API cannot be reached"""
logger.info(f"Sending POST request to {domain + path}")
try:
response = requests.post(
url=domain + path, json=payload, headers=get_auth_headers(token=token), allow_redirects=True, **kwargs
)
except requests.exceptions.ConnectionError:
raise UnreachableAPIError("❌ Failed to reach AutoNLP API, check your internet connection")
response.raise_for_status()
return response
def get_task(task_id: int) -> str:
for key, value in TASKS.items():
if value == task_id:
return key
return "❌ Unsupported task! Please update autonlp"
def get_user_token():
return HfFolder.get_token()
def user_authentication(token):
logger.info("Authenticating user...")
headers = {}
cookies = {}
if token.startswith("hf_"):
headers["Authorization"] = f"Bearer {token}"
else:
cookies = {"token": token}
try:
response = requests.get(
config.HF_API + "/api/whoami-v2",
headers=headers,
cookies=cookies,
timeout=3,
)
except (requests.Timeout, ConnectionError) as err:
logger.error(f"Failed to request whoami-v2 - {repr(err)}")
raise Exception("Hugging Face Hub is unreachable, please try again later.")
return response.json()
def get_project_cost(username, token, task, num_samples, num_models):
logger.info("Getting project cost...")
task_id = TASKS[task]
pricing = http_get(
path=f"/pricing/compute?username={username}&task_id={task_id}&num_samples={num_samples}&num_models={num_models}",
token=token,
)
return pricing.json()["price"]
def app_error_handler(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as err:
logger.error(f"{func.__name__} has failed due to an exception:")
logger.error(traceback.format_exc())
if "param_choice" in str(err):
ValueError("Unable to estimate costs. Job params not chosen yet.")
elif "Failed to reach AutoNLP API" in str(err):
ValueError("Unable to reach AutoTrain API. Please check your internet connection.")
elif "An error has occurred: 'NoneType' object has no attribute 'type'" in str(err):
ValueError("Unable to estimate costs. Data not uploaded yet.")
else:
ValueError(f"An error has occurred: {err}")
return wrapper
def clone_hf_repo(repo_url: str, local_dir: str, token: str) -> Repository:
os.makedirs(local_dir, exist_ok=True)
repo_url = re.sub(r"(https?://)", rf"\1user:{token}@", repo_url)
subprocess.run(
"git lfs install".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=local_dir,
)
subprocess.run(
f"git lfs clone {repo_url} .".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=local_dir,
)
data_repo = Repository(local_dir=local_dir, use_auth_token=token)
return data_repo
def create_repo(project_name, autotrain_user, huggingface_token, model_path):
repo_name = f"autotrain-{project_name}"
repo_url = HfApi().create_repo(
repo_id=f"{autotrain_user}/{repo_name}",
token=huggingface_token,
exist_ok=False,
private=True,
)
if len(repo_url.strip()) == 0:
repo_url = f"https://huggingface.co/{autotrain_user}/{repo_name}"
logger.info(f"Created repo: {repo_url}")
model_repo = clone_hf_repo(
local_dir=model_path,
repo_url=repo_url,
token=huggingface_token,
)
model_repo.lfs_track(patterns=LFS_PATTERNS)
return model_repo
def save_model(torch_model, model_path):
torch_model.save_pretrained(model_path)
try:
torch_model.save_pretrained(model_path, safe_serialization=True)
except Exception as e:
logger.error(f"Safe serialization failed with error: {e}")
def save_tokenizer(tok, model_path):
tok.save_pretrained(model_path)
def update_model_config(model, job_config):
model.config._name_or_path = "AutoTrain"
if job_config.task in ("speech_recognition", "summarization"):
return model
if "max_seq_length" in job_config:
model.config.max_length = job_config.max_seq_length
model.config.padding = "max_length"
return model
def save_model_card(model_card, model_path):
with open(os.path.join(model_path, "README.md"), "w") as fp:
fp.write(f"{model_card}")
def create_file(filename, file_content, model_path):
with open(os.path.join(model_path, filename), "w") as fp:
fp.write(f"{file_content}")
def save_config(conf, model_path):
with open(os.path.join(model_path, "config.json"), "w") as fp:
json.dump(conf, fp)
def remove_checkpoints(model_path):
subfolders = glob.glob(os.path.join(model_path, "*/"))
for subfolder in subfolders:
shutil.rmtree(subfolder)
try:
os.remove(os.path.join(model_path, "emissions.csv"))
except OSError:
pass
def job_watcher(func):
def wrapper(co2_tracker, *args, **kwargs):
try:
return func(co2_tracker, *args, **kwargs)
except Exception:
logger.error(f"{func.__name__} has failed due to an exception:")
logger.error(traceback.format_exc())
co2_tracker.stop()
# delete training tracker file
os.remove(os.path.join("/tmp", "training"))
return wrapper
def get_model_architecture(model_path_or_name: str, revision: str = "main") -> str:
config = AutoConfig.from_pretrained(model_path_or_name, revision=revision, trust_remote_code=True)
architectures = config.architectures
if architectures is None or len(architectures) > 1:
raise ValueError(
f"The model architecture is either not defined or not unique. Found architectures: {architectures}"
)
return architectures[0]
def monitor(func):
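    """Decorator that logs uncaught exceptions on the main process and pauses the Space, if running in one."""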
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
if PartialState().process_index == 0:
logger.error(f"{func.__name__} has failed due to an exception:")
logger.error(traceback.format_exc())
if "SPACE_ID" in os.environ:
# shut down the space
logger.info("Pausing space...")
api = HfApi(token=os.environ["HF_TOKEN"])
api.pause_space(repo_id=os.environ["SPACE_ID"])
return wrapper
| autotrain-advanced-main | src/autotrain/utils.py |
import json
import os
import random
import string
import zipfile
import gradio as gr
import pandas as pd
from huggingface_hub import list_models
from autotrain import logger
from autotrain.dataset import AutoTrainDataset, AutoTrainDreamboothDataset, AutoTrainImageClassificationDataset
from autotrain.languages import SUPPORTED_LANGUAGES
from autotrain.params import Params
from autotrain.project import Project
from autotrain.utils import get_project_cost, get_user_token, user_authentication
APP_TASKS = {
"Natural Language Processing": ["Text Classification"],
# "Tabular": TABULAR_TASKS,
"Computer Vision": ["Image Classification", "Dreambooth"],
}
APP_TASKS_MAPPING = {
"Text Classification": "text_multi_class_classification",
"LLM Finetuning": "lm_training",
"Image Classification": "image_multi_class_classification",
"Dreambooth": "dreambooth",
}
APP_TASK_TYPE_MAPPING = {
"text_classification": "Natural Language Processing",
"lm_training": "Natural Language Processing",
"image_classification": "Computer Vision",
"dreambooth": "Computer Vision",
}
ALLOWED_FILE_TYPES = [
".csv",
".CSV",
".jsonl",
".JSONL",
".zip",
".ZIP",
".png",
".PNG",
".jpg",
".JPG",
".jpeg",
".JPEG",
]
def _login_user(user_token):
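    """Authenticate the token and return it with the namespaces that can pay and those the user can train under."""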
user_info = user_authentication(token=user_token)
username = user_info["name"]
user_can_pay = user_info["canPay"]
orgs = user_info["orgs"]
valid_orgs = [org for org in orgs if org["canPay"] is True]
valid_orgs = [org for org in valid_orgs if org["roleInOrg"] in ("admin", "write")]
valid_orgs = [org["name"] for org in valid_orgs]
valid_can_pay = [username] + valid_orgs if user_can_pay else valid_orgs
who_is_training = [username] + [org["name"] for org in orgs]
return user_token, valid_can_pay, who_is_training
def _update_task_type(project_type):
return gr.Dropdown.update(
value=APP_TASKS[project_type][0],
choices=APP_TASKS[project_type],
visible=True,
)
def _update_model_choice(task, autotrain_backend):
# TODO: add tabular and remember, for tabular, we only support AutoTrain
if autotrain_backend.lower() != "huggingface internal":
model_choice = ["HuggingFace Hub"]
return gr.Dropdown.update(
value=model_choice[0],
choices=model_choice,
visible=True,
)
if task == "LLM Finetuning":
model_choice = ["HuggingFace Hub"]
else:
model_choice = ["AutoTrain", "HuggingFace Hub"]
return gr.Dropdown.update(
value=model_choice[0],
choices=model_choice,
visible=True,
)
def _update_file_type(task):
task = APP_TASKS_MAPPING[task]
if task in ("text_multi_class_classification", "lm_training"):
return gr.Radio.update(
value="CSV",
choices=["CSV", "JSONL"],
visible=True,
)
elif task == "image_multi_class_classification":
return gr.Radio.update(
value="ZIP",
choices=["Image Subfolders", "ZIP"],
visible=True,
)
elif task == "dreambooth":
return gr.Radio.update(
value="ZIP",
choices=["Image Folder", "ZIP"],
visible=True,
)
else:
raise NotImplementedError
def _update_param_choice(model_choice, autotrain_backend):
logger.info(f"model_choice: {model_choice}")
choices = ["AutoTrain", "Manual"] if model_choice == "HuggingFace Hub" else ["AutoTrain"]
choices = ["Manual"] if autotrain_backend != "HuggingFace Internal" else choices
return gr.Dropdown.update(
value=choices[0],
choices=choices,
visible=True,
)
def _project_type_update(project_type, task_type, autotrain_backend):
logger.info(f"project_type: {project_type}, task_type: {task_type}")
task_choices_update = _update_task_type(project_type)
model_choices_update = _update_model_choice(task_choices_update["value"], autotrain_backend)
param_choices_update = _update_param_choice(model_choices_update["value"], autotrain_backend)
return [
task_choices_update,
model_choices_update,
param_choices_update,
_update_hub_model_choices(task_choices_update["value"], model_choices_update["value"]),
]
def _task_type_update(task_type, autotrain_backend):
logger.info(f"task_type: {task_type}")
model_choices_update = _update_model_choice(task_type, autotrain_backend)
param_choices_update = _update_param_choice(model_choices_update["value"], autotrain_backend)
return [
model_choices_update,
param_choices_update,
_update_hub_model_choices(task_type, model_choices_update["value"]),
]
def _update_col_map(training_data, task):
task = APP_TASKS_MAPPING[task]
if task == "text_multi_class_classification":
data_cols = pd.read_csv(training_data[0].name, nrows=2).columns.tolist()
return [
gr.Dropdown.update(visible=True, choices=data_cols, label="Map `text` column", value=data_cols[0]),
gr.Dropdown.update(visible=True, choices=data_cols, label="Map `target` column", value=data_cols[1]),
gr.Text.update(visible=False),
]
elif task == "lm_training":
data_cols = pd.read_csv(training_data[0].name, nrows=2).columns.tolist()
return [
gr.Dropdown.update(visible=True, choices=data_cols, label="Map `text` column", value=data_cols[0]),
gr.Dropdown.update(visible=False),
gr.Text.update(visible=False),
]
elif task == "dreambooth":
return [
gr.Dropdown.update(visible=False),
gr.Dropdown.update(visible=False),
gr.Text.update(visible=True, label="Concept Token", interactive=True),
]
else:
return [
gr.Dropdown.update(visible=False),
gr.Dropdown.update(visible=False),
gr.Text.update(visible=False),
]
def _estimate_costs(
training_data, validation_data, task, user_token, autotrain_username, training_params_txt, autotrain_backend
):
if autotrain_backend.lower() != "huggingface internal":
return [
gr.Markdown.update(
value="Cost estimation is not available for this backend",
visible=True,
),
gr.Number.update(visible=False),
]
try:
logger.info("Estimating costs....")
if training_data is None:
return [
gr.Markdown.update(
value="Could not estimate cost. Please add training data",
visible=True,
),
gr.Number.update(visible=False),
]
if validation_data is None:
validation_data = []
training_params = json.loads(training_params_txt)
if len(training_params) == 0:
return [
gr.Markdown.update(
value="Could not estimate cost. Please add atleast one job",
visible=True,
),
gr.Number.update(visible=False),
]
elif len(training_params) == 1:
if "num_models" in training_params[0]:
num_models = training_params[0]["num_models"]
else:
num_models = 1
else:
num_models = len(training_params)
task = APP_TASKS_MAPPING[task]
num_samples = 0
logger.info("Estimating number of samples")
if task in ("text_multi_class_classification", "lm_training"):
for _f in training_data:
num_samples += pd.read_csv(_f.name).shape[0]
for _f in validation_data:
num_samples += pd.read_csv(_f.name).shape[0]
elif task == "image_multi_class_classification":
logger.info(f"training_data: {training_data}")
if len(training_data) > 1:
return [
gr.Markdown.update(
value="Only one training file is supported for image classification",
visible=True,
),
gr.Number.update(visible=False),
]
if len(validation_data) > 1:
return [
gr.Markdown.update(
value="Only one validation file is supported for image classification",
visible=True,
),
gr.Number.update(visible=False),
]
for _f in training_data:
zip_ref = zipfile.ZipFile(_f.name, "r")
for _ in zip_ref.namelist():
num_samples += 1
for _f in validation_data:
zip_ref = zipfile.ZipFile(_f.name, "r")
for _ in zip_ref.namelist():
num_samples += 1
elif task == "dreambooth":
num_samples = len(training_data)
else:
raise NotImplementedError
logger.info(f"Estimating costs for: num_models: {num_models}, task: {task}, num_samples: {num_samples}")
estimated_cost = get_project_cost(
username=autotrain_username,
token=user_token,
task=task,
num_samples=num_samples,
num_models=num_models,
)
logger.info(f"Estimated_cost: {estimated_cost}")
return [
gr.Markdown.update(
value=f"Estimated cost: ${estimated_cost:.2f}. Note: clicking on 'Create Project' will start training and incur charges!",
visible=True,
),
gr.Number.update(visible=False),
]
except Exception as e:
logger.error(e)
logger.error("Could not estimate cost, check inputs")
return [
gr.Markdown.update(
value="Could not estimate cost, check inputs",
visible=True,
),
gr.Number.update(visible=False),
]
def get_job_params(param_choice, training_params, task):
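    """Attach the task to each job's parameters; AutoTrain mode allows a single job, manual mode drops ``hub_model``."""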
if param_choice == "autotrain":
if len(training_params) > 1:
raise ValueError("❌ Only one job parameter is allowed for AutoTrain.")
training_params[0].update({"task": task})
elif param_choice.lower() == "manual":
for i in range(len(training_params)):
training_params[i].update({"task": task})
if "hub_model" in training_params[i]:
# remove hub_model from training_params
training_params[i].pop("hub_model")
return training_params
def _update_project_name():
random_project_name = "-".join(
["".join(random.choices(string.ascii_lowercase + string.digits, k=4)) for _ in range(3)]
)
# check if training tracker exists
if os.path.exists(os.path.join("/tmp", "training")):
return [
gr.Text.update(value=random_project_name, visible=True, interactive=True),
gr.Button.update(interactive=False),
]
return [
gr.Text.update(value=random_project_name, visible=True, interactive=True),
gr.Button.update(interactive=True),
]
def _update_hub_model_choices(task, model_choice):
task = APP_TASKS_MAPPING[task]
logger.info(f"Updating hub model choices for task: {task}, model_choice: {model_choice}")
if model_choice.lower() == "autotrain":
return gr.Dropdown.update(
visible=False,
interactive=False,
)
if task == "text_multi_class_classification":
hub_models1 = list_models(filter="fill-mask", sort="downloads", direction=-1, limit=100)
hub_models2 = list_models(filter="text-classification", sort="downloads", direction=-1, limit=100)
hub_models = list(hub_models1) + list(hub_models2)
elif task == "lm_training":
hub_models = list(list_models(filter="text-generation", sort="downloads", direction=-1, limit=100))
elif task == "image_multi_class_classification":
hub_models = list(list_models(filter="image-classification", sort="downloads", direction=-1, limit=100))
elif task == "dreambooth":
hub_models = list(list_models(filter="text-to-image", sort="downloads", direction=-1, limit=100))
else:
raise NotImplementedError
# sort by number of downloads in descending order
hub_models = [{"id": m.modelId, "downloads": m.downloads} for m in hub_models if m.private is False]
hub_models = sorted(hub_models, key=lambda x: x["downloads"], reverse=True)
if task == "dreambooth":
choices = ["stabilityai/stable-diffusion-xl-base-1.0"] + [m["id"] for m in hub_models]
value = choices[0]
return gr.Dropdown.update(
choices=choices,
value=value,
visible=True,
interactive=True,
)
return gr.Dropdown.update(
choices=[m["id"] for m in hub_models],
value=hub_models[0]["id"],
visible=True,
interactive=True,
)
def _update_backend(backend):
if backend != "Hugging Face Internal":
return [
gr.Dropdown.update(
visible=True,
interactive=True,
choices=["HuggingFace Hub"],
value="HuggingFace Hub",
),
gr.Dropdown.update(
visible=True,
interactive=True,
choices=["Manual"],
value="Manual",
),
]
return [
gr.Dropdown.update(
visible=True,
interactive=True,
),
gr.Dropdown.update(
visible=True,
interactive=True,
),
]
def _create_project(
autotrain_username,
valid_can_pay,
project_name,
user_token,
task,
training_data,
validation_data,
col_map_text,
col_map_label,
concept_token,
training_params_txt,
hub_model,
estimated_cost,
autotrain_backend,
):
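    """Validate payment/credits, build the dataset for the selected task, and create the AutoTrain project."""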
task = APP_TASKS_MAPPING[task]
valid_can_pay = valid_can_pay.split(",")
can_pay = autotrain_username in valid_can_pay
logger.info(f"🚨🚨🚨Creating project: {project_name}")
logger.info(f"🚨Task: {task}")
logger.info(f"🚨Training data: {training_data}")
logger.info(f"🚨Validation data: {validation_data}")
logger.info(f"🚨Training params: {training_params_txt}")
logger.info(f"🚨Hub model: {hub_model}")
logger.info(f"🚨Estimated cost: {estimated_cost}")
logger.info(f"🚨:Can pay: {can_pay}")
if can_pay is False and estimated_cost > 0:
raise gr.Error("❌ You do not have enough credits to create this project. Please add a valid payment method.")
training_params = json.loads(training_params_txt)
if len(training_params) == 0:
raise gr.Error("Please add atleast one job")
elif len(training_params) == 1:
if "num_models" in training_params[0]:
param_choice = "autotrain"
else:
param_choice = "manual"
else:
param_choice = "manual"
if task == "image_multi_class_classification":
training_data = training_data[0].name
if validation_data is not None:
validation_data = validation_data[0].name
dset = AutoTrainImageClassificationDataset(
train_data=training_data,
token=user_token,
project_name=project_name,
username=autotrain_username,
valid_data=validation_data,
percent_valid=None, # TODO: add to UI
)
elif task == "text_multi_class_classification":
training_data = [f.name for f in training_data]
if validation_data is None:
validation_data = []
else:
validation_data = [f.name for f in validation_data]
dset = AutoTrainDataset(
train_data=training_data,
task=task,
token=user_token,
project_name=project_name,
username=autotrain_username,
column_mapping={"text": col_map_text, "label": col_map_label},
valid_data=validation_data,
percent_valid=None, # TODO: add to UI
)
elif task == "lm_training":
training_data = [f.name for f in training_data]
if validation_data is None:
validation_data = []
else:
validation_data = [f.name for f in validation_data]
dset = AutoTrainDataset(
train_data=training_data,
task=task,
token=user_token,
project_name=project_name,
username=autotrain_username,
column_mapping={"text": col_map_text},
valid_data=validation_data,
percent_valid=None, # TODO: add to UI
)
elif task == "dreambooth":
dset = AutoTrainDreamboothDataset(
concept_images=training_data,
concept_name=concept_token,
token=user_token,
project_name=project_name,
username=autotrain_username,
)
else:
raise NotImplementedError
dset.prepare()
project = Project(
dataset=dset,
param_choice=param_choice,
hub_model=hub_model,
job_params=get_job_params(param_choice, training_params, task),
)
if autotrain_backend.lower() == "huggingface internal":
project_id = project.create()
project.approve(project_id)
return gr.Markdown.update(
value=f"Project created successfully. Monitor progess on the [dashboard](https://ui.autotrain.huggingface.co/{project_id}/trainings).",
visible=True,
)
else:
project.create(local=True)
def get_variable_name(var, namespace):
for name in namespace:
if namespace[name] is var:
return name
return None
def disable_create_project_button():
return gr.Button.update(interactive=False)
def main():
with gr.Blocks(theme="freddyaboulton/dracula_revamped") as demo:
gr.Markdown("## 🤗 AutoTrain Advanced")
user_token = os.environ.get("HF_TOKEN", "")
if len(user_token) == 0:
user_token = get_user_token()
if user_token is None:
gr.Markdown(
"""Please login with a write [token](https://huggingface.co/settings/tokens).
Pass your HF token in an environment variable called `HF_TOKEN` and then restart this app.
"""
)
return demo
user_token, valid_can_pay, who_is_training = _login_user(user_token)
if user_token is None or len(user_token) == 0:
gr.Error("Please login with a write token.")
user_token = gr.Textbox(
value=user_token, type="password", lines=1, max_lines=1, visible=False, interactive=False
)
valid_can_pay = gr.Textbox(value=",".join(valid_can_pay), visible=False, interactive=False)
with gr.Row():
with gr.Column():
with gr.Row():
autotrain_username = gr.Dropdown(
label="AutoTrain Username",
choices=who_is_training,
value=who_is_training[0] if who_is_training else "",
)
autotrain_backend = gr.Dropdown(
label="AutoTrain Backend",
choices=["HuggingFace Internal", "HuggingFace Spaces"],
value="HuggingFace Internal",
interactive=True,
)
with gr.Row():
project_name = gr.Textbox(label="Project name", value="", lines=1, max_lines=1, interactive=True)
project_type = gr.Dropdown(
label="Project Type", choices=list(APP_TASKS.keys()), value=list(APP_TASKS.keys())[0]
)
task_type = gr.Dropdown(
label="Task",
choices=APP_TASKS[list(APP_TASKS.keys())[0]],
value=APP_TASKS[list(APP_TASKS.keys())[0]][0],
interactive=True,
)
model_choice = gr.Dropdown(
label="Model Choice",
choices=["AutoTrain", "HuggingFace Hub"],
value="AutoTrain",
visible=True,
interactive=True,
)
hub_model = gr.Dropdown(
label="Hub Model",
value="",
visible=False,
interactive=True,
elem_id="hub_model",
)
gr.Markdown("<hr>")
with gr.Row():
with gr.Column():
with gr.Tabs(elem_id="tabs"):
with gr.TabItem("Data"):
with gr.Column():
# file_type_training = gr.Radio(
# label="File Type",
# choices=["CSV", "JSONL"],
# value="CSV",
# visible=True,
# interactive=True,
# )
training_data = gr.File(
label="Training Data",
file_types=ALLOWED_FILE_TYPES,
file_count="multiple",
visible=True,
interactive=True,
elem_id="training_data_box",
)
with gr.Accordion("Validation Data (Optional)", open=False):
validation_data = gr.File(
label="Validation Data (Optional)",
file_types=ALLOWED_FILE_TYPES,
file_count="multiple",
visible=True,
interactive=True,
elem_id="validation_data_box",
)
with gr.Row():
col_map_text = gr.Dropdown(
label="Text Column", choices=[], visible=False, interactive=True
)
col_map_target = gr.Dropdown(
label="Target Column", choices=[], visible=False, interactive=True
)
concept_token = gr.Text(
value="", visible=False, interactive=True, lines=1, max_lines=1
)
with gr.TabItem("Params"):
with gr.Row():
source_language = gr.Dropdown(
label="Source Language",
choices=SUPPORTED_LANGUAGES[:-1],
value="en",
visible=True,
interactive=True,
elem_id="source_language",
)
num_models = gr.Slider(
label="Number of Models",
minimum=1,
maximum=25,
value=5,
step=1,
visible=True,
interactive=True,
elem_id="num_models",
)
target_language = gr.Dropdown(
label="Target Language",
choices=["fr"],
value="fr",
visible=False,
interactive=True,
elem_id="target_language",
)
image_size = gr.Number(
label="Image Size",
value=512,
visible=False,
interactive=True,
elem_id="image_size",
)
with gr.Row():
learning_rate = gr.Number(
label="Learning Rate",
value=5e-5,
visible=False,
interactive=True,
elem_id="learning_rate",
)
batch_size = gr.Number(
label="Train Batch Size",
value=32,
visible=False,
interactive=True,
elem_id="train_batch_size",
)
num_epochs = gr.Number(
label="Number of Epochs",
value=3,
visible=False,
interactive=True,
elem_id="num_train_epochs",
)
with gr.Row():
gradient_accumulation_steps = gr.Number(
label="Gradient Accumulation Steps",
value=1,
visible=False,
interactive=True,
elem_id="gradient_accumulation_steps",
)
percentage_warmup_steps = gr.Number(
label="Percentage of Warmup Steps",
value=0.1,
visible=False,
interactive=True,
elem_id="percentage_warmup",
)
weight_decay = gr.Number(
label="Weight Decay",
value=0.01,
visible=False,
interactive=True,
elem_id="weight_decay",
)
with gr.Row():
lora_r = gr.Number(
label="LoraR",
value=16,
visible=False,
interactive=True,
elem_id="lora_r",
)
lora_alpha = gr.Number(
label="LoraAlpha",
value=32,
visible=False,
interactive=True,
elem_id="lora_alpha",
)
lora_dropout = gr.Number(
label="Lora Dropout",
value=0.1,
visible=False,
interactive=True,
elem_id="lora_dropout",
)
with gr.Row():
db_num_steps = gr.Number(
label="Num Steps",
value=500,
visible=False,
interactive=True,
elem_id="num_steps",
)
with gr.Row():
optimizer = gr.Dropdown(
label="Optimizer",
choices=["adamw_torch", "adamw_hf", "sgd", "adafactor", "adagrad"],
value="adamw_torch",
visible=False,
interactive=True,
elem_id="optimizer",
)
scheduler = gr.Dropdown(
label="Scheduler",
choices=["linear", "cosine"],
value="linear",
visible=False,
interactive=True,
elem_id="scheduler",
)
add_job_button = gr.Button(
value="Add Job",
visible=True,
interactive=True,
elem_id="add_job",
)
# clear_jobs_button = gr.Button(
# value="Clear Jobs",
# visible=True,
# interactive=True,
# elem_id="clear_jobs",
# )
gr.Markdown("<hr>")
estimated_costs_md = gr.Markdown(value="Estimated Costs: N/A", visible=True, interactive=False)
estimated_costs_num = gr.Number(value=0, visible=False, interactive=False)
create_project_button = gr.Button(
value="Create Project",
visible=True,
interactive=True,
elem_id="create_project",
)
with gr.Column():
param_choice = gr.Dropdown(
label="Param Choice",
choices=["AutoTrain"],
value="AutoTrain",
visible=True,
interactive=True,
)
training_params_txt = gr.Text(value="[]", visible=False, interactive=False)
training_params_md = gr.DataFrame(visible=False, interactive=False)
final_output = gr.Markdown(value="", visible=True, interactive=False)
hyperparameters = [
hub_model,
num_models,
source_language,
target_language,
learning_rate,
batch_size,
num_epochs,
gradient_accumulation_steps,
lora_r,
lora_alpha,
lora_dropout,
optimizer,
scheduler,
percentage_warmup_steps,
weight_decay,
db_num_steps,
image_size,
]
def _update_params(params_data):
_task = params_data[task_type]
_task = APP_TASKS_MAPPING[_task]
params = Params(
task=_task,
param_choice="autotrain" if params_data[param_choice] == "AutoTrain" else "manual",
model_choice="autotrain" if params_data[model_choice] == "AutoTrain" else "hub_model",
)
params = params.get()
visible_params = []
for param in hyperparameters:
if param.elem_id in params.keys():
visible_params.append(param.elem_id)
op = [h.update(visible=h.elem_id in visible_params) for h in hyperparameters]
op.append(add_job_button.update(visible=True))
op.append(training_params_md.update(visible=False))
op.append(training_params_txt.update(value="[]"))
return op
autotrain_backend.change(
_project_type_update,
inputs=[project_type, task_type, autotrain_backend],
outputs=[task_type, model_choice, param_choice, hub_model],
)
project_type.change(
_project_type_update,
inputs=[project_type, task_type, autotrain_backend],
outputs=[task_type, model_choice, param_choice, hub_model],
)
task_type.change(
_task_type_update,
inputs=[task_type, autotrain_backend],
outputs=[model_choice, param_choice, hub_model],
)
model_choice.change(
_update_param_choice,
inputs=[model_choice, autotrain_backend],
outputs=param_choice,
).then(
_update_hub_model_choices,
inputs=[task_type, model_choice],
outputs=hub_model,
)
param_choice.change(
_update_params,
inputs=set([task_type, param_choice, model_choice] + hyperparameters + [add_job_button]),
outputs=hyperparameters + [add_job_button, training_params_md, training_params_txt],
)
task_type.change(
_update_params,
inputs=set([task_type, param_choice, model_choice] + hyperparameters + [add_job_button]),
outputs=hyperparameters + [add_job_button, training_params_md, training_params_txt],
)
model_choice.change(
_update_params,
inputs=set([task_type, param_choice, model_choice] + hyperparameters + [add_job_button]),
outputs=hyperparameters + [add_job_button, training_params_md, training_params_txt],
)
def _add_job(params_data):
_task = params_data[task_type]
_task = APP_TASKS_MAPPING[_task]
_param_choice = "autotrain" if params_data[param_choice] == "AutoTrain" else "manual"
_model_choice = "autotrain" if params_data[model_choice] == "AutoTrain" else "hub_model"
if _model_choice == "hub_model" and params_data[hub_model] is None:
logger.error("Hub model is None")
return
_training_params = {}
params = Params(task=_task, param_choice=_param_choice, model_choice=_model_choice)
params = params.get()
for _param in hyperparameters:
if _param.elem_id in params.keys():
_training_params[_param.elem_id] = params_data[_param]
_training_params_md = json.loads(params_data[training_params_txt])
if _param_choice == "autotrain":
if len(_training_params_md) > 0:
_training_params_md[0] = _training_params
_training_params_md = _training_params_md[:1]
else:
_training_params_md.append(_training_params)
else:
_training_params_md.append(_training_params)
params_df = pd.DataFrame(_training_params_md)
# remove hub_model column
if "hub_model" in params_df.columns:
params_df = params_df.drop(columns=["hub_model"])
return [
gr.DataFrame.update(value=params_df, visible=True),
gr.Textbox.update(value=json.dumps(_training_params_md), visible=False),
]
add_job_button.click(
_add_job,
inputs=set(
[task_type, param_choice, model_choice] + hyperparameters + [training_params_md, training_params_txt]
),
outputs=[training_params_md, training_params_txt],
)
col_map_components = [
col_map_text,
col_map_target,
concept_token,
]
training_data.change(
_update_col_map,
inputs=[training_data, task_type],
outputs=col_map_components,
)
task_type.change(
_update_col_map,
inputs=[training_data, task_type],
outputs=col_map_components,
)
estimate_costs_inputs = [
training_data,
validation_data,
task_type,
user_token,
autotrain_username,
training_params_txt,
autotrain_backend,
]
estimate_costs_outputs = [estimated_costs_md, estimated_costs_num]
training_data.change(_estimate_costs, inputs=estimate_costs_inputs, outputs=estimate_costs_outputs)
validation_data.change(_estimate_costs, inputs=estimate_costs_inputs, outputs=estimate_costs_outputs)
training_params_txt.change(_estimate_costs, inputs=estimate_costs_inputs, outputs=estimate_costs_outputs)
task_type.change(_estimate_costs, inputs=estimate_costs_inputs, outputs=estimate_costs_outputs)
add_job_button.click(_estimate_costs, inputs=estimate_costs_inputs, outputs=estimate_costs_outputs)
create_project_button.click(disable_create_project_button, None, create_project_button).then(
_create_project,
inputs=[
autotrain_username,
valid_can_pay,
project_name,
user_token,
task_type,
training_data,
validation_data,
col_map_text,
col_map_target,
concept_token,
training_params_txt,
hub_model,
estimated_costs_num,
autotrain_backend,
],
outputs=final_output,
)
demo.load(
_update_project_name,
outputs=[project_name, create_project_button],
)
return demo
| autotrain-advanced-main | src/autotrain/app.py |
TRAIN_SPLIT = "train"
VALID_SPLIT = "valid"
TEST_SPLIT = "test"
| autotrain-advanced-main | src/autotrain/splits.py |
import os
import pty
import random
import shutil
import string
import subprocess
import gradio as gr
from huggingface_hub import HfApi, whoami
# ❯ autotrain dreambooth --help
# usage: autotrain <command> [<args>] dreambooth [-h] --model MODEL [--revision REVISION] [--tokenizer TOKENIZER] --image-path IMAGE_PATH
# [--class-image-path CLASS_IMAGE_PATH] --prompt PROMPT [--class-prompt CLASS_PROMPT]
# [--num-class-images NUM_CLASS_IMAGES] [--class-labels-conditioning CLASS_LABELS_CONDITIONING]
# [--prior-preservation] [--prior-loss-weight PRIOR_LOSS_WEIGHT] --output OUTPUT [--seed SEED]
# --resolution RESOLUTION [--center-crop] [--train-text-encoder] [--batch-size BATCH_SIZE]
# [--sample-batch-size SAMPLE_BATCH_SIZE] [--epochs EPOCHS] [--num-steps NUM_STEPS]
# [--checkpointing-steps CHECKPOINTING_STEPS] [--resume-from-checkpoint RESUME_FROM_CHECKPOINT]
# [--gradient-accumulation GRADIENT_ACCUMULATION] [--gradient-checkpointing] [--lr LR] [--scale-lr]
# [--scheduler SCHEDULER] [--warmup-steps WARMUP_STEPS] [--num-cycles NUM_CYCLES] [--lr-power LR_POWER]
# [--dataloader-num-workers DATALOADER_NUM_WORKERS] [--use-8bit-adam] [--adam-beta1 ADAM_BETA1]
# [--adam-beta2 ADAM_BETA2] [--adam-weight-decay ADAM_WEIGHT_DECAY] [--adam-epsilon ADAM_EPSILON]
# [--max-grad-norm MAX_GRAD_NORM] [--allow-tf32]
# [--prior-generation-precision PRIOR_GENERATION_PRECISION] [--local-rank LOCAL_RANK] [--xformers]
# [--pre-compute-text-embeddings] [--tokenizer-max-length TOKENIZER_MAX_LENGTH]
# [--text-encoder-use-attention-mask] [--rank RANK] [--xl] [--fp16] [--bf16] [--hub-token HUB_TOKEN]
# [--hub-model-id HUB_MODEL_ID] [--push-to-hub] [--validation-prompt VALIDATION_PROMPT]
# [--num-validation-images NUM_VALIDATION_IMAGES] [--validation-epochs VALIDATION_EPOCHS]
# [--checkpoints-total-limit CHECKPOINTS_TOTAL_LIMIT] [--validation-images VALIDATION_IMAGES]
# [--logging]
REPO_ID = os.environ.get("SPACE_ID")
ALLOWED_FILE_TYPES = ["png", "jpg", "jpeg"]
MODELS = [
"stabilityai/stable-diffusion-xl-base-1.0",
"runwayml/stable-diffusion-v1-5",
"stabilityai/stable-diffusion-2-1",
"stabilityai/stable-diffusion-2-1-base",
]
WELCOME_TEXT = """
Welcome to the AutoTrain DreamBooth! This app allows you to train a DreamBooth model using AutoTrain.
The app runs on HuggingFace Spaces. Your data is not stored anywhere.
The trained model (LoRA) will be pushed to your HuggingFace Hub account.
You need to use your HuggingFace Hub write [token](https://huggingface.co/settings/tokens) to push the model to your account.
NOTE: This space requires GPU to train. Please make sure you have GPU enabled in space settings.
Please make sure to shut down or pause the space to avoid any additional charges.
"""
STEPS = """
1. [Duplicate](https://huggingface.co/spaces/autotrain-projects/dreambooth?duplicate=true) this space
2. Upgrade the space to GPU
3. Enter your HuggingFace Hub write token
4. Upload images and adjust prompt (remember the prompt!)
5. Click on Train and wait for the training to finish
6. Go to your HuggingFace Hub account to find the trained model
NOTE: For any issues or feature requests, please open an issue [here](https://github.com/huggingface/autotrain-advanced/issues)
"""
def _update_project_name():
random_project_name = "-".join(
["".join(random.choices(string.ascii_lowercase + string.digits, k=4)) for _ in range(3)]
)
# check if training tracker exists
if os.path.exists(os.path.join("/tmp", "training")):
return [
gr.Text.update(value=random_project_name, visible=True, interactive=True),
gr.Button.update(interactive=False),
]
return [
gr.Text.update(value=random_project_name, visible=True, interactive=True),
gr.Button.update(interactive=True),
]
def run_command(cmd):
cmd = [str(c) for c in cmd]
print(f"Running command: {' '.join(cmd)}")
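    # Attach the child process to a pseudo-terminal so its output is streamed
    # back as it is produced, as if it were running in an interactive shell.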
master, slave = pty.openpty()
p = subprocess.Popen(cmd, stdout=slave, stderr=slave)
os.close(slave)
while p.poll() is None:
try:
output = os.read(master, 1024).decode()
except OSError:
# Handle exception here, e.g. the pty was closed
break
else:
print(output, end="")
def _run_training(
hub_token,
project_name,
model,
images,
prompt,
learning_rate,
num_steps,
batch_size,
gradient_accumulation_steps,
prior_preservation,
scale_lr,
use_8bit_adam,
train_text_encoder,
gradient_checkpointing,
center_crop,
prior_loss_weight,
num_cycles,
lr_power,
adam_beta1,
adam_beta2,
adam_weight_decay,
adam_epsilon,
max_grad_norm,
warmup_steps,
scheduler,
resolution,
fp16,
):
if REPO_ID == "autotrain-projects/dreambooth":
return gr.Markdown.update(
value="❌ Please [duplicate](https://huggingface.co/spaces/autotrain-projects/dreambooth?duplicate=true) this space before training."
)
api = HfApi(token=hub_token)
if os.path.exists(os.path.join("/tmp", "training")):
return gr.Markdown.update(value="❌ Another training job is already running in this space.")
with open(os.path.join("/tmp", "training"), "w") as f:
f.write("training")
hub_model_id = whoami(token=hub_token)["name"] + "/" + str(project_name).strip()
image_path = "/tmp/data"
os.makedirs(image_path, exist_ok=True)
output_dir = "/tmp/model"
os.makedirs(output_dir, exist_ok=True)
for image in images:
shutil.copy(image.name, image_path)
cmd = [
"autotrain",
"dreambooth",
"--model",
model,
"--output",
output_dir,
"--image-path",
image_path,
"--prompt",
        prompt,
        "--resolution",
        resolution,
"--batch-size",
batch_size,
"--num-steps",
num_steps,
"--gradient-accumulation",
gradient_accumulation_steps,
"--lr",
learning_rate,
"--scheduler",
scheduler,
"--warmup-steps",
warmup_steps,
"--num-cycles",
num_cycles,
"--lr-power",
lr_power,
"--adam-beta1",
adam_beta1,
"--adam-beta2",
adam_beta2,
"--adam-weight-decay",
adam_weight_decay,
"--adam-epsilon",
adam_epsilon,
"--max-grad-norm",
max_grad_norm,
"--prior-loss-weight",
prior_loss_weight,
"--push-to-hub",
"--hub-token",
hub_token,
"--hub-model-id",
hub_model_id,
]
if prior_preservation:
cmd.append("--prior-preservation")
if scale_lr:
cmd.append("--scale-lr")
if use_8bit_adam:
cmd.append("--use-8bit-adam")
if train_text_encoder:
cmd.append("--train-text-encoder")
if gradient_checkpointing:
cmd.append("--gradient-checkpointing")
if center_crop:
cmd.append("--center-crop")
if fp16:
cmd.append("--fp16")
try:
run_command(cmd)
# delete the training tracker file in /tmp/
os.remove(os.path.join("/tmp", "training"))
# switch off space
if REPO_ID is not None:
api.pause_space(repo_id=REPO_ID)
return gr.Markdown.update(value=f"✅ Training finished! Model pushed to {hub_model_id}")
except Exception as e:
print(e)
print("Error running command")
# delete the training tracker file in /tmp/
os.remove(os.path.join("/tmp", "training"))
return gr.Markdown.update(value="❌ Error running command. Please try again.")
def main():
with gr.Blocks(theme="freddyaboulton/dracula_revamped") as demo:
gr.Markdown("## 🤗 AutoTrain DreamBooth")
gr.Markdown(WELCOME_TEXT)
with gr.Accordion("Steps", open=False):
gr.Markdown(STEPS)
hub_token = gr.Textbox(
label="Hub Token",
value="",
lines=1,
max_lines=1,
interactive=True,
type="password",
)
with gr.Row():
with gr.Column():
project_name = gr.Textbox(
label="Project name",
value="",
lines=1,
max_lines=1,
interactive=True,
)
model = gr.Dropdown(
label="Model",
choices=MODELS,
value=MODELS[0],
visible=True,
interactive=True,
elem_id="model",
                    allow_custom_value=True,
)
images = gr.File(
label="Images",
file_types=ALLOWED_FILE_TYPES,
file_count="multiple",
visible=True,
interactive=True,
)
with gr.Column():
prompt = gr.Textbox(
label="Prompt",
placeholder="photo of sks dog",
lines=1,
)
with gr.Row():
learning_rate = gr.Number(
label="Learning Rate",
value=1e-4,
visible=True,
interactive=True,
elem_id="learning_rate",
)
num_steps = gr.Number(
label="Number of Steps",
value=500,
visible=True,
interactive=True,
elem_id="num_steps",
precision=0,
)
batch_size = gr.Number(
label="Batch Size",
value=1,
visible=True,
interactive=True,
elem_id="batch_size",
precision=0,
)
with gr.Row():
gradient_accumulation_steps = gr.Number(
label="Gradient Accumulation Steps",
value=4,
visible=True,
interactive=True,
elem_id="gradient_accumulation_steps",
precision=0,
)
resolution = gr.Number(
label="Resolution",
value=1024,
visible=True,
interactive=True,
elem_id="resolution",
precision=0,
)
scheduler = gr.Dropdown(
label="Scheduler",
choices=["cosine", "linear", "constant"],
value="constant",
visible=True,
interactive=True,
elem_id="scheduler",
)
with gr.Column():
with gr.Group():
fp16 = gr.Checkbox(
label="FP16",
value=True,
visible=True,
interactive=True,
elem_id="fp16",
)
prior_preservation = gr.Checkbox(
label="Prior Preservation",
value=False,
visible=True,
interactive=True,
elem_id="prior_preservation",
)
scale_lr = gr.Checkbox(
label="Scale LR",
value=False,
visible=True,
interactive=True,
elem_id="scale_lr",
)
use_8bit_adam = gr.Checkbox(
label="Use 8bit Adam",
value=True,
visible=True,
interactive=True,
elem_id="use_8bit_adam",
)
train_text_encoder = gr.Checkbox(
label="Train Text Encoder",
value=False,
visible=True,
interactive=True,
elem_id="train_text_encoder",
)
gradient_checkpointing = gr.Checkbox(
label="Gradient Checkpointing",
value=False,
visible=True,
interactive=True,
elem_id="gradient_checkpointing",
)
center_crop = gr.Checkbox(
label="Center Crop",
value=False,
visible=True,
interactive=True,
elem_id="center_crop",
)
with gr.Accordion("Advanced Parameters", open=False):
with gr.Row():
prior_loss_weight = gr.Number(
label="Prior Loss Weight",
value=1.0,
visible=True,
interactive=True,
elem_id="prior_loss_weight",
)
num_cycles = gr.Number(
label="Num Cycles",
value=1,
visible=True,
interactive=True,
elem_id="num_cycles",
precision=0,
)
lr_power = gr.Number(
label="LR Power",
value=1,
visible=True,
interactive=True,
elem_id="lr_power",
)
adam_beta1 = gr.Number(
label="Adam Beta1",
value=0.9,
visible=True,
interactive=True,
elem_id="adam_beta1",
)
adam_beta2 = gr.Number(
label="Adam Beta2",
value=0.999,
visible=True,
interactive=True,
elem_id="adam_beta2",
)
adam_weight_decay = gr.Number(
label="Adam Weight Decay",
value=1e-2,
visible=True,
interactive=True,
elem_id="adam_weight_decay",
)
adam_epsilon = gr.Number(
label="Adam Epsilon",
value=1e-8,
visible=True,
interactive=True,
elem_id="adam_epsilon",
)
max_grad_norm = gr.Number(
label="Max Grad Norm",
value=1,
visible=True,
interactive=True,
elem_id="max_grad_norm",
)
warmup_steps = gr.Number(
label="Warmup Steps",
value=0,
visible=True,
interactive=True,
elem_id="warmup_steps",
precision=0,
)
train_button = gr.Button(value="Train", elem_id="train")
output_md = gr.Markdown("## Output")
inputs = [
hub_token,
project_name,
model,
images,
prompt,
learning_rate,
num_steps,
batch_size,
gradient_accumulation_steps,
prior_preservation,
scale_lr,
use_8bit_adam,
train_text_encoder,
gradient_checkpointing,
center_crop,
prior_loss_weight,
num_cycles,
lr_power,
adam_beta1,
adam_beta2,
adam_weight_decay,
adam_epsilon,
max_grad_norm,
warmup_steps,
scheduler,
resolution,
fp16,
]
train_button.click(_run_training, inputs=inputs, outputs=output_md)
demo.load(
_update_project_name,
outputs=[project_name, train_button],
)
return demo
if __name__ == "__main__":
demo = main()
demo.launch()
| autotrain-advanced-main | src/autotrain/dreambooth_app.py |
APP_AUTOTRAIN_USERNAME = """Please choose the user or organization who is creating the AutoTrain Project.
If a paid tier is used, this user or organization will be billed.
"""
APP_PROJECT_NAME = """A unique name for the AutoTrain Project.
This name will be used to identify the project in the AutoTrain dashboard."""
APP_IMAGE_CLASSIFICATION_DATA_HELP = """The data for the Image Classification task should be in the following format:
- The data should be in a zip file.
- The zip file should contain multiple folders (the classes), each folder should contain images of a single class.
- The name of the folder should be the name of the class.
- The images must be jpeg, jpg or png.
- There should be at least 5 images per class.
- There should not be any other files in the zip file.
- There should not be any other folders inside the zip folder.
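An example layout inside the zip file (illustrative class and file names):
- cats/cat_1.jpg, cats/cat_2.jpg, ...
- dogs/dog_1.jpg, dogs/dog_2.jpg, ...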
"""
APP_LM_TRAINING_TYPE = """There are two types of Language Model Training:
- generic
- chat
In the generic mode, you provide a CSV with a text column that you have already formatted for training a language model.
In the chat mode, you provide a CSV with two or three text columns: prompt, context (optional) and response.
The context column can be left empty for samples that do not need it. You can also provide a "prompt start" column; if present, its value is prepended to the prompt column.
Please see [this](https://huggingface.co/datasets/tatsu-lab/alpaca) dataset, which contains examples of both formats.
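An example of the chat format (illustrative rows):
prompt,context,response
"Summarize the text.","AutoTrain trains models without writing code.","AutoTrain is a no-code training tool."
"What is 2 + 2?",,"4"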
"""
| autotrain-advanced-main | src/autotrain/help.py |
SUPPORTED_LANGUAGES = [
"en",
"ar",
"bn",
"de",
"es",
"fi",
"fr",
"hi",
"it",
"ja",
"ko",
"nl",
"pt",
"sv",
"tr",
"zh",
"unk",
]
| autotrain-advanced-main | src/autotrain/languages.py |
"""
Copyright 2023 The HuggingFace Team
"""
import json
import os
import time
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import pandas as pd
from codecarbon import EmissionsTracker
from autotrain import logger
from autotrain.backend import SpaceRunner
from autotrain.dataset import AutoTrainDataset, AutoTrainDreamboothDataset, AutoTrainImageClassificationDataset
from autotrain.languages import SUPPORTED_LANGUAGES
from autotrain.tasks import TASKS
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.utils import http_get, http_post
@dataclass
class AutoTrainProject:
dataset: Union[AutoTrainDataset, AutoTrainDreamboothDataset, AutoTrainImageClassificationDataset]
job_params: pd.DataFrame
def __post_init__(self):
self.token = self.dataset.token
self.project_name = self.dataset.project_name
self.username = self.dataset.username
self.task = self.dataset.task
if isinstance(self.dataset, AutoTrainDataset):
self.col_mapping = self.dataset.column_mapping
self.data_path = f"{self.username}/autotrain-data-{self.project_name}"
self.backend = self.job_params.loc[0, "backend"]
if "model_choice" in self.job_params.columns:
self.model_choice = self.job_params.loc[0, "model_choice"]
if "param_choice" in self.job_params.columns:
self.param_choice = self.job_params.loc[0, "param_choice"]
self.task_id = TASKS.get(self.task)
self.num_jobs = len(self.job_params)
if self.task in ("text_multi_class_classification", "text_binary_classification"):
self.col_map_text = "autotrain_text"
self.col_map_target = "autotrain_label"
if self.task == "lm_training":
self.col_map_text = "autotrain_text"
if self.task.startswith("tabular_"):
self.col_map_id = "autotrain_id"
_tabular_target_cols = ["autotrain_label"]
if isinstance(self.col_mapping["label"], str) or len(self.col_mapping["label"]) > 1:
_tabular_target_cols = [f"autotrain_label_{i}" for i in range(len(self.col_mapping["label"]))]
self.col_map_target = _tabular_target_cols
self.spaces_backends = {
"A10G Large": "spaces-a10gl",
"A10G Small": "spaces-a10gs",
"A100 Large": "spaces-a100",
"T4 Medium": "spaces-t4m",
"T4 Small": "spaces-t4s",
"CPU Upgrade": "spaces-cpu",
"CPU (Free)": "spaces-cpuf",
# "Local": "local",
# "AutoTrain": "autotrain",
}
self.job_params_json = self.job_params.to_json(orient="records")
logger.info(self.job_params_json)
def _munge_common_params(self, job_idx):
_params = json.loads(self.job_params_json)[job_idx]
_params["token"] = self.token
_params["project_name"] = f"{self.project_name}-{job_idx}"
_params["push_to_hub"] = True
_params["repo_id"] = f"{self.username}/{self.project_name}-{job_idx}"
_params["data_path"] = self.data_path
_params["username"] = self.username
return _params
def _munge_params_llm(self, job_idx):
_params = self._munge_common_params(job_idx)
_params["model"] = self.model_choice
_params["text_column"] = self.col_map_text
if "trainer" in _params:
_params["trainer"] = _params["trainer"].lower()
if "use_fp16" in _params:
_params["fp16"] = _params["use_fp16"]
_params.pop("use_fp16")
if "int4_8" in _params:
if _params["int4_8"] == "int4":
_params["use_int4"] = True
_params["use_int8"] = False
elif _params["int4_8"] == "int8":
_params["use_int4"] = False
_params["use_int8"] = True
else:
_params["use_int4"] = False
_params["use_int8"] = False
_params.pop("int4_8")
return _params
def _munge_params_text_clf(self, job_idx):
_params = self._munge_common_params(job_idx)
_params["model"] = self.model_choice
_params["text_column"] = self.col_map_text
_params["target_column"] = self.col_map_target
_params["valid_split"] = "validation"
if "use_fp16" in _params:
_params["fp16"] = _params["use_fp16"]
_params.pop("use_fp16")
return _params
def _munge_params_tabular(self, job_idx):
_params = self._munge_common_params(job_idx)
_params["id_column"] = self.col_map_id
_params["target_columns"] = self.col_map_target
_params["valid_split"] = "validation"
if len(_params["categorical_imputer"].strip()) == 0 or _params["categorical_imputer"].lower() == "none":
_params["categorical_imputer"] = None
if len(_params["numerical_imputer"].strip()) == 0 or _params["numerical_imputer"].lower() == "none":
_params["numerical_imputer"] = None
if len(_params["numeric_scaler"].strip()) == 0 or _params["numeric_scaler"].lower() == "none":
_params["numeric_scaler"] = None
return _params
def _munge_params_dreambooth(self, job_idx):
_params = self._munge_common_params(job_idx)
_params["model"] = self.model_choice
_params["image_path"] = self.data_path
if "weight_decay" in _params:
_params["adam_weight_decay"] = _params["weight_decay"]
_params.pop("weight_decay")
return _params
def create_spaces(self):
_created_spaces = []
for job_idx in range(self.num_jobs):
if self.task_id == 9:
_params = self._munge_params_llm(job_idx)
_params = LLMTrainingParams.parse_obj(_params)
elif self.task_id in (1, 2):
_params = self._munge_params_text_clf(job_idx)
_params = TextClassificationParams.parse_obj(_params)
elif self.task_id in (13, 14, 15, 16, 26):
_params = self._munge_params_tabular(job_idx)
_params = TabularParams.parse_obj(_params)
elif self.task_id == 25:
_params = self._munge_params_dreambooth(job_idx)
_params = DreamBoothTrainingParams.parse_obj(_params)
else:
raise NotImplementedError
logger.info(f"Creating Space for job: {job_idx}")
logger.info(f"Using params: {_params}")
sr = SpaceRunner(params=_params, backend=self.spaces_backends[self.backend])
space_id = sr.prepare()
logger.info(f"Space created with id: {space_id}")
_created_spaces.append(space_id)
return _created_spaces
def create(self):
if self.backend == "AutoTrain":
raise NotImplementedError
if self.backend == "Local":
raise NotImplementedError
if self.backend in self.spaces_backends:
return self.create_spaces()
@dataclass
class Project:
dataset: Union[AutoTrainDataset, AutoTrainDreamboothDataset, AutoTrainImageClassificationDataset]
param_choice: Optional[str] = "autotrain"
hub_model: Optional[str] = None
job_params: Optional[List[Dict[str, str]]] = None
def __post_init__(self):
self.token = self.dataset.token
self.name = self.dataset.project_name
self.username = self.dataset.username
self.task = self.dataset.task
self.param_choice = self.param_choice.lower()
if self.hub_model is not None:
if len(self.hub_model) == 0:
self.hub_model = None
if self.job_params is None:
self.job_params = []
logger.info(f"🚀🚀🚀 Creating project {self.name}, task: {self.task}")
logger.info(f"🚀 Using username: {self.username}")
logger.info(f"🚀 Using param_choice: {self.param_choice}")
logger.info(f"🚀 Using hub_model: {self.hub_model}")
logger.info(f"🚀 Using job_params: {self.job_params}")
if self.token is None:
raise ValueError("❌ Please login using `huggingface-cli login`")
if self.hub_model is not None and len(self.job_params) == 0:
raise ValueError("❌ Job parameters are required when hub model is specified.")
if self.hub_model is None and len(self.job_params) > 1:
raise ValueError("❌ Only one job parameter is allowed in AutoTrain mode.")
if self.param_choice == "autotrain":
if "source_language" in self.job_params[0] and "target_language" not in self.job_params[0]:
self.language = self.job_params[0]["source_language"]
# remove source language from job params
self.job_params[0].pop("source_language")
elif "source_language" in self.job_params[0] and "target_language" in self.job_params[0]:
self.language = f'{self.job_params[0]["target_language"]}2{self.job_params[0]["source_language"]}'
# remove source and target language from job params
self.job_params[0].pop("source_language")
self.job_params[0].pop("target_language")
else:
self.language = "unk"
if "num_models" in self.job_params[0]:
self.max_models = self.job_params[0]["num_models"]
self.job_params[0].pop("num_models")
elif "num_models" not in self.job_params[0] and "source_language" in self.job_params[0]:
raise ValueError("❌ Please specify num_models in job_params when using AutoTrain model")
else:
self.language = "unk"
self.max_models = len(self.job_params)
def create_local(self, payload):
from autotrain.trainers.dreambooth import train_ui as train_dreambooth
from autotrain.trainers.image_classification import train as train_image_classification
from autotrain.trainers.lm_trainer import train as train_lm
from autotrain.trainers.text_classification import train as train_text_classification
# check if training tracker file exists in /tmp/
if os.path.exists(os.path.join("/tmp", "training")):
raise ValueError("❌ Another training job is already running in this workspace.")
if len(payload["config"]["params"]) > 1:
raise ValueError("❌ Only one job parameter is allowed in spaces/local mode.")
model_path = os.path.join("/tmp/model", payload["proj_name"])
os.makedirs(model_path, exist_ok=True)
co2_tracker = EmissionsTracker(save_to_file=False)
co2_tracker.start()
# create a training tracker file in /tmp/, using touch
with open(os.path.join("/tmp", "training"), "w") as f:
f.write("training")
if payload["task"] in [1, 2]:
_ = train_text_classification(
co2_tracker=co2_tracker,
payload=payload,
huggingface_token=self.token,
model_path=model_path,
)
elif payload["task"] in [17, 18]:
_ = train_image_classification(
co2_tracker=co2_tracker,
payload=payload,
huggingface_token=self.token,
model_path=model_path,
)
elif payload["task"] == 25:
_ = train_dreambooth(
co2_tracker=co2_tracker,
payload=payload,
huggingface_token=self.token,
model_path=model_path,
)
elif payload["task"] == 9:
_ = train_lm(
co2_tracker=co2_tracker,
payload=payload,
huggingface_token=self.token,
model_path=model_path,
)
else:
raise NotImplementedError
# remove the training tracker file in /tmp/, using rm
os.remove(os.path.join("/tmp", "training"))
def create(self, local=False):
"""Create a project and return it"""
logger.info(f"🚀 Creating project {self.name}, task: {self.task}")
        task_id = TASKS.get(self.task)
        if task_id is None:
            raise ValueError(f"❌ Invalid task specified. Please choose one of {list(TASKS.keys())}")
        language = str(self.language).strip().lower()
if self.hub_model is not None:
language = "unk"
if language not in SUPPORTED_LANGUAGES:
raise ValueError("❌ Invalid language. Please check supported languages in AutoTrain documentation.")
payload = {
"username": self.username,
"proj_name": self.name,
"task": task_id,
"config": {
"advanced": True,
"autotrain": True if self.param_choice == "autotrain" else False,
"language": language,
"max_models": self.max_models,
"hub_model": self.hub_model,
"params": self.job_params,
},
}
logger.info(f"🚀 Creating project with payload: {payload}")
if local is True:
return self.create_local(payload=payload)
logger.info(f"🚀 Creating project with payload: {payload}")
json_resp = http_post(path="/projects/create", payload=payload, token=self.token).json()
proj_name = json_resp["proj_name"]
proj_id = json_resp["id"]
created = json_resp["created"]
if created is True:
return proj_id
raise ValueError(f"❌ Project with name {proj_name} already exists.")
def approve(self, project_id):
# Process data
_ = http_post(
path=f"/projects/{project_id}/data/start_processing",
token=self.token,
).json()
logger.info("⏳ Waiting for data processing to complete ...")
is_data_processing_success = False
while is_data_processing_success is not True:
project_status = http_get(
path=f"/projects/{project_id}",
token=self.token,
).json()
# See database.database.enums.ProjectStatus for definitions of `status`
if project_status["status"] == 3:
is_data_processing_success = True
logger.info("✅ Data processing complete!")
time.sleep(3)
logger.info(f"🚀 Approving project # {project_id}")
# Approve training job
_ = http_post(
path=f"/projects/{project_id}/start_training",
token=self.token,
).json()
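# A minimal, illustrative sketch (not part of the original module) of the
# job_params DataFrame that AutoTrainProject expects: one row per training job
# with a "backend" column and, when applicable, "model_choice" and
# "param_choice" columns plus the hyperparameters for the chosen task. The
# hyperparameter column names must match the fields of the task's params
# class; the ones used here are placeholders.
def _example_job_params() -> pd.DataFrame:
    return pd.DataFrame(
        [
            {
                "backend": "A10G Large",
                "model_choice": "bert-base-uncased",
                "param_choice": "Manual",
                "learning_rate": 3e-5,
                "num_train_epochs": 3,
            }
        ]
    )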
| autotrain-advanced-main | src/autotrain/project.py |
def test_dummy():
assert 1 + 1 == 2
| autotrain-advanced-main | src/autotrain/tests/test_dummy.py |
import os
from argparse import ArgumentParser
from . import BaseAutoTrainCommand
def run_app_command_factory(args):
return RunAutoTrainAppCommand(
args.port,
args.host,
args.task,
)
class RunAutoTrainAppCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_app_parser = parser.add_parser(
"app",
description="✨ Run AutoTrain app",
)
run_app_parser.add_argument(
"--port",
type=int,
default=7860,
help="Port to run the app on",
required=False,
)
run_app_parser.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="Host to run the app on",
required=False,
)
run_app_parser.add_argument(
"--task",
type=str,
required=False,
help="Task to run",
)
run_app_parser.set_defaults(func=run_app_command_factory)
def __init__(self, port, host, task):
self.port = port
self.host = host
self.task = task
def run(self):
if os.environ.get("TASK") == "Dreambooth" or self.task == "dreambooth":
from ..apps.dreambooth import main
elif os.environ.get("TASK") == "LLM":
from ..apps.llm import main
elif os.environ.get("TASK") == "TEXT_CLASSIFICATION":
from ..apps.text_classification import main
else:
from ..apps.main import main
demo = main()
        demo.queue(concurrency_count=10).launch(server_name=self.host, server_port=self.port)
| autotrain-advanced-main | src/autotrain/cli/run_app.py |
from argparse import ArgumentParser
from autotrain.backend import SpaceRunner
from autotrain.trainers.generic.params import GenericParams
from autotrain.trainers.generic.utils import create_dataset_repo
from . import BaseAutoTrainCommand
BACKEND_CHOICES = [
"spaces-a10gl",
"spaces-a10gs",
"spaces-a100",
"spaces-t4m",
"spaces-t4s",
"spaces-cpu",
"spaces-cpuf",
]
def run_spacerunner_command_factory(args):
return RunAutoTrainSpaceRunnerCommand(args)
class RunAutoTrainSpaceRunnerCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = [
{
"arg": "--project-name",
"help": "Name of the project. Must be unique.",
"required": True,
"type": str,
},
{
"arg": "--script-path",
"help": "Path to the script",
"required": True,
"type": str,
},
{
"arg": "--username",
"help": "Hugging Face Username, can also be an organization name",
"required": True,
"type": str,
},
{
"arg": "--token",
"help": "Hugging Face API Token",
"required": True,
"type": str,
},
{
"arg": "--backend",
"help": "Hugging Face backend to use",
"required": True,
"type": str,
"choices": BACKEND_CHOICES,
},
{
"arg": "--env",
"help": "Environment variables, e.g. --env FOO=bar;FOO2=bar2;FOO3=bar3",
"required": False,
"type": str,
},
]
run_spacerunner_parser = parser.add_parser("spacerunner", description="✨ Run AutoTrain SpaceRunner")
for arg in arg_list:
names = [arg["arg"]] + arg.get("alias", [])
if "action" in arg:
run_spacerunner_parser.add_argument(
*names,
dest=arg["arg"].replace("--", "").replace("-", "_"),
help=arg["help"],
required=arg.get("required", False),
action=arg.get("action"),
default=arg.get("default"),
choices=arg.get("choices"),
)
else:
run_spacerunner_parser.add_argument(
*names,
dest=arg["arg"].replace("--", "").replace("-", "_"),
help=arg["help"],
required=arg.get("required", False),
type=arg.get("type"),
default=arg.get("default"),
choices=arg.get("choices"),
)
run_spacerunner_parser.set_defaults(func=run_spacerunner_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = []
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
        env_vars = {}
        if self.args.env:
            # Split only on the first "=" so that values may themselves contain "=".
            env_vars = dict(env_var.split("=", 1) for env_var in self.args.env.split(";"))
        self.args.env = env_vars
def run(self):
dataset_id = create_dataset_repo(
username=self.args.username,
project_name=self.args.project_name,
script_path=self.args.script_path,
token=self.args.token,
)
params = GenericParams(
project_name=self.args.project_name,
data_path=dataset_id,
username=self.args.username,
token=self.args.token,
backend=self.args.backend,
script_path=self.args.script_path,
env=self.args.env,
)
sr = SpaceRunner(params=params, backend=self.args.backend)
sr.prepare()
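# An illustrative example (not part of the original module) of how this
# subcommand could be invoked; the project name, username, token and env
# values are placeholders:
#   autotrain spacerunner --project-name my-project --script-path train.py \
#       --username my-username --token $HF_TOKEN --backend spaces-t4s \
#       --env "EPOCHS=3;LR=2e-5"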
| autotrain-advanced-main | src/autotrain/cli/run_spacerunner.py |
from argparse import ArgumentParser
from . import BaseAutoTrainCommand
def run_api_command_factory(args):
return RunAutoTrainAPICommand(
args.port,
args.host,
args.task,
)
class RunAutoTrainAPICommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_api_parser = parser.add_parser(
"api",
description="✨ Run AutoTrain API",
)
run_api_parser.add_argument(
"--port",
type=int,
default=7860,
help="Port to run the api on",
required=False,
)
run_api_parser.add_argument(
"--host",
type=str,
default="127.0.0.1",
help="Host to run the api on",
required=False,
)
run_api_parser.add_argument(
"--task",
type=str,
required=False,
help="Task to run",
)
run_api_parser.set_defaults(func=run_api_command_factory)
def __init__(self, port, host, task):
self.port = port
self.host = host
self.task = task
def run(self):
import uvicorn
from autotrain.api import api
uvicorn.run(api, host=self.host, port=self.port)
| autotrain-advanced-main | src/autotrain/cli/run_api.py |
import os
import sys
from argparse import ArgumentParser
import torch
from autotrain import logger
from autotrain.backend import EndpointsRunner, SpaceRunner
from . import BaseAutoTrainCommand
def run_tabular_command_factory(args):
return RunAutoTrainTabularCommand(args)
class RunAutoTrainTabularCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = [
{
"arg": "--train",
"help": "Train the model",
"required": False,
"action": "store_true",
},
{
"arg": "--deploy",
"help": "Deploy the model",
"required": False,
"action": "store_true",
},
{
"arg": "--inference",
"help": "Run inference",
"required": False,
"action": "store_true",
},
{
"arg": "--data-path",
"help": "Train dataset to use",
"required": False,
"type": str,
},
{
"arg": "--model",
"help": "Model name",
"required": True,
"type": str,
},
{
"arg": "--username",
"help": "Hugging Face Username",
"required": False,
"type": str,
},
{
"arg": "--seed",
"help": "Seed",
"required": False,
"type": int,
"default": 42,
},
{
"arg": "--train-split",
"help": "Train split",
"required": False,
"type": str,
"default": "train",
},
{
"arg": "--valid-split",
"help": "Validation split",
"required": False,
"type": str,
"default": "valid",
},
{
"arg": "--project-name",
"help": "Output directory",
"required": True,
"type": str,
"alias": ["--project"],
},
{
"arg": "--token",
"help": "Hub Token",
"required": False,
"type": str,
},
{
"arg": "--push-to-hub",
"help": "Push to hub",
"required": False,
"action": "store_true",
},
{
"arg": "--id-column",
"help": "ID column",
"required": True,
"type": str,
},
{
"arg": "--target-columns",
"help": "Target column(s), separated by commas",
"required": True,
"type": str,
},
{
"arg": "--repo-id",
"help": "Repo ID",
"required": False,
"type": str,
},
{
"arg": "--categorical-columns",
"help": "Categorical columns",
"required": False,
"type": str,
},
{
"arg": "--numerical-columns",
"help": "Numerical columns",
"required": False,
"type": str,
},
{
"arg": "--task",
"help": "Task",
"required": True,
"type": str,
},
{
"arg": "--num-trials",
"help": "Number of trials",
"required": False,
"type": int,
"default": 100,
},
{
"arg": "--time-limit",
"help": "Time limit",
"required": False,
"type": int,
"default": 3600,
},
{
"arg": "--categorical-imputer",
"help": "Categorical imputer",
"required": False,
"type": str,
},
{
"arg": "--numerical-imputer",
"help": "Numerical imputer",
"required": False,
"type": str,
},
{
"arg": "--numeric-scaler",
"help": "Numeric scaler",
"required": False,
"type": str,
},
{
"arg": "--backend",
"help": "Backend to use: default or spaces. Spaces backend requires push_to_hub and repo_id",
"required": False,
"type": str,
"default": "default",
},
]
run_tabular_parser = parser.add_parser("tabular", description="✨ Run AutoTrain Tabular Data Training")
for arg in arg_list:
if "action" in arg:
run_tabular_parser.add_argument(
arg["arg"],
help=arg["help"],
required=arg.get("required", False),
action=arg.get("action"),
default=arg.get("default"),
)
else:
run_tabular_parser.add_argument(
arg["arg"],
help=arg["help"],
required=arg.get("required", False),
type=arg.get("type"),
default=arg.get("default"),
)
run_tabular_parser.set_defaults(func=run_tabular_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = [
"train",
"deploy",
"inference",
"push_to_hub",
]
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError("Project name must be specified")
if self.args.data_path is None:
raise ValueError("Data path must be specified")
if self.args.model is None:
raise ValueError("Model must be specified")
if self.args.push_to_hub:
if self.args.username is None:
raise ValueError("Username must be specified for push to hub")
else:
raise ValueError("Must specify --train, --deploy or --inference")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.num_gpus = torch.cuda.device_count()
if len(str(self.args.token)) < 6:
self.args.token = os.environ.get("HF_TOKEN", None)
self.args.target_columns = [k.strip() for k in self.args.target_columns.split(",")]
def run(self):
from autotrain.trainers.tabular.__main__ import train as train_tabular
from autotrain.trainers.tabular.params import TabularParams
logger.info("Running Tabular Training...")
if self.args.train:
params = TabularParams(
data_path=self.args.data_path,
model=self.args.model,
username=self.args.username,
seed=self.args.seed,
train_split=self.args.train_split,
valid_split=self.args.valid_split,
project_name=self.args.project_name,
token=self.args.token,
push_to_hub=self.args.push_to_hub,
id_column=self.args.id_column,
target_columns=self.args.target_columns,
repo_id=self.args.repo_id,
categorical_columns=self.args.categorical_columns,
numerical_columns=self.args.numerical_columns,
task=self.args.task,
num_trials=self.args.num_trials,
time_limit=self.args.time_limit,
categorical_imputer=self.args.categorical_imputer,
numerical_imputer=self.args.numerical_imputer,
numeric_scaler=self.args.numeric_scaler,
)
if self.args.backend.startswith("spaces"):
logger.info("Creating space...")
sr = SpaceRunner(
params=params,
backend=self.args.backend,
)
space_id = sr.prepare()
logger.info(f"Training Space created. Check progress at https://hf.co/spaces/{space_id}")
sys.exit(0)
if self.args.backend.startswith("ep-"):
logger.info("Creating training endpoint...")
sr = EndpointsRunner(
params=params,
backend=self.args.backend,
)
sr.prepare()
logger.info("Training endpoint created.")
sys.exit(0)
params.save(output_dir=self.args.project_name)
train_tabular(params)
| autotrain-advanced-main | src/autotrain/cli/run_tabular.py |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseAutoTrainCommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
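# A minimal, illustrative subclass (not part of the original module) showing the
# pattern every real command in this package follows: a factory that builds the
# command from parsed args, a register_subcommand that wires up the parser, and
# a run method that does the work. The "hello" command and its --name flag are
# hypothetical.
def _run_hello_command_factory(args):
    return _RunHelloCommand(args.name)
class _RunHelloCommand(BaseAutoTrainCommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", description="✨ Example subcommand")
        hello_parser.add_argument("--name", type=str, default="world", required=False)
        hello_parser.set_defaults(func=_run_hello_command_factory)
    def __init__(self, name: str):
        self.name = name
    def run(self):
        print(f"Hello, {self.name}!")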
| autotrain-advanced-main | src/autotrain/cli/__init__.py |
import subprocess
from argparse import ArgumentParser
from autotrain import logger
from . import BaseAutoTrainCommand
def run_app_command_factory(args):
return RunSetupCommand(args.update_torch)
class RunSetupCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_setup_parser = parser.add_parser(
"setup",
description="✨ Run AutoTrain setup",
)
run_setup_parser.add_argument(
"--update-torch",
action="store_true",
help="Update PyTorch to latest version",
)
run_setup_parser.set_defaults(func=run_app_command_factory)
def __init__(self, update_torch: bool):
self.update_torch = update_torch
def run(self):
# install latest transformers
cmd = "pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers.git"
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logger.info("Installing latest transformers@main")
_, _ = pipe.communicate()
logger.info("Successfully installed latest transformers")
cmd = "pip uninstall -y peft && pip install git+https://github.com/huggingface/peft.git"
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logger.info("Installing latest peft@main")
_, _ = pipe.communicate()
logger.info("Successfully installed latest peft")
cmd = "pip uninstall -y diffusers && pip install git+https://github.com/huggingface/diffusers.git"
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logger.info("Installing latest diffusers@main")
_, _ = pipe.communicate()
logger.info("Successfully installed latest diffusers")
cmd = "pip uninstall -y trl && pip install git+https://github.com/huggingface/trl.git"
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logger.info("Installing latest trl@main")
_, _ = pipe.communicate()
logger.info("Successfully installed latest trl")
cmd = "pip install -U xformers"
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logger.info("Installing latest xformers")
_, _ = pipe.communicate()
logger.info("Successfully installed latest xformers")
if self.update_torch:
cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118"
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logger.info("Installing latest PyTorch")
_, _ = pipe.communicate()
logger.info("Successfully installed latest PyTorch")
| autotrain-advanced-main | src/autotrain/cli/run_setup.py |
import argparse
from .. import __version__
from .run_api import RunAutoTrainAPICommand
from .run_app import RunAutoTrainAppCommand
from .run_dreambooth import RunAutoTrainDreamboothCommand
from .run_image_classification import RunAutoTrainImageClassificationCommand
from .run_llm import RunAutoTrainLLMCommand
from .run_setup import RunSetupCommand
from .run_spacerunner import RunAutoTrainSpaceRunnerCommand
from .run_tabular import RunAutoTrainTabularCommand
from .run_text_classification import RunAutoTrainTextClassificationCommand
def main():
parser = argparse.ArgumentParser(
"AutoTrain advanced CLI",
usage="autotrain <command> [<args>]",
epilog="For more information about a command, run: `autotrain <command> --help`",
)
parser.add_argument("--version", "-v", help="Display AutoTrain version", action="store_true")
commands_parser = parser.add_subparsers(help="commands")
# Register commands
RunAutoTrainAppCommand.register_subcommand(commands_parser)
RunAutoTrainLLMCommand.register_subcommand(commands_parser)
RunSetupCommand.register_subcommand(commands_parser)
RunAutoTrainDreamboothCommand.register_subcommand(commands_parser)
RunAutoTrainAPICommand.register_subcommand(commands_parser)
RunAutoTrainTextClassificationCommand.register_subcommand(commands_parser)
RunAutoTrainImageClassificationCommand.register_subcommand(commands_parser)
RunAutoTrainTabularCommand.register_subcommand(commands_parser)
RunAutoTrainSpaceRunnerCommand.register_subcommand(commands_parser)
args = parser.parse_args()
if args.version:
print(__version__)
exit(0)
if not hasattr(args, "func"):
parser.print_help()
exit(1)
command = args.func(args)
command.run()
if __name__ == "__main__":
main()
| autotrain-advanced-main | src/autotrain/cli/autotrain.py |
import os
import subprocess
from argparse import ArgumentParser
import torch
from autotrain import logger
from . import BaseAutoTrainCommand
def run_image_classification_command_factory(args):
return RunAutoTrainImageClassificationCommand(args)
class RunAutoTrainImageClassificationCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = [
{
"arg": "--train",
"help": "Train the model",
"required": False,
"action": "store_true",
},
{
"arg": "--deploy",
"help": "Deploy the model",
"required": False,
"action": "store_true",
},
{
"arg": "--inference",
"help": "Run inference",
"required": False,
"action": "store_true",
},
{
"arg": "--data-path",
"help": "Train dataset to use",
"required": False,
"type": str,
},
{
"arg": "--train-split",
"help": "Test dataset split to use",
"required": False,
"type": str,
"default": "train",
},
{
"arg": "--valid-split",
"help": "Validation dataset split to use",
"required": False,
"type": str,
"default": None,
},
{
"arg": "--image-column",
"help": "Image column to use",
"required": False,
"type": str,
"default": "image",
},
{
"arg": "--target-column",
"help": "Target column to use",
"required": False,
"type": str,
"default": "target",
},
{
"arg": "--model",
"help": "Model to use",
"required": False,
"type": str,
},
{
"arg": "--lr",
"help": "Learning rate to use",
"required": False,
"type": float,
"default": 3e-5,
},
{
"arg": "--epochs",
"help": "Number of training epochs to use",
"required": False,
"type": int,
"default": 1,
},
{
"arg": "--batch-size",
"help": "Training batch size to use",
"required": False,
"type": int,
"default": 2,
},
{
"arg": "--warmup-ratio",
"help": "Warmup proportion to use",
"required": False,
"type": float,
"default": 0.1,
},
{
"arg": "--gradient-accumulation",
"help": "Gradient accumulation steps to use",
"required": False,
"type": int,
"default": 1,
},
{
"arg": "--optimizer",
"help": "Optimizer to use",
"required": False,
"type": str,
"default": "adamw_torch",
},
{
"arg": "--scheduler",
"help": "Scheduler to use",
"required": False,
"type": str,
"default": "linear",
},
{
"arg": "--weight-decay",
"help": "Weight decay to use",
"required": False,
"type": float,
"default": 0.0,
},
{
"arg": "--max-grad-norm",
"help": "Max gradient norm to use",
"required": False,
"type": float,
"default": 1.0,
},
{
"arg": "--seed",
"help": "Seed to use",
"required": False,
"type": int,
"default": 42,
},
{
"arg": "--logging-steps",
"help": "Logging steps to use",
"required": False,
"type": int,
"default": -1,
},
{
"arg": "--project-name",
"help": "Output directory",
"required": False,
"type": str,
},
{
"arg": "--evaluation-strategy",
"help": "Evaluation strategy to use",
"required": False,
"type": str,
"default": "epoch",
},
{
"arg": "--save-total-limit",
"help": "Save total limit to use",
"required": False,
"type": int,
"default": 1,
},
{
"arg": "--save-strategy",
"help": "Save strategy to use",
"required": False,
"type": str,
"default": "epoch",
},
{
"arg": "--auto-find-batch-size",
"help": "Auto find batch size True/False",
"required": False,
"action": "store_true",
},
{
"arg": "--fp16",
"help": "FP16 True/False",
"required": False,
"action": "store_true",
},
{
"arg": "--push-to-hub",
"help": "Push to hub True/False. In case you want to push the trained model to huggingface hub",
"required": False,
"action": "store_true",
},
{
"arg": "--repo-id",
"help": "Repo id for hugging face hub",
"required": False,
"type": str,
},
]
        run_image_classification_parser = parser.add_parser(
"image-classification", description="✨ Run AutoTrain Image Classification"
)
for arg in arg_list:
if "action" in arg:
                run_image_classification_parser.add_argument(
arg["arg"],
help=arg["help"],
required=arg.get("required", False),
action=arg.get("action"),
default=arg.get("default"),
)
else:
                run_image_classification_parser.add_argument(
arg["arg"],
help=arg["help"],
required=arg.get("required", False),
type=arg.get("type"),
default=arg.get("default"),
)
        run_image_classification_parser.set_defaults(func=run_image_classification_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = [
"train",
"deploy",
"inference",
"auto_find_batch_size",
"fp16",
"push_to_hub",
]
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError("Project name must be specified")
if self.args.data_path is None:
raise ValueError("Data path must be specified")
if self.args.model is None:
raise ValueError("Model must be specified")
if self.args.push_to_hub:
if self.args.repo_id is None:
raise ValueError("Repo id must be specified for push to hub")
else:
raise ValueError("Must specify --train, --deploy or --inference")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.num_gpus = torch.cuda.device_count()
def run(self):
from autotrain.trainers.image_classification.__main__ import train as train_image_classification
from autotrain.trainers.image_classification.params import ImageClassificationParams
        logger.info("Running Image Classification")
if self.args.train:
params = ImageClassificationParams(
data_path=self.args.data_path,
train_split=self.args.train_split,
valid_split=self.args.valid_split,
image_column=self.args.image_column,
target_column=self.args.target_column,
model_name=self.args.model,
lr=self.args.lr,
epochs=self.args.epochs,
batch_size=self.args.batch_size,
warmup_ratio=self.args.warmup_ratio,
gradient_accumulation=self.args.gradient_accumulation,
optimizer=self.args.optimizer,
scheduler=self.args.scheduler,
weight_decay=self.args.weight_decay,
max_grad_norm=self.args.max_grad_norm,
seed=self.args.seed,
logging_steps=self.args.logging_steps,
project_name=self.args.project_name,
evaluation_strategy=self.args.evaluation_strategy,
save_total_limit=self.args.save_total_limit,
save_strategy=self.args.save_strategy,
auto_find_batch_size=self.args.auto_find_batch_size,
fp16=self.args.fp16,
push_to_hub=self.args.push_to_hub,
repo_id=self.args.repo_id,
)
params.save(output_dir=self.args.project_name)
if self.num_gpus == 1:
train_image_classification(params)
else:
cmd = ["accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes"]
cmd.append(str(self.num_gpus))
cmd.append("--mixed_precision")
if self.args.fp16:
cmd.append("fp16")
else:
cmd.append("no")
cmd.extend(
[
"-m",
"autotrain.trainers.image_classification",
"--training_config",
os.path.join(self.args.project_name, "training_params.json"),
]
)
env = os.environ.copy()
process = subprocess.Popen(cmd, env=env)
process.wait()
| autotrain-advanced-main | src/autotrain/cli/run_image_classification.py |
import os
import subprocess
import sys
from argparse import ArgumentParser
import torch
from autotrain import logger
from autotrain.backend import EndpointsRunner, SpaceRunner
from . import BaseAutoTrainCommand
def run_text_classification_command_factory(args):
return RunAutoTrainTextClassificationCommand(args)
class RunAutoTrainTextClassificationCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = [
{
"arg": "--train",
"help": "Train the model",
"required": False,
"action": "store_true",
},
{
"arg": "--deploy",
"help": "Deploy the model",
"required": False,
"action": "store_true",
},
{
"arg": "--inference",
"help": "Run inference",
"required": False,
"action": "store_true",
},
{
"arg": "--data-path",
"help": "Train dataset to use",
"required": False,
"type": str,
},
{
"arg": "--train-split",
"help": "Test dataset split to use",
"required": False,
"type": str,
"default": "train",
},
{
"arg": "--valid-split",
"help": "Validation dataset split to use",
"required": False,
"type": str,
"default": None,
},
{
"arg": "--text-column",
"help": "Text column to use",
"required": False,
"type": str,
"default": "text",
},
{
"arg": "--target-column",
"help": "Target column to use",
"required": False,
"type": str,
"default": "target",
},
{
"arg": "--model",
"help": "Model to use",
"required": False,
"type": str,
},
{
"arg": "--lr",
"help": "Learning rate to use",
"required": False,
"type": float,
"default": 3e-5,
},
{
"arg": "--epochs",
"help": "Number of training epochs to use",
"required": False,
"type": int,
"default": 1,
},
{
"arg": "--max-seq-length",
"help": "Maximum number of tokens in a sequence to use",
"required": False,
"type": int,
"default": 128,
},
{
"arg": "--batch-size",
"help": "Training batch size to use",
"required": False,
"type": int,
"default": 2,
},
{
"arg": "--warmup-ratio",
"help": "Warmup proportion to use",
"required": False,
"type": float,
"default": 0.1,
},
{
"arg": "--gradient-accumulation",
"help": "Gradient accumulation steps to use",
"required": False,
"type": int,
"default": 1,
},
{
"arg": "--optimizer",
"help": "Optimizer to use",
"required": False,
"type": str,
"default": "adamw_torch",
},
{
"arg": "--scheduler",
"help": "Scheduler to use",
"required": False,
"type": str,
"default": "linear",
},
{
"arg": "--weight-decay",
"help": "Weight decay to use",
"required": False,
"type": float,
"default": 0.0,
},
{
"arg": "--max-grad-norm",
"help": "Max gradient norm to use",
"required": False,
"type": float,
"default": 1.0,
},
{
"arg": "--seed",
"help": "Seed to use",
"required": False,
"type": int,
"default": 42,
},
{
"arg": "--logging-steps",
"help": "Logging steps to use",
"required": False,
"type": int,
"default": -1,
},
{
"arg": "--project-name",
"help": "Output directory",
"required": False,
"type": str,
},
{
"arg": "--evaluation-strategy",
"help": "Evaluation strategy to use",
"required": False,
"type": str,
"default": "epoch",
},
{
"arg": "--save-total-limit",
"help": "Save total limit to use",
"required": False,
"type": int,
"default": 1,
},
{
"arg": "--save-strategy",
"help": "Save strategy to use",
"required": False,
"type": str,
"default": "epoch",
},
{
"arg": "--auto-find-batch-size",
"help": "Auto find batch size True/False",
"required": False,
"action": "store_true",
},
{
"arg": "--fp16",
"help": "FP16 True/False",
"required": False,
"action": "store_true",
},
{
"arg": "--token",
"help": "Hugging face token",
"required": False,
"type": str,
"default": "",
},
{
"arg": "--push-to-hub",
"help": "Push to hub True/False. In case you want to push the trained model to huggingface hub",
"required": False,
"action": "store_true",
},
{
"arg": "--repo-id",
"help": "Repo id for hugging face hub",
"required": False,
"type": str,
},
{
"arg": "--backend",
"help": "Backend to use: default or spaces. Spaces backend requires push_to_hub and repo_id",
"required": False,
"type": str,
"default": "default",
},
{
"arg": "--username",
"help": "Huggingface username to use",
"required": False,
"type": str,
},
]
run_text_classification_parser = parser.add_parser(
"text-classification", description="✨ Run AutoTrain Text Classification"
)
for arg in arg_list:
if "action" in arg:
run_text_classification_parser.add_argument(
arg["arg"],
help=arg["help"],
required=arg.get("required", False),
action=arg.get("action"),
default=arg.get("default"),
)
else:
run_text_classification_parser.add_argument(
arg["arg"],
help=arg["help"],
required=arg.get("required", False),
type=arg.get("type"),
default=arg.get("default"),
)
run_text_classification_parser.set_defaults(func=run_text_classification_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = [
"train",
"deploy",
"inference",
"auto_find_batch_size",
"fp16",
"push_to_hub",
]
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError("Project name must be specified")
if self.args.data_path is None:
raise ValueError("Data path must be specified")
if self.args.model is None:
raise ValueError("Model must be specified")
if self.args.push_to_hub:
if self.args.repo_id is None:
raise ValueError("Repo id must be specified for push to hub")
else:
raise ValueError("Must specify --train, --deploy or --inference")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.num_gpus = torch.cuda.device_count()
if len(str(self.args.token)) < 6:
self.args.token = os.environ.get("HF_TOKEN", None)
def run(self):
from autotrain.trainers.text_classification.__main__ import train as train_text_classification
from autotrain.trainers.text_classification.params import TextClassificationParams
logger.info("Running Text Classification")
if self.args.train:
params = TextClassificationParams(
data_path=self.args.data_path,
train_split=self.args.train_split,
valid_split=self.args.valid_split,
text_column=self.args.text_column,
target_column=self.args.target_column,
model_name=self.args.model,
lr=self.args.lr,
epochs=self.args.epochs,
max_seq_length=self.args.max_seq_length,
batch_size=self.args.batch_size,
warmup_ratio=self.args.warmup_ratio,
gradient_accumulation=self.args.gradient_accumulation,
optimizer=self.args.optimizer,
scheduler=self.args.scheduler,
weight_decay=self.args.weight_decay,
max_grad_norm=self.args.max_grad_norm,
seed=self.args.seed,
logging_steps=self.args.logging_steps,
project_name=self.args.project_name,
evaluation_strategy=self.args.evaluation_strategy,
save_total_limit=self.args.save_total_limit,
save_strategy=self.args.save_strategy,
auto_find_batch_size=self.args.auto_find_batch_size,
fp16=self.args.fp16,
push_to_hub=self.args.push_to_hub,
repo_id=self.args.repo_id,
token=self.args.token,
username=self.args.username,
)
if self.args.backend.startswith("spaces"):
logger.info("Creating space...")
sr = SpaceRunner(
params=params,
backend=self.args.backend,
)
space_id = sr.prepare()
logger.info(f"Training Space created. Check progress at https://hf.co/spaces/{space_id}")
sys.exit(0)
if self.args.backend.startswith("ep-"):
logger.info("Creating training endpoint...")
sr = EndpointsRunner(
params=params,
backend=self.args.backend,
)
sr.prepare()
logger.info("Training endpoint created.")
sys.exit(0)
params.save(output_dir=self.args.project_name)
if self.num_gpus == 1:
train_text_classification(params)
else:
cmd = ["accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes"]
cmd.append(str(self.num_gpus))
cmd.append("--mixed_precision")
if self.args.fp16:
cmd.append("fp16")
else:
cmd.append("no")
cmd.extend(
[
"-m",
"autotrain.trainers.text_classification",
"--training_config",
os.path.join(self.args.project_name, "training_params.json"),
]
)
env = os.environ.copy()
process = subprocess.Popen(cmd, env=env)
process.wait()
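# An illustrative example (not part of the original module) of how this
# subcommand could be invoked; the model, paths and column names are
# placeholders:
#   autotrain text-classification --train --model bert-base-uncased \
#       --data-path data/ --text-column text --target-column label \
#       --train-split train --valid-split validation --project-name my-clf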
| autotrain-advanced-main | src/autotrain/cli/run_text_classification.py |
import os
import subprocess
import sys
from argparse import ArgumentParser
import torch
from autotrain import logger
from . import BaseAutoTrainCommand
def run_llm_command_factory(args):
return RunAutoTrainLLMCommand(args)
class RunAutoTrainLLMCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = [
{
"arg": "--train",
"help": "Train the model",
"required": False,
"action": "store_true",
},
{
"arg": "--deploy",
"help": "Deploy the model",
"required": False,
"action": "store_true",
},
{
"arg": "--inference",
"help": "Run inference",
"required": False,
"action": "store_true",
},
{
"arg": "--data_path",
"help": "Train dataset to use",
"required": False,
"type": str,
"alias": ["--data-path"],
},
{
"arg": "--train_split",
"help": "Test dataset split to use",
"required": False,
"type": str,
"default": "train",
"alias": ["--train-split"],
},
{
"arg": "--valid_split",
"help": "Validation dataset split to use",
"required": False,
"type": str,
"default": None,
"alias": ["--valid-split"],
},
{
"arg": "--text_column",
"help": "Text column to use",
"required": False,
"type": str,
"default": "text",
"alias": ["--text-column"],
},
{
"arg": "--model",
"help": "Model to use",
"required": False,
"type": str,
},
{
"arg": "--learning_rate",
"help": "Learning rate to use",
"required": False,
"type": float,
"default": 3e-5,
"alias": ["--lr", "--learning-rate"],
},
{
"arg": "--num_train_epochs",
"help": "Number of training epochs to use",
"required": False,
"type": int,
"default": 1,
"alias": ["--epochs"],
},
{
"arg": "--train_batch_size",
"help": "Training batch size to use",
"required": False,
"type": int,
"default": 2,
"alias": ["--train-batch-size", "--batch-size"],
},
{
"arg": "--warmup_ratio",
"help": "Warmup proportion to use",
"required": False,
"type": float,
"default": 0.1,
"alias": ["--warmup-ratio"],
},
{
"arg": "--gradient_accumulation_steps",
"help": "Gradient accumulation steps to use",
"required": False,
"type": int,
"default": 1,
"alias": ["--gradient-accumulation-steps", "--gradient-accumulation"],
},
{
"arg": "--optimizer",
"help": "Optimizer to use",
"required": False,
"type": str,
"default": "adamw_torch",
},
{
"arg": "--scheduler",
"help": "Scheduler to use",
"required": False,
"type": str,
"default": "linear",
},
{
"arg": "--weight_decay",
"help": "Weight decay to use",
"required": False,
"type": float,
"default": 0.0,
"alias": ["--weight-decay"],
},
{
"arg": "--max_grad_norm",
"help": "Max gradient norm to use",
"required": False,
"type": float,
"default": 1.0,
"alias": ["--max-grad-norm"],
},
{
"arg": "--seed",
"help": "Seed to use",
"required": False,
"type": int,
"default": 42,
},
{
"arg": "--add_eos_token",
"help": "Add EOS token to use",
"required": False,
"action": "store_true",
"alias": ["--add-eos-token"],
},
{
"arg": "--block_size",
"help": "Block size to use",
"required": False,
"type": int,
"default": -1,
"alias": ["--block-size"],
},
{
"arg": "--use_peft",
"help": "Use PEFT to use",
"required": False,
"action": "store_true",
"alias": ["--use-peft"],
},
{
"arg": "--lora_r",
"help": "Lora r to use",
"required": False,
"type": int,
"default": 16,
"alias": ["--lora-r"],
},
{
"arg": "--lora_alpha",
"help": "Lora alpha to use",
"required": False,
"type": int,
"default": 32,
"alias": ["--lora-alpha"],
},
{
"arg": "--lora_dropout",
"help": "Lora dropout to use",
"required": False,
"type": float,
"default": 0.05,
"alias": ["--lora-dropout"],
},
{
"arg": "--logging_steps",
"help": "Logging steps to use",
"required": False,
"type": int,
"default": -1,
"alias": ["--logging-steps"],
},
{
"arg": "--project_name",
"help": "Output directory",
"required": False,
"type": str,
"alias": ["--project-name"],
},
{
"arg": "--evaluation_strategy",
"help": "Evaluation strategy to use",
"required": False,
"type": str,
"default": "epoch",
"alias": ["--evaluation-strategy"],
},
{
"arg": "--save_total_limit",
"help": "Save total limit to use",
"required": False,
"type": int,
"default": 1,
"alias": ["--save-total-limit"],
},
{
"arg": "--save_strategy",
"help": "Save strategy to use",
"required": False,
"type": str,
"default": "epoch",
"alias": ["--save-strategy"],
},
{
"arg": "--auto_find_batch_size",
"help": "Auto find batch size True/False",
"required": False,
"action": "store_true",
"alias": ["--auto-find-batch-size"],
},
{
"arg": "--fp16",
"help": "FP16 True/False",
"required": False,
"action": "store_true",
},
{
"arg": "--push_to_hub",
"help": "Push to hub True/False. In case you want to push the trained model to huggingface hub",
"required": False,
"action": "store_true",
"alias": ["--push-to-hub"],
},
{
"arg": "--use_int8",
"help": "Use int8 True/False",
"required": False,
"action": "store_true",
"alias": ["--use-int8"],
},
{
"arg": "--model_max_length",
"help": "Model max length to use",
"required": False,
"type": int,
"default": 1024,
"alias": ["--max-len", "--max-length"],
},
{
"arg": "--repo_id",
"help": "Repo id for hugging face hub. Format is username/repo_name",
"required": False,
"type": str,
"alias": ["--repo-id"],
},
{
"arg": "--use_int4",
"help": "Use int4 True/False",
"required": False,
"action": "store_true",
"alias": ["--use-int4"],
},
{
"arg": "--trainer",
"help": "Trainer type to use",
"required": False,
"type": str,
"default": "default",
},
{
"arg": "--target_modules",
"help": "Target modules to use",
"required": False,
"type": str,
"default": None,
"alias": ["--target-modules"],
},
{
"arg": "--merge_adapter",
"help": "Use this flag to merge PEFT adapter with the model",
"required": False,
"action": "store_true",
"alias": ["--merge-adapter"],
},
{
"arg": "--token",
"help": "Hugingface token to use",
"required": False,
"type": str,
},
{
"arg": "--backend",
"help": "Backend to use: default or spaces. Spaces backend requires push_to_hub and repo_id",
"required": False,
"type": str,
"default": "default",
},
{
"arg": "--username",
"help": "Huggingface username to use",
"required": False,
"type": str,
},
]
run_llm_parser = parser.add_parser("llm", description="✨ Run AutoTrain LLM")
for arg in arg_list:
names = [arg["arg"]] + arg.get("alias", [])
if "action" in arg:
run_llm_parser.add_argument(
*names,
dest=arg["arg"].replace("--", "").replace("-", "_"),
help=arg["help"],
required=arg.get("required", False),
action=arg.get("action"),
default=arg.get("default"),
)
else:
run_llm_parser.add_argument(
*names,
dest=arg["arg"].replace("--", "").replace("-", "_"),
help=arg["help"],
required=arg.get("required", False),
type=arg.get("type"),
default=arg.get("default"),
)
run_llm_parser.set_defaults(func=run_llm_command_factory)
def __init__(self, args):
self.args = args
store_true_arg_names = [
"train",
"deploy",
"inference",
"add_eos_token",
"use_peft",
"auto_find_batch_size",
"fp16",
"push_to_hub",
"use_int8",
"use_int4",
"merge_adapter",
]
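        # argparse leaves these store_true flags as None when they are not passed (their default is None), so normalize them to False.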
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.train:
if self.args.project_name is None:
raise ValueError("Project name must be specified")
if self.args.data_path is None:
raise ValueError("Data path must be specified")
if self.args.model is None:
raise ValueError("Model must be specified")
if self.args.push_to_hub:
if self.args.repo_id is None:
raise ValueError("Repo id must be specified for push to hub")
if self.args.backend.startswith("spaces") or self.args.backend.startswith("ep-"):
if not self.args.push_to_hub:
raise ValueError("Push to hub must be specified for spaces backend")
if self.args.repo_id is None:
raise ValueError("Repo id must be specified for spaces backend")
if self.args.token is None:
raise ValueError("Token must be specified for spaces backend")
if self.args.inference:
from autotrain.infer.text_generation import TextGenerationInference
tgi = TextGenerationInference(
self.args.project_name, use_int4=self.args.use_int4, use_int8=self.args.use_int8
)
while True:
prompt = input("User: ")
if prompt == "exit()":
break
print(f"Bot: {tgi.chat(prompt)}")
if not torch.cuda.is_available():
raise ValueError("No GPU found. Please install CUDA and try again.")
self.num_gpus = torch.cuda.device_count()
def run(self):
from autotrain.backend import EndpointsRunner, SpaceRunner
from autotrain.trainers.clm.__main__ import train as train_llm
from autotrain.trainers.clm.params import LLMTrainingParams
logger.info("Running LLM")
logger.info(f"Params: {self.args}")
if self.args.train:
params = LLMTrainingParams(
model=self.args.model,
data_path=self.args.data_path,
train_split=self.args.train_split,
valid_split=self.args.valid_split,
text_column=self.args.text_column,
lr=self.args.learning_rate,
epochs=self.args.num_train_epochs,
batch_size=self.args.train_batch_size,
warmup_ratio=self.args.warmup_ratio,
gradient_accumulation=self.args.gradient_accumulation_steps,
optimizer=self.args.optimizer,
scheduler=self.args.scheduler,
weight_decay=self.args.weight_decay,
max_grad_norm=self.args.max_grad_norm,
seed=self.args.seed,
add_eos_token=self.args.add_eos_token,
block_size=self.args.block_size,
use_peft=self.args.use_peft,
lora_r=self.args.lora_r,
lora_alpha=self.args.lora_alpha,
lora_dropout=self.args.lora_dropout,
logging_steps=self.args.logging_steps,
project_name=self.args.project_name,
evaluation_strategy=self.args.evaluation_strategy,
save_total_limit=self.args.save_total_limit,
save_strategy=self.args.save_strategy,
auto_find_batch_size=self.args.auto_find_batch_size,
fp16=self.args.fp16,
push_to_hub=self.args.push_to_hub,
use_int8=self.args.use_int8,
model_max_length=self.args.model_max_length,
repo_id=self.args.repo_id,
use_int4=self.args.use_int4,
trainer=self.args.trainer,
target_modules=self.args.target_modules,
token=self.args.token,
merge_adapter=self.args.merge_adapter,
username=self.args.username,
)
# space training
if self.args.backend.startswith("spaces"):
logger.info("Creating space...")
sr = SpaceRunner(
params=params,
backend=self.args.backend,
)
space_id = sr.prepare()
logger.info(f"Training Space created. Check progress at https://hf.co/spaces/{space_id}")
sys.exit(0)
if self.args.backend.startswith("ep-"):
logger.info("Creating training endpoint...")
sr = EndpointsRunner(
params=params,
backend=self.args.backend,
)
sr.prepare()
logger.info("Training endpoint created.")
sys.exit(0)
# local training
params.save(output_dir=self.args.project_name)
if self.num_gpus == 1:
train_llm(params)
else:
cmd = ["accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes"]
cmd.append(str(self.num_gpus))
cmd.append("--mixed_precision")
if self.args.fp16:
cmd.append("fp16")
else:
cmd.append("no")
cmd.extend(
[
"-m",
"autotrain.trainers.clm",
"--training_config",
os.path.join(self.args.project_name, "training_params.json"),
]
)
env = os.environ.copy()
process = subprocess.Popen(cmd, env=env)
process.wait()
| autotrain-advanced-main | src/autotrain/cli/run_llm.py |
import glob
import os
from argparse import ArgumentParser
from autotrain import logger
from autotrain.cli import BaseAutoTrainCommand
try:
from autotrain.trainers.dreambooth.__main__ import train as train_dreambooth
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.dreambooth.utils import VALID_IMAGE_EXTENSIONS, XL_MODELS
except ImportError:
    logger.warning(
        "❌ Some DreamBooth components are missing! Please run `autotrain setup` to install them. Ignore this warning if you are not using DreamBooth or have already run `autotrain setup`."
    )
def count_images(directory):
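    # Gather every file in the directory with a supported image extension and return the count.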
files_grabbed = []
for files in VALID_IMAGE_EXTENSIONS:
files_grabbed.extend(glob.glob(os.path.join(directory, "*" + files)))
return len(files_grabbed)
def run_dreambooth_command_factory(args):
return RunAutoTrainDreamboothCommand(args)
class RunAutoTrainDreamboothCommand(BaseAutoTrainCommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
arg_list = [
{
"arg": "--model",
"help": "Model to use for training",
"required": True,
"type": str,
},
{
"arg": "--revision",
"help": "Model revision to use for training",
"required": False,
"type": str,
},
{
"arg": "--tokenizer",
"help": "Tokenizer to use for training",
"required": False,
"type": str,
},
{
"arg": "--image-path",
"help": "Path to the images",
"required": True,
"type": str,
},
{
"arg": "--class-image-path",
"help": "Path to the class images",
"required": False,
"type": str,
},
{
"arg": "--prompt",
"help": "Instance prompt",
"required": True,
"type": str,
},
{
"arg": "--class-prompt",
"help": "Class prompt",
"required": False,
"type": str,
},
{
"arg": "--num-class-images",
"help": "Number of class images",
"required": False,
"default": 100,
"type": int,
},
{
"arg": "--class-labels-conditioning",
"help": "Class labels conditioning",
"required": False,
"type": str,
},
{
"arg": "--prior-preservation",
"help": "With prior preservation",
"required": False,
"action": "store_true",
},
{
"arg": "--prior-loss-weight",
"help": "Prior loss weight",
"required": False,
"default": 1.0,
"type": float,
},
{
"arg": "--project-name",
"help": "Output directory or repo id",
"required": True,
"type": str,
},
{
"arg": "--seed",
"help": "Seed",
"required": False,
"default": 42,
"type": int,
},
{
"arg": "--resolution",
"help": "Resolution",
"required": True,
"type": int,
},
{
"arg": "--center-crop",
"help": "Center crop",
"required": False,
"action": "store_true",
},
{
"arg": "--train-text-encoder",
"help": "Train text encoder",
"required": False,
"action": "store_true",
},
{
"arg": "--batch-size",
"help": "Train batch size",
"required": False,
"default": 4,
"type": int,
},
{
"arg": "--sample-batch-size",
"help": "Sample batch size",
"required": False,
"default": 4,
"type": int,
},
{
"arg": "--epochs",
"help": "Number of training epochs",
"required": False,
"default": 1,
"type": int,
},
{
"arg": "--num-steps",
"help": "Max train steps",
"required": False,
"type": int,
},
{
"arg": "--checkpointing-steps",
"help": "Checkpointing steps",
"required": False,
"default": 100000,
"type": int,
},
{
"arg": "--resume-from-checkpoint",
"help": "Resume from checkpoint",
"required": False,
"type": str,
},
{
"arg": "--gradient-accumulation",
"help": "Gradient accumulation steps",
"required": False,
"default": 1,
"type": int,
},
{
"arg": "--gradient-checkpointing",
"help": "Gradient checkpointing",
"required": False,
"action": "store_true",
},
{
"arg": "--lr",
"help": "Learning rate",
"required": False,
"default": 5e-4,
"type": float,
},
{
"arg": "--scale-lr",
"help": "Scale learning rate",
"required": False,
"action": "store_true",
},
{
"arg": "--scheduler",
"help": "Learning rate scheduler",
"required": False,
"default": "constant",
},
{
"arg": "--warmup-steps",
"help": "Learning rate warmup steps",
"required": False,
"default": 0,
"type": int,
},
{
"arg": "--num-cycles",
"help": "Learning rate num cycles",
"required": False,
"default": 1,
"type": int,
},
{
"arg": "--lr-power",
"help": "Learning rate power",
"required": False,
"default": 1.0,
"type": float,
},
{
"arg": "--dataloader-num-workers",
"help": "Dataloader num workers",
"required": False,
"default": 0,
"type": int,
},
{
"arg": "--use-8bit-adam",
"help": "Use 8bit adam",
"required": False,
"action": "store_true",
},
{
"arg": "--adam-beta1",
"help": "Adam beta 1",
"required": False,
"default": 0.9,
"type": float,
},
{
"arg": "--adam-beta2",
"help": "Adam beta 2",
"required": False,
"default": 0.999,
"type": float,
},
{
"arg": "--adam-weight-decay",
"help": "Adam weight decay",
"required": False,
"default": 1e-2,
"type": float,
},
{
"arg": "--adam-epsilon",
"help": "Adam epsilon",
"required": False,
"default": 1e-8,
"type": float,
},
{
"arg": "--max-grad-norm",
"help": "Max grad norm",
"required": False,
"default": 1.0,
"type": float,
},
{
"arg": "--allow-tf32",
"help": "Allow TF32",
"required": False,
"action": "store_true",
},
{
"arg": "--prior-generation-precision",
"help": "Prior generation precision",
"required": False,
"type": str,
},
{
"arg": "--local-rank",
"help": "Local rank",
"required": False,
"default": -1,
"type": int,
},
{
"arg": "--xformers",
"help": "Enable xformers memory efficient attention",
"required": False,
"action": "store_true",
},
{
"arg": "--pre-compute-text-embeddings",
"help": "Pre compute text embeddings",
"required": False,
"action": "store_true",
},
{
"arg": "--tokenizer-max-length",
"help": "Tokenizer max length",
"required": False,
"type": int,
},
{
"arg": "--text-encoder-use-attention-mask",
"help": "Text encoder use attention mask",
"required": False,
"action": "store_true",
},
{
"arg": "--rank",
"help": "Rank",
"required": False,
"default": 4,
"type": int,
},
{
"arg": "--xl",
"help": "XL",
"required": False,
"action": "store_true",
},
{
"arg": "--fp16",
"help": "FP16",
"required": False,
"action": "store_true",
},
{
"arg": "--bf16",
"help": "BF16",
"required": False,
"action": "store_true",
},
{
"arg": "--token",
"help": "Hub token",
"required": False,
"type": str,
},
{
"arg": "--repo-id",
"help": "Hub repo id",
"required": False,
"type": str,
},
{
"arg": "--push-to-hub",
"help": "Push to hub",
"required": False,
"action": "store_true",
},
{
"arg": "--validation-prompt",
"help": "Validation prompt",
"required": False,
"type": str,
},
{
"arg": "--num-validation-images",
"help": "Number of validation images",
"required": False,
"default": 4,
"type": int,
},
{
"arg": "--validation-epochs",
"help": "Validation epochs",
"required": False,
"default": 50,
"type": int,
},
{
"arg": "--checkpoints-total-limit",
"help": "Checkpoints total limit",
"required": False,
"type": int,
},
{
"arg": "--validation-images",
"help": "Validation images",
"required": False,
"type": str,
},
{
"arg": "--logging",
"help": "Logging using tensorboard",
"required": False,
"action": "store_true",
},
{
"arg": "--username",
"help": "Hugging Face Hub Username",
"required": False,
"type": str,
},
]
run_dreambooth_parser = parser.add_parser("dreambooth", description="✨ Run AutoTrain DreamBooth Training")
for arg in arg_list:
if "action" in arg:
run_dreambooth_parser.add_argument(
arg["arg"],
help=arg["help"],
required=arg.get("required", False),
action=arg.get("action"),
default=arg.get("default"),
)
else:
run_dreambooth_parser.add_argument(
arg["arg"],
help=arg["help"],
required=arg.get("required", False),
type=arg.get("type"),
default=arg.get("default"),
)
run_dreambooth_parser.set_defaults(func=run_dreambooth_command_factory)
def __init__(self, args):
self.args = args
logger.info(self.args)
store_true_arg_names = [
"center_crop",
"train_text_encoder",
"gradient_checkpointing",
"scale_lr",
"use_8bit_adam",
"allow_tf32",
"xformers",
"pre_compute_text_embeddings",
"text_encoder_use_attention_mask",
"xl",
"fp16",
"bf16",
"push_to_hub",
"logging",
"prior_preservation",
]
for arg_name in store_true_arg_names:
if getattr(self.args, arg_name) is None:
setattr(self.args, arg_name, False)
if self.args.fp16 and self.args.bf16:
raise ValueError("❌ Please choose either FP16 or BF16")
# check if self.args.image_path is a directory with images
if not os.path.isdir(self.args.image_path):
raise ValueError("❌ Please specify a valid image directory")
# count the number of images in the directory. valid images are .jpg, .jpeg, .png
num_images = count_images(self.args.image_path)
if num_images == 0:
raise ValueError("❌ Please specify a valid image directory")
if self.args.push_to_hub:
if self.args.repo_id is None and self.args.username is None:
raise ValueError("❌ Please specify a username or repo id to push to hub")
if self.args.model in XL_MODELS:
self.args.xl = True
def run(self):
logger.info("Running DreamBooth Training")
params = DreamBoothTrainingParams(**vars(self.args))
train_dreambooth(params)
| autotrain-advanced-main | src/autotrain/cli/run_dreambooth.py |
import os
import albumentations as A
import numpy as np
import torch
from datasets import load_dataset
from sklearn import metrics
from transformers import (
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
EarlyStoppingCallback,
Trainer,
TrainingArguments,
)
from autotrain import logger, utils
from autotrain.params import ImageBinaryClassificationParams, ImageMultiClassClassificationParams
BINARY_CLASSIFICATION_EVAL_METRICS = (
"eval_loss",
"eval_accuracy",
"eval_f1",
"eval_auc",
"eval_precision",
"eval_recall",
)
MULTI_CLASS_CLASSIFICATION_EVAL_METRICS = (
"eval_loss",
"eval_accuracy",
"eval_f1_macro",
"eval_f1_micro",
"eval_f1_weighted",
"eval_precision_macro",
"eval_precision_micro",
"eval_precision_weighted",
"eval_recall_macro",
"eval_recall_micro",
"eval_recall_weighted",
)
MODEL_CARD = """
---
tags:
- autotrain
- image-classification
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
example_title: Palace
datasets:
- {dataset}
co2_eq_emissions:
emissions: {co2}
---
# Model Trained Using AutoTrain
- Problem type: Image Classification
- CO2 Emissions (in grams): {co2:.4f}
## Validation Metrics
{validation_metrics}
"""
class Dataset:
def __init__(self, data, transforms):
self.data = data
self.transforms = transforms
def __len__(self):
return len(self.data)
def __getitem__(self, item):
image = self.data[item]["image"]
target = int(self.data[item]["label"])
image = self.transforms(image=np.array(image.convert("RGB")))["image"]
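        # Albumentations returns an HWC array; convert it to CHW float32 as expected by the image model.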
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
return {
"pixel_values": torch.tensor(image, dtype=torch.float),
"labels": torch.tensor(target, dtype=torch.long),
}
def _binary_classification_metrics(pred):
raw_predictions, labels = pred
predictions = np.argmax(raw_predictions, axis=1)
result = {
"f1": metrics.f1_score(labels, predictions),
"precision": metrics.precision_score(labels, predictions),
"recall": metrics.recall_score(labels, predictions),
"auc": metrics.roc_auc_score(labels, raw_predictions[:, 1]),
"accuracy": metrics.accuracy_score(labels, predictions),
}
return result
def _multi_class_classification_metrics(pred):
raw_predictions, labels = pred
predictions = np.argmax(raw_predictions, axis=1)
results = {
"f1_macro": metrics.f1_score(labels, predictions, average="macro"),
"f1_micro": metrics.f1_score(labels, predictions, average="micro"),
"f1_weighted": metrics.f1_score(labels, predictions, average="weighted"),
"precision_macro": metrics.precision_score(labels, predictions, average="macro"),
"precision_micro": metrics.precision_score(labels, predictions, average="micro"),
"precision_weighted": metrics.precision_score(labels, predictions, average="weighted"),
"recall_macro": metrics.recall_score(labels, predictions, average="macro"),
"recall_micro": metrics.recall_score(labels, predictions, average="micro"),
"recall_weighted": metrics.recall_score(labels, predictions, average="weighted"),
"accuracy": metrics.accuracy_score(labels, predictions),
}
return results
def process_data(train_data, valid_data, image_processor):
if "shortest_edge" in image_processor.size:
size = image_processor.size["shortest_edge"]
else:
size = (image_processor.size["height"], image_processor.size["width"])
try:
height, width = size
except TypeError:
height = size
width = size
train_transforms = A.Compose(
[
A.RandomResizedCrop(height=height, width=width),
A.RandomRotate90(),
A.HorizontalFlip(p=0.5),
A.RandomBrightnessContrast(p=0.2),
A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
]
)
val_transforms = A.Compose(
[
A.Resize(height=height, width=width),
A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
]
)
train_data = Dataset(train_data, train_transforms)
valid_data = Dataset(valid_data, val_transforms)
return train_data, valid_data
@utils.job_watcher
def train(co2_tracker, payload, huggingface_token, model_path):
# create model repo
model_repo = utils.create_repo(
project_name=payload["proj_name"],
autotrain_user=payload["username"],
huggingface_token=huggingface_token,
model_path=model_path,
)
data_path = f"{payload['username']}/autotrain-data-{payload['proj_name']}"
data = load_dataset(data_path, use_auth_token=huggingface_token)
logger.info(f"Loaded data from {data_path}")
job_config = payload["config"]["params"][0]
job_config["model_name"] = payload["config"]["hub_model"]
train_data = data["train"]
valid_data = data["validation"]
labels = train_data.features["label"].names
label2id, id2label = {}, {}
for i, label in enumerate(labels):
label2id[label] = str(i)
id2label[str(i)] = label
num_classes = len(labels)
model_name = job_config["model_name"]
device = job_config.get("device", "cuda")
# remove model_name from job config
del job_config["model_name"]
if num_classes == 2:
job_config["task"] = "image_binary_classification"
job_config = ImageBinaryClassificationParams(**job_config)
elif num_classes > 2:
job_config["task"] = "image_multi_class_classification"
job_config = ImageMultiClassClassificationParams(**job_config)
else:
raise ValueError("Invalid number of classes")
model_config = AutoConfig.from_pretrained(
model_name,
num_labels=num_classes,
use_auth_token=huggingface_token,
)
model_config._num_labels = len(label2id)
model_config.label2id = label2id
model_config.id2label = id2label
logger.info(model_config)
try:
model = AutoModelForImageClassification.from_pretrained(
model_name,
config=model_config,
use_auth_token=huggingface_token,
ignore_mismatched_sizes=True,
)
except OSError:
model = AutoModelForImageClassification.from_pretrained(
model_name,
config=model_config,
use_auth_token=huggingface_token,
from_tf=True,
ignore_mismatched_sizes=True,
)
image_processor = AutoImageProcessor.from_pretrained(model_name, use_auth_token=huggingface_token)
train_dataset, valid_dataset = process_data(train_data, valid_data, image_processor)
# trainer specific
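    # Log roughly five times per evaluation pass: 20% of the number of validation batches, with a floor of one step.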
logging_steps = int(0.2 * len(valid_dataset) / job_config.train_batch_size)
if logging_steps == 0:
logging_steps = 1
fp16 = True
if device == "cpu":
fp16 = False
training_args = dict(
output_dir=model_path,
per_device_train_batch_size=job_config.train_batch_size,
per_device_eval_batch_size=job_config.train_batch_size,
learning_rate=job_config.learning_rate,
num_train_epochs=job_config.num_train_epochs,
fp16=fp16,
load_best_model_at_end=True,
evaluation_strategy="epoch",
logging_steps=logging_steps,
save_total_limit=1,
save_strategy="epoch",
disable_tqdm=not bool(os.environ.get("ENABLE_TQDM", 0)),
gradient_accumulation_steps=job_config.gradient_accumulation_steps,
report_to="none",
auto_find_batch_size=True,
lr_scheduler_type=job_config.scheduler,
optim=job_config.optimizer,
warmup_ratio=job_config.warmup_ratio,
weight_decay=job_config.weight_decay,
max_grad_norm=job_config.max_grad_norm,
)
early_stop = EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0.01)
callbacks_to_use = [early_stop]
args = TrainingArguments(**training_args)
trainer_args = dict(
args=args,
model=model,
callbacks=callbacks_to_use,
compute_metrics=_binary_classification_metrics if num_classes == 2 else _multi_class_classification_metrics,
)
trainer = Trainer(
**trainer_args,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
)
trainer.train()
logger.info("Finished training")
logger.info(trainer.state.best_metric)
eval_scores = trainer.evaluate()
# create and save model card
co2_consumed = co2_tracker.stop()
co2_consumed = co2_consumed * 1000 if co2_consumed is not None else 0
valid_metrics = BINARY_CLASSIFICATION_EVAL_METRICS if num_classes == 2 else MULTI_CLASS_CLASSIFICATION_EVAL_METRICS
eval_scores = [f"{k[len('eval_'):]}: {v}" for k, v in eval_scores.items() if k in valid_metrics]
eval_scores = "\n\n".join(eval_scores)
model_card = MODEL_CARD.format(
language=payload["config"]["language"],
dataset=data_path,
co2=co2_consumed,
validation_metrics=eval_scores,
)
logger.info(model_card)
utils.save_model_card(model_card, model_path)
# save model, image_processor and config
model = utils.update_model_config(trainer.model, job_config)
utils.save_tokenizer(image_processor, model_path)
utils.save_model(model, model_path)
utils.remove_checkpoints(model_path=model_path)
# push model to hub
logger.info("Pushing model to Hub")
model_repo.git_pull()
model_repo.git_add()
model_repo.git_commit(commit_message="Commit From AutoTrain")
model_repo.git_push()
| autotrain-advanced-main | src/autotrain/trainers/image_classification.py |
autotrain-advanced-main | src/autotrain/trainers/__init__.py |
|
import os
import numpy as np
import torch
from datasets import load_dataset
from sklearn import metrics
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
EarlyStoppingCallback,
Trainer,
TrainingArguments,
)
from autotrain import logger, utils
from autotrain.params import TextBinaryClassificationParams, TextMultiClassClassificationParams
TEXT_COLUMN = "autotrain_text"
LABEL_COLUMN = "autotrain_label"
FP32_MODELS = ("t5", "mt5", "pegasus", "longt5", "bigbird_pegasus")
BINARY_CLASSIFICATION_EVAL_METRICS = (
"eval_loss",
"eval_accuracy",
"eval_f1",
"eval_auc",
"eval_precision",
"eval_recall",
)
MULTI_CLASS_CLASSIFICATION_EVAL_METRICS = (
"eval_loss",
"eval_accuracy",
"eval_f1_macro",
"eval_f1_micro",
"eval_f1_weighted",
"eval_precision_macro",
"eval_precision_micro",
"eval_precision_weighted",
"eval_recall_macro",
"eval_recall_micro",
"eval_recall_weighted",
)
MODEL_CARD = """
---
tags:
- autotrain
- text-classification
language:
- {language}
widget:
- text: "I love AutoTrain"
datasets:
- {dataset}
co2_eq_emissions:
emissions: {co2}
---
# Model Trained Using AutoTrain
- Problem type: Text Classification
- CO2 Emissions (in grams): {co2:.4f}
## Validation Metrics
{validation_metrics}
"""
class Dataset:
def __init__(self, data, tokenizer, label2id, config):
self.data = data
self.tokenizer = tokenizer
self.config = config
self.label2id = label2id
def __len__(self):
return len(self.data)
def __getitem__(self, item):
text = str(self.data[item][TEXT_COLUMN])
target = self.data[item][LABEL_COLUMN]
target = int(self.label2id[target])
inputs = self.tokenizer(
text,
max_length=self.config.max_seq_length,
padding="max_length",
truncation=True,
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
if "token_type_ids" in inputs:
token_type_ids = inputs["token_type_ids"]
else:
token_type_ids = None
if token_type_ids is not None:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"labels": torch.tensor(target, dtype=torch.long),
}
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"labels": torch.tensor(target, dtype=torch.long),
}
def _binary_classification_metrics(pred):
raw_predictions, labels = pred
predictions = np.argmax(raw_predictions, axis=1)
result = {
"f1": metrics.f1_score(labels, predictions),
"precision": metrics.precision_score(labels, predictions),
"recall": metrics.recall_score(labels, predictions),
"auc": metrics.roc_auc_score(labels, raw_predictions[:, 1]),
"accuracy": metrics.accuracy_score(labels, predictions),
}
return result
def _multi_class_classification_metrics(pred):
raw_predictions, labels = pred
predictions = np.argmax(raw_predictions, axis=1)
results = {
"f1_macro": metrics.f1_score(labels, predictions, average="macro"),
"f1_micro": metrics.f1_score(labels, predictions, average="micro"),
"f1_weighted": metrics.f1_score(labels, predictions, average="weighted"),
"precision_macro": metrics.precision_score(labels, predictions, average="macro"),
"precision_micro": metrics.precision_score(labels, predictions, average="micro"),
"precision_weighted": metrics.precision_score(labels, predictions, average="weighted"),
"recall_macro": metrics.recall_score(labels, predictions, average="macro"),
"recall_micro": metrics.recall_score(labels, predictions, average="micro"),
"recall_weighted": metrics.recall_score(labels, predictions, average="weighted"),
"accuracy": metrics.accuracy_score(labels, predictions),
}
return results
@utils.job_watcher
def train(co2_tracker, payload, huggingface_token, model_path):
model_repo = utils.create_repo(
project_name=payload["proj_name"],
autotrain_user=payload["username"],
huggingface_token=huggingface_token,
model_path=model_path,
)
data_path = f"{payload['username']}/autotrain-data-{payload['proj_name']}"
data = load_dataset(data_path, use_auth_token=huggingface_token)
logger.info(f"Loaded data from {data_path}")
job_config = payload["config"]["params"][0]
job_config["model_name"] = payload["config"]["hub_model"]
train_data = data["train"]
valid_data = data["validation"]
classes = train_data.unique(LABEL_COLUMN)
label2id = {c: i for i, c in enumerate(classes)}
num_classes = len(classes)
model_name = job_config["model_name"]
device = job_config.get("device", "cuda")
# remove model_name from job config
del job_config["model_name"]
if num_classes == 2:
job_config["task"] = "text_binary_classification"
job_config = TextBinaryClassificationParams(**job_config)
elif num_classes > 2:
job_config["task"] = "text_multi_class_classification"
job_config = TextMultiClassClassificationParams(**job_config)
else:
raise ValueError("Invalid number of classes")
model_config = AutoConfig.from_pretrained(
model_name,
num_labels=num_classes,
)
model_config._num_labels = len(label2id)
model_config.label2id = label2id
model_config.id2label = {v: k for k, v in label2id.items()}
logger.info(model_config)
try:
model = AutoModelForSequenceClassification.from_pretrained(model_name, config=model_config)
except OSError:
model = AutoModelForSequenceClassification.from_pretrained(model_name, config=model_config, from_tf=True)
tokenizer = AutoTokenizer.from_pretrained(model_name)
train_dataset = Dataset(data=train_data, tokenizer=tokenizer, label2id=label2id, config=job_config)
valid_dataset = Dataset(data=valid_data, tokenizer=tokenizer, label2id=label2id, config=job_config)
logging_steps = int(0.2 * len(valid_dataset) / job_config.train_batch_size)
if logging_steps == 0:
logging_steps = 1
fp16 = True
if model_config.model_type in FP32_MODELS or device == "cpu":
fp16 = False
training_args = dict(
output_dir="/tmp/autotrain",
per_device_train_batch_size=job_config.train_batch_size,
per_device_eval_batch_size=2 * job_config.train_batch_size,
learning_rate=job_config.learning_rate,
num_train_epochs=job_config.num_train_epochs,
fp16=fp16,
load_best_model_at_end=True,
evaluation_strategy="epoch",
logging_steps=logging_steps,
save_total_limit=1,
save_strategy="epoch",
disable_tqdm=not bool(os.environ.get("ENABLE_TQDM", 0)),
gradient_accumulation_steps=job_config.gradient_accumulation_steps,
report_to="none",
auto_find_batch_size=True,
lr_scheduler_type=job_config.scheduler,
optim=job_config.optimizer,
warmup_ratio=job_config.warmup_ratio,
weight_decay=job_config.weight_decay,
max_grad_norm=job_config.max_grad_norm,
)
early_stop = EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0.01)
callbacks_to_use = [early_stop]
args = TrainingArguments(**training_args)
trainer_args = dict(
args=args,
model=model,
callbacks=callbacks_to_use,
compute_metrics=_binary_classification_metrics if num_classes == 2 else _multi_class_classification_metrics,
)
trainer = Trainer(
**trainer_args,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
)
trainer.train()
logger.info("Finished training")
logger.info(trainer.state.best_metric)
eval_scores = trainer.evaluate()
co2_consumed = co2_tracker.stop()
co2_consumed = co2_consumed * 1000 if co2_consumed is not None else 0
eval_scores = [f"{k}: {v}" for k, v in eval_scores.items()]
eval_scores = "\n\n".join(eval_scores)
model_card = MODEL_CARD.format(
language=payload["config"]["language"],
dataset=data_path,
co2=co2_consumed,
validation_metrics=eval_scores,
)
utils.save_model_card(model_card, model_path)
# save model, tokenizer and config
model = utils.update_model_config(trainer.model, job_config)
utils.save_tokenizer(tokenizer, model_path)
utils.save_model(model, model_path)
utils.remove_checkpoints(model_path=model_path)
# push model to hub
logger.info("Pushing model to Hub")
model_repo.git_pull()
model_repo.git_add()
model_repo.git_commit(commit_message="Commit From AutoTrain")
model_repo.git_push()
| autotrain-advanced-main | src/autotrain/trainers/text_classification.py |
import os
from pydantic import BaseModel
from autotrain import logger
class AutoTrainParams(BaseModel):
def save(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
path = os.path.join(output_dir, "training_params.json")
# save formatted json
with open(path, "w") as f:
f.write(self.json(indent=4))
def __str__(self):
data = self.dict()
data["token"] = "*****" if data.get("token") else None
return str(data)
def __init__(self, **data):
super().__init__(**data)
# Parameters not supplied by the user
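        # Note: a field explicitly passed with its default value is also counted as "not supplied" by this check.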
defaults = {f.name for f in self.__fields__.values() if f.default == self.__dict__[f.name]}
supplied = set(data.keys())
not_supplied = defaults - supplied
if not_supplied:
logger.warning(f"Parameters not supplied by user and set to default: {', '.join(not_supplied)}")
# Parameters that were supplied but not used
# This is a naive implementation. It might catch some internal Pydantic params.
unused = supplied - set(self.__fields__)
if unused:
logger.warning(f"Parameters supplied but not used: {', '.join(unused)}")
| autotrain-advanced-main | src/autotrain/trainers/common.py |
import os
from itertools import chain
import torch
from datasets import Dataset, load_dataset
from peft import LoraConfig, get_peft_model, prepare_model_for_int8_training
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
)
from autotrain import logger, utils
from autotrain.params import LMTrainingParams
TEXT_COLUMN = "autotrain_text"
IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = "[PAD]"
DEFAULT_EOS_TOKEN = "</s>"
DEFAULT_BOS_TOKEN = "</s>"
DEFAULT_UNK_TOKEN = "</s>"
EVAL_METRICS = ("eval_loss",)
MODEL_CARD = """
---
tags:
- autotrain
- text-generation
widget:
- text: "I love AutoTrain because "
datasets:
- {dataset}
co2_eq_emissions:
emissions: {co2}
---
# Model Trained Using AutoTrain
- Problem type: Text Generation
- CO2 Emissions (in grams): {co2:.4f}
## Validation Metrics
{validation_metrics}
"""
HANDLER_CONTENT = """
from typing import Dict, List, Any
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, PeftConfig
import torch
class EndpointHandler:
def __init__(self, path=""):
# load model and processor from path
        self.model = AutoModelForCausalLM.from_pretrained(
path, torch_dtype=torch.float16, load_in_8bit=True, device_map="auto"
)
self.tokenizer = AutoTokenizer.from_pretrained(path)
self.model.eval()
def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
'''
Args:
data (:dict:):
The payload with the text prompt and generation parameters.
'''
# process input
inputs = data.pop("inputs", data)
parameters = data.pop("parameters", None)
# preprocess
input_ids = self.tokenizer(inputs, return_tensors="pt").input_ids
# pass inputs with all kwargs in data
if parameters is not None:
outputs = self.model.generate(input_ids=input_ids, **parameters)
else:
outputs = self.model.generate(input_ids=input_ids)
# postprocess the prediction
prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
return [{"generated_text": prediction}]
"""
HANDLER_CONTENT_PEFT = """
from typing import Dict, List, Any
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, PeftConfig
import torch
class EndpointHandler:
def __init__(self, path=""):
# load model and processor from path
config = PeftConfig.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(
config.base_model_name_or_path, torch_dtype=torch.float16, load_in_8bit=True, device_map="auto"
)
self.model = PeftModel.from_pretrained(model, path)
self.tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
self.model.eval()
def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
'''
Args:
data (:dict:):
The payload with the text prompt and generation parameters.
'''
# process input
inputs = data.pop("inputs", data)
parameters = data.pop("parameters", None)
# preprocess
input_ids = self.tokenizer(inputs, return_tensors="pt").input_ids
# pass inputs with all kwargs in data
if parameters is not None:
outputs = self.model.generate(input_ids=input_ids, **parameters)
else:
outputs = self.model.generate(input_ids=input_ids)
# postprocess the prediction
prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
return [{"generated_text": prediction}]
"""
REQUIREMENTS = """
accelerate==0.18.0
transformers==4.28.1
git+https://github.com/huggingface/peft.git
bitsandbytes
tokenizers>=0.13.3
"""
def _eval_metrics(pred):
raw_predictions, labels = pred
return 0
def tokenize(tokenizer, prompt, add_eos_token=True):
result = tokenizer(
prompt,
truncation=True,
max_length=tokenizer.model_max_length,
padding=False,
return_tensors=None,
)
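    # Append the EOS token when the prompt does not already end with it, dropping the last token first if the sequence is at the model's maximum length.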
if result["input_ids"][-1] != tokenizer.eos_token_id and add_eos_token:
if len(result["input_ids"]) >= tokenizer.model_max_length:
result["input_ids"] = result["input_ids"][:-1]
result["attention_mask"] = result["attention_mask"][:-1]
result["input_ids"].append(tokenizer.eos_token_id)
result["attention_mask"].append(1)
result["labels"] = result["input_ids"].copy()
return result
def _process_data(data, tokenizer, job_config):
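    # Keep only the text column, replace missing values with empty strings, and optionally append the EOS token before rebuilding the Dataset.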
data = data.to_pandas()
data = data.fillna("")
data = data[[TEXT_COLUMN]]
if job_config.add_eos_token:
data[TEXT_COLUMN] = data[TEXT_COLUMN] + tokenizer.eos_token
data = Dataset.from_pandas(data)
return data
def group_texts(examples, block_size):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
@utils.job_watcher
def train(co2_tracker, payload, huggingface_token, model_path):
# create model repo
model_repo = utils.create_repo(
project_name=payload["proj_name"],
autotrain_user=payload["username"],
huggingface_token=huggingface_token,
model_path=model_path,
)
data_path = f"{payload['username']}/autotrain-data-{payload['proj_name']}"
data = load_dataset(data_path, use_auth_token=huggingface_token)
logger.info(f"Loaded data from {data_path}")
job_config = payload["config"]["params"][0]
job_config["model_name"] = payload["config"]["hub_model"]
train_data = data["train"]
valid_data = data["validation"]
model_name = job_config["model_name"]
del job_config["model_name"]
job_config = LMTrainingParams(**job_config)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=huggingface_token)
if tokenizer.model_max_length > 2048:
tokenizer.model_max_length = 2048
m_arch = utils.get_model_architecture(model_name).lower()
logger.info(f"Model architecture: {m_arch}")
use_peft = False
use_int8 = False
if "llama" in m_arch or "rwforcausallm" in m_arch:
use_peft = True
use_int8 = True
if "gptneo" in m_arch:
use_peft = True
use_int8 = False
# process data
train_data = _process_data(data=train_data, tokenizer=tokenizer, job_config=job_config)
valid_data = _process_data(data=valid_data, tokenizer=tokenizer, job_config=job_config)
model_config = AutoConfig.from_pretrained(
model_name,
use_auth_token=huggingface_token,
trust_remote_code=True,
)
logger.info(model_config)
if use_peft:
try:
model = AutoModelForCausalLM.from_pretrained(
model_name,
config=model_config,
use_auth_token=huggingface_token,
torch_dtype=torch.float16,
load_in_8bit=use_int8,
device_map="auto",
trust_remote_code=True,
)
except OSError:
model = AutoModelForCausalLM.from_pretrained(
model_name,
config=model_config,
use_auth_token=huggingface_token,
from_tf=True,
torch_dtype=torch.float16,
load_in_8bit=use_int8,
device_map="auto",
trust_remote_code=True,
)
else:
try:
model = AutoModelForCausalLM.from_pretrained(
model_name,
config=model_config,
use_auth_token=huggingface_token,
trust_remote_code=True,
)
except OSError:
model = AutoModelForCausalLM.from_pretrained(
model_name,
config=model_config,
use_auth_token=huggingface_token,
from_tf=True,
trust_remote_code=True,
)
# PEFT:
model.resize_token_embeddings(len(tokenizer))
if use_peft:
if use_int8:
model = prepare_model_for_int8_training(model)
peft_config = LoraConfig(
r=job_config.lora_r,
lora_alpha=job_config.lora_alpha,
lora_dropout=job_config.lora_dropout,
bias="none",
task_type="CAUSAL_LM",
target_modules=[
"query_key_value",
"dense",
"dense_h_to_4h",
"dense_4h_to_h",
]
if "rwforcausallm" in m_arch
else None,
)
model = get_peft_model(model, peft_config)
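    # Resolve the chunk size used for grouping texts: -1/None falls back to tokenizer.model_max_length (capped at 1024); explicit values are clipped to the tokenizer limit.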
if job_config.block_size == -1:
job_config.block_size = None
if job_config.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
block_size = 1024
else:
if job_config.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({job_config['block_size']}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(job_config.block_size, tokenizer.model_max_length)
logger.info(model)
def tokenize_function(examples):
output = tokenizer(examples[TEXT_COLUMN])
return output
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
train_data = train_data.map(
tokenize_function,
batched=True,
num_proc=4,
remove_columns=list(train_data.features),
desc="Running tokenizer on train dataset",
)
valid_data = valid_data.map(
tokenize_function,
batched=True,
num_proc=4,
remove_columns=list(valid_data.features),
desc="Running tokenizer on validation dataset",
)
train_data = train_data.map(
group_texts,
batched=True,
num_proc=4,
desc=f"Grouping texts in chunks of {block_size}",
)
valid_data = valid_data.map(
group_texts,
batched=True,
num_proc=4,
desc=f"Grouping texts in chunks of {block_size}",
)
logger.info("creating trainer")
# trainer specific
logging_steps = int(0.2 * len(valid_data) / job_config.train_batch_size)
if logging_steps == 0:
logging_steps = 1
training_args = dict(
output_dir=model_path,
per_device_train_batch_size=job_config.train_batch_size,
per_device_eval_batch_size=2 * job_config.train_batch_size,
learning_rate=job_config.learning_rate,
num_train_epochs=job_config.num_train_epochs,
evaluation_strategy="epoch",
logging_steps=logging_steps,
save_total_limit=1,
save_strategy="epoch",
disable_tqdm=not bool(os.environ.get("ENABLE_TQDM", 0)),
gradient_accumulation_steps=job_config.gradient_accumulation_steps,
report_to="none",
auto_find_batch_size=True,
lr_scheduler_type=job_config.scheduler,
optim=job_config.optimizer,
warmup_ratio=job_config.warmup_ratio,
weight_decay=job_config.weight_decay,
max_grad_norm=job_config.max_grad_norm,
fp16=True,
)
args = TrainingArguments(**training_args)
trainer_args = dict(
args=args,
model=model,
)
data_collator = default_data_collator
trainer = Trainer(
**trainer_args,
train_dataset=train_data,
eval_dataset=valid_data,
tokenizer=tokenizer,
data_collator=data_collator,
)
model.config.use_cache = False
trainer.train()
logger.info("Finished training")
logger.info(trainer.state.best_metric)
eval_scores = trainer.evaluate()
# create and save model card
co2_consumed = co2_tracker.stop()
co2_consumed = co2_consumed * 1000 if co2_consumed is not None else 0
eval_scores = [f"{k[len('eval_'):]}: {v}" for k, v in eval_scores.items() if k in EVAL_METRICS]
eval_scores = "\n\n".join(eval_scores)
model_card = MODEL_CARD.format(
language=payload["config"]["language"],
dataset=data_path,
co2=co2_consumed,
validation_metrics=eval_scores,
)
logger.info(model_card)
utils.save_model_card(model_card, model_path)
utils.create_file(
filename="handler.py",
file_content=HANDLER_CONTENT_PEFT.strip() if use_peft else HANDLER_CONTENT.strip(),
model_path=model_path,
)
utils.create_file(filename="requirements.txt", file_content=REQUIREMENTS.strip(), model_path=model_path)
# save model, tokenizer and config
model = utils.update_model_config(trainer.model, job_config)
utils.save_tokenizer(tokenizer, model_path)
utils.save_model(model, model_path)
utils.remove_checkpoints(model_path=model_path)
# push model to hub
logger.info("Pushing model to Hub")
model_repo.git_pull()
model_repo.git_add()
model_repo.git_commit(commit_message="Commit From AutoTrain")
model_repo.git_push()
| autotrain-advanced-main | src/autotrain/trainers/lm_trainer.py |
from typing import List, Union
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class TabularParams(AutoTrainParams):
data_path: str = Field(None, title="Data path")
model: str = Field("xgboost", title="Model name")
username: str = Field(None, title="Hugging Face Username")
seed: int = Field(42, title="Seed")
train_split: str = Field("train", title="Train split")
valid_split: str = Field(None, title="Validation split")
project_name: str = Field("Project Name", title="Output directory")
token: str = Field(None, title="Hub Token")
push_to_hub: bool = Field(False, title="Push to hub")
id_column: str = Field("id", title="ID column")
target_columns: Union[List[str], str] = Field(["target"], title="Target column(s)")
repo_id: str = Field(None, title="Repo ID")
categorical_columns: List[str] = Field(None, title="Categorical columns")
numerical_columns: List[str] = Field(None, title="Numerical columns")
task: str = Field("classification", title="Task")
num_trials: int = Field(10, title="Number of trials")
time_limit: int = Field(600, title="Time limit")
categorical_imputer: str = Field(None, title="Categorical imputer")
numerical_imputer: str = Field(None, title="Numerical imputer")
numeric_scaler: str = Field(None, title="Numeric scaler")
| autotrain-advanced-main | src/autotrain/trainers/tabular/params.py |
autotrain-advanced-main | src/autotrain/trainers/tabular/__init__.py |
|
import copy
from collections import defaultdict
from dataclasses import dataclass
from functools import partial
from typing import List, Optional
import numpy as np
from sklearn import ensemble, impute, linear_model
from sklearn import metrics as skmetrics
from sklearn import naive_bayes, neighbors, pipeline, preprocessing, svm, tree
from xgboost import XGBClassifier, XGBRegressor
MARKDOWN = """
---
tags:
- autotrain
- tabular
- {task}
- tabular-{task}
datasets:
- {dataset}
---
# Model Trained Using AutoTrain
- Problem type: Tabular {task}
## Validation Metrics
{metrics}
## Best Params
{params}
## Usage
```python
import json
import joblib
import pandas as pd
model = joblib.load('model.joblib')
config = json.load(open('config.json'))
features = config['features']
# data = pd.read_csv("data.csv")
data = data[features]
predictions = model.predict(data) # or model.predict_proba(data)
# predictions can be converted to original labels using label_encoders.pkl
```
"""
_MODELS: dict = defaultdict(dict)
_MODELS["xgboost"]["classification"] = XGBClassifier
_MODELS["xgboost"]["regression"] = XGBRegressor
_MODELS["logistic_regression"]["classification"] = linear_model.LogisticRegression
_MODELS["logistic_regression"]["regression"] = linear_model.LogisticRegression
_MODELS["random_forest"]["classification"] = ensemble.RandomForestClassifier
_MODELS["random_forest"]["regression"] = ensemble.RandomForestRegressor
_MODELS["extra_trees"]["classification"] = ensemble.ExtraTreesClassifier
_MODELS["extra_trees"]["regression"] = ensemble.ExtraTreesRegressor
_MODELS["gradient_boosting"]["classification"] = ensemble.GradientBoostingClassifier
_MODELS["gradient_boosting"]["regression"] = ensemble.GradientBoostingRegressor
_MODELS["adaboost"]["classification"] = ensemble.AdaBoostClassifier
_MODELS["adaboost"]["regression"] = ensemble.AdaBoostRegressor
_MODELS["ridge"]["classification"] = linear_model.RidgeClassifier
_MODELS["ridge"]["regression"] = linear_model.Ridge
_MODELS["svm"]["classification"] = svm.LinearSVC
_MODELS["svm"]["regression"] = svm.LinearSVR
_MODELS["decision_tree"]["classification"] = tree.DecisionTreeClassifier
_MODELS["decision_tree"]["regression"] = tree.DecisionTreeRegressor
_MODELS["lasso"]["regression"] = linear_model.Lasso
_MODELS["linear_regression"]["regression"] = linear_model.LinearRegression
_MODELS["naive_bayes"]["classification"] = naive_bayes.GaussianNB
_MODELS["knn"]["classification"] = neighbors.KNeighborsClassifier
_MODELS["knn"]["regression"] = neighbors.KNeighborsRegressor
CLASSIFICATION_TASKS = ("binary_classification", "multi_class_classification", "multi_label_classification")
REGRESSION_TASKS = ("single_column_regression", "multi_column_regression")
@dataclass
class TabularMetrics:
sub_task: str
labels: Optional[List] = None
def __post_init__(self):
if self.sub_task == "binary_classification":
self.valid_metrics = {
"auc": skmetrics.roc_auc_score,
"logloss": skmetrics.log_loss,
"f1": skmetrics.f1_score,
"accuracy": skmetrics.accuracy_score,
"precision": skmetrics.precision_score,
"recall": skmetrics.recall_score,
}
elif self.sub_task == "multi_class_classification":
self.valid_metrics = {
"logloss": partial(skmetrics.log_loss, labels=self.labels),
"accuracy": skmetrics.accuracy_score,
"mlogloss": partial(skmetrics.log_loss, labels=self.labels),
"f1_macro": partial(skmetrics.f1_score, average="macro", labels=self.labels),
"f1_micro": partial(skmetrics.f1_score, average="micro", labels=self.labels),
"f1_weighted": partial(skmetrics.f1_score, average="weighted", labels=self.labels),
"precision_macro": partial(skmetrics.precision_score, average="macro", labels=self.labels),
"precision_micro": partial(skmetrics.precision_score, average="micro", labels=self.labels),
"precision_weighted": partial(skmetrics.precision_score, average="weighted", labels=self.labels),
"recall_macro": partial(skmetrics.recall_score, average="macro", labels=self.labels),
"recall_micro": partial(skmetrics.recall_score, average="micro", labels=self.labels),
"recall_weighted": partial(skmetrics.recall_score, average="weighted", labels=self.labels),
}
elif self.sub_task in ("single_column_regression", "multi_column_regression"):
self.valid_metrics = {
"r2": skmetrics.r2_score,
"mse": skmetrics.mean_squared_error,
"mae": skmetrics.mean_absolute_error,
"rmse": partial(skmetrics.mean_squared_error, squared=False),
"rmsle": partial(skmetrics.mean_squared_log_error, squared=False),
}
elif self.sub_task == "multi_label_classification":
self.valid_metrics = {
"logloss": skmetrics.log_loss,
}
else:
raise ValueError("Invalid problem type")
def calculate(self, y_true, y_pred):
metrics = {}
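        # Classification predictions arrive as probabilities: binary metrics use column 1 (thresholded at 0.5 where needed), multi-class label metrics take the argmax, and RMSLE clips negative regression predictions to zero.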
for metric_name, metric_func in self.valid_metrics.items():
if self.sub_task == "binary_classification":
if metric_name == "auc":
metrics[metric_name] = metric_func(y_true, y_pred[:, 1])
elif metric_name == "logloss":
metrics[metric_name] = metric_func(y_true, y_pred)
else:
metrics[metric_name] = metric_func(y_true, y_pred[:, 1] >= 0.5)
elif self.sub_task == "mul":
if metric_name in (
"accuracy",
"f1_macro",
"f1_micro",
"f1_weighted",
"precision_macro",
"precision_micro",
"precision_weighted",
"recall_macro",
"recall_micro",
"recall_weighted",
):
metrics[metric_name] = metric_func(y_true, np.argmax(y_pred, axis=1))
else:
metrics[metric_name] = metric_func(y_true, y_pred)
else:
if metric_name == "rmsle":
temp_pred = copy.deepcopy(y_pred)
temp_pred = np.clip(y_pred, 0, None)
metrics[metric_name] = metric_func(y_true, temp_pred)
else:
metrics[metric_name] = metric_func(y_true, y_pred)
return metrics
class TabularModel:
def __init__(self, model, preprocessor, sub_task, params):
self.model = model
self.preprocessor = preprocessor
self.sub_task = sub_task
self.params = params
self.use_predict_proba = True
_model = self._get_model()
if self.preprocessor is not None:
self.pipeline = pipeline.Pipeline([("preprocessor", self.preprocessor), ("model", _model)])
else:
self.pipeline = pipeline.Pipeline([("model", _model)])
def _get_model(self):
if self.model in _MODELS:
if self.sub_task in CLASSIFICATION_TASKS:
if self.model in ("svm", "ridge"):
self.use_predict_proba = False
return _MODELS[self.model]["classification"](**self.params)
elif self.sub_task in REGRESSION_TASKS:
self.use_predict_proba = False
return _MODELS[self.model]["regression"](**self.params)
else:
raise ValueError("Invalid task")
else:
raise ValueError("Invalid model")
def get_params(trial, model, task):
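    # Build the per-model hyperparameter search space; `trial` is an Optuna trial object.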
if model == "xgboost":
params = {
"learning_rate": trial.suggest_float("learning_rate", 1e-2, 0.25, log=True),
"reg_lambda": trial.suggest_float("reg_lambda", 1e-8, 100.0, log=True),
"reg_alpha": trial.suggest_float("reg_alpha", 1e-8, 100.0, log=True),
"subsample": trial.suggest_float("subsample", 0.1, 1.0),
"colsample_bytree": trial.suggest_float("colsample_bytree", 0.1, 1.0),
"max_depth": trial.suggest_int("max_depth", 1, 9),
"early_stopping_rounds": trial.suggest_int("early_stopping_rounds", 100, 500),
"n_estimators": trial.suggest_categorical("n_estimators", [7000, 15000, 20000]),
"tree_method": "hist",
"random_state": 42,
}
return params
if model == "logistic_regression":
if task in CLASSIFICATION_TASKS:
params = {
"C": trial.suggest_float("C", 1e-8, 1e3, log=True),
"fit_intercept": trial.suggest_categorical("fit_intercept", [True, False]),
"solver": trial.suggest_categorical("solver", ["liblinear", "saga"]),
"penalty": trial.suggest_categorical("penalty", ["l1", "l2"]),
"n_jobs": -1,
}
return params
raise ValueError("Task not supported")
if model == "random_forest":
params = {
"n_estimators": trial.suggest_int("n_estimators", 10, 10000),
"max_depth": trial.suggest_int("max_depth", 2, 15),
"max_features": trial.suggest_categorical("max_features", ["auto", "sqrt", "log2", None]),
"min_samples_split": trial.suggest_int("min_samples_split", 2, 20),
"min_samples_leaf": trial.suggest_int("min_samples_leaf", 1, 20),
"bootstrap": trial.suggest_categorical("bootstrap", [True, False]),
"n_jobs": -1,
}
if task in CLASSIFICATION_TASKS:
params["criterion"] = trial.suggest_categorical("criterion", ["gini", "entropy"])
return params
if task in REGRESSION_TASKS:
params["criterion"] = trial.suggest_categorical(
"criterion", ["squared_error", "absolute_error", "poisson"]
)
return params
raise ValueError("Task not supported")
if model == "extra_trees":
params = {
"n_estimators": trial.suggest_int("n_estimators", 10, 10000),
"max_depth": trial.suggest_int("max_depth", 2, 15),
"max_features": trial.suggest_categorical("max_features", ["auto", "sqrt", "log2", None]),
"min_samples_split": trial.suggest_int("min_samples_split", 2, 20),
"min_samples_leaf": trial.suggest_int("min_samples_leaf", 1, 20),
"bootstrap": trial.suggest_categorical("bootstrap", [True, False]),
"n_jobs": -1,
}
if task in CLASSIFICATION_TASKS:
params["criterion"] = trial.suggest_categorical("criterion", ["gini", "entropy"])
return params
if task in REGRESSION_TASKS:
params["criterion"] = trial.suggest_categorical("criterion", ["squared_error", "absolute_error"])
return params
raise ValueError("Task not supported")
if model == "decision_tree":
params = {
"max_depth": trial.suggest_int("max_depth", 1, 15),
"min_samples_split": trial.suggest_int("min_samples_split", 2, 20),
"min_samples_leaf": trial.suggest_int("min_samples_leaf", 1, 20),
"max_features": trial.suggest_categorical("max_features", ["auto", "sqrt", "log2", None]),
"splitter": trial.suggest_categorical("splitter", ["best", "random"]),
}
if task in CLASSIFICATION_TASKS:
params["criterion"] = trial.suggest_categorical("criterion", ["gini", "entropy"])
return params
if task in REGRESSION_TASKS:
params["criterion"] = trial.suggest_categorical(
"criterion", ["squared_error", "absolute_error", "friedman_mse", "poisson"]
)
return params
raise ValueError("Task not supported")
if model == "linear_regression":
if task in REGRESSION_TASKS:
params = {
"fit_intercept": trial.suggest_categorical("fit_intercept", [True, False]),
}
return params
raise ValueError("Task not supported")
if model == "svm":
if task in CLASSIFICATION_TASKS:
params = {
"C": trial.suggest_float("C", 1e-8, 1e3, log=True),
"fit_intercept": trial.suggest_categorical("fit_intercept", [True, False]),
"penalty": "l2",
"max_iter": trial.suggest_int("max_iter", 1000, 10000),
}
return params
if task in REGRESSION_TASKS:
params = {
"C": trial.suggest_float("C", 1e-8, 1e3, log=True),
"fit_intercept": trial.suggest_categorical("fit_intercept", [True, False]),
"loss": trial.suggest_categorical("loss", ["epsilon_insensitive", "squared_epsilon_insensitive"]),
"epsilon": trial.suggest_float("epsilon", 1e-8, 1e-1, log=True),
"max_iter": trial.suggest_int("max_iter", 1000, 10000),
}
return params
raise ValueError("Task not supported")
if model == "ridge":
params = {
"alpha": trial.suggest_float("alpha", 1e-8, 1e3, log=True),
"fit_intercept": trial.suggest_categorical("fit_intercept", [True, False]),
"max_iter": trial.suggest_int("max_iter", 1000, 10000),
}
if task in CLASSIFICATION_TASKS:
return params
if task in REGRESSION_TASKS:
return params
raise ValueError("Task not supported")
if model == "lasso":
if task in REGRESSION_TASKS:
params = {
"alpha": trial.suggest_float("alpha", 1e-8, 1e3, log=True),
"fit_intercept": trial.suggest_categorical("fit_intercept", [True, False]),
"max_iter": trial.suggest_int("max_iter", 1000, 10000),
}
return params
raise ValueError("Task not supported")
if model == "knn":
params = {
"n_neighbors": trial.suggest_int("n_neighbors", 1, 25),
"weights": trial.suggest_categorical("weights", ["uniform", "distance"]),
"algorithm": trial.suggest_categorical("algorithm", ["ball_tree", "kd_tree", "brute"]),
"leaf_size": trial.suggest_int("leaf_size", 1, 100),
"p": trial.suggest_categorical("p", [1, 2]),
"metric": trial.suggest_categorical("metric", ["minkowski", "euclidean", "manhattan"]),
}
if task in CLASSIFICATION_TASKS or task in REGRESSION_TASKS:
return params
raise ValueError("Task not supported")
return ValueError("Invalid model")
def get_imputer(imputer_name):
if imputer_name is None:
return None
if imputer_name == "median":
return impute.SimpleImputer(strategy="median")
if imputer_name == "mean":
return impute.SimpleImputer(strategy="mean")
if imputer_name == "most_frequent":
return impute.SimpleImputer(strategy="most_frequent")
raise ValueError("Invalid imputer")
def get_scaler(scaler_name):
if scaler_name is None:
return None
if scaler_name == "standard":
return preprocessing.StandardScaler()
if scaler_name == "minmax":
return preprocessing.MinMaxScaler()
if scaler_name == "robust":
return preprocessing.RobustScaler()
if scaler_name == "normal":
return preprocessing.Normalizer()
raise ValueError("Invalid scaler")
def get_metric_direction(sub_task):
if sub_task == "binary_classification":
return "logloss", "minimize"
if sub_task == "multi_class_classification":
return "mlogloss", "minimize"
if sub_task == "single_column_regression":
return "rmse", "minimize"
if sub_task == "multi_label_classification":
return "logloss", "minimize"
if sub_task == "multi_column_regression":
return "rmse", "minimize"
raise ValueError("Invalid sub_task")
def get_categorical_columns(df):
return list(df.select_dtypes(include=["category", "object"]).columns)
def get_numerical_columns(df):
return list(df.select_dtypes(include=["number"]).columns)
def create_model_card(config, sub_task, best_params, best_metrics):
best_metrics = "\n".join([f"- {k}: {v}" for k, v in best_metrics.items()])
best_params = "\n".join([f"- {k}: {v}" for k, v in best_params.items()])
return MARKDOWN.format(
task=config.task,
dataset=config.data_path,
metrics=best_metrics,
params=best_params,
)
| autotrain-advanced-main | src/autotrain/trainers/tabular/utils.py |
import argparse
import json
import os
from functools import partial
import joblib
import numpy as np
import optuna
import pandas as pd
from datasets import load_dataset
from huggingface_hub import HfApi
from sklearn import pipeline, preprocessing
from sklearn.compose import ColumnTransformer
from autotrain import logger
from autotrain.trainers.tabular import utils
from autotrain.trainers.tabular.params import TabularParams
from autotrain.utils import monitor
def parse_args():
# get training_config.json from the end user
parser = argparse.ArgumentParser()
parser.add_argument("--training_config", type=str, required=True)
return parser.parse_args()
def optimize(trial, model_name, xtrain, xvalid, ytrain, yvalid, eval_metric, task, preprocessor):
if isinstance(trial, dict):
params = trial
else:
params = utils.get_params(trial, model_name, task)
labels = None
if task == "multi_class_classification":
labels = np.unique(ytrain)
metrics = utils.TabularMetrics(sub_task=task, labels=labels)
if task in ("binary_classification", "multi_class_classification", "single_column_regression"):
ytrain = ytrain.ravel()
yvalid = yvalid.ravel()
if preprocessor is not None:
try:
xtrain = preprocessor.fit_transform(xtrain)
xvalid = preprocessor.transform(xvalid)
except ValueError:
logger.info("Preprocessing failed, using nan_to_num")
train_cols = xtrain.columns.tolist()
valid_cols = xvalid.columns.tolist()
xtrain = np.nan_to_num(xtrain)
xvalid = np.nan_to_num(xvalid)
# convert back to dataframe
xtrain = pd.DataFrame(xtrain, columns=train_cols)
xvalid = pd.DataFrame(xvalid, columns=valid_cols)
xtrain = preprocessor.fit_transform(xtrain)
xvalid = preprocessor.transform(xvalid)
if model_name == "xgboost":
params["eval_metric"] = eval_metric
_model = utils.TabularModel(model_name, preprocessor=None, sub_task=task, params=params)
model = _model.pipeline
models = []
if task in ("multi_label_classification", "multi_column_regression"):
# also multi_column_regression
ypred = []
models = [model] * ytrain.shape[1]
for idx, _m in enumerate(models):
if model_name == "xgboost":
_m.fit(
xtrain,
ytrain[:, idx],
model__eval_set=[(xvalid, yvalid[:, idx])],
model__verbose=False,
)
else:
_m.fit(xtrain, ytrain[:, idx])
if task == "multi_column_regression":
ypred_temp = _m.predict(xvalid)
else:
if _model.use_predict_proba:
ypred_temp = _m.predict_proba(xvalid)[:, 1]
else:
ypred_temp = _m.predict(xvalid)
ypred.append(ypred_temp)
ypred = np.column_stack(ypred)
else:
models = [model]
if model_name == "xgboost":
model.fit(
xtrain,
ytrain,
model__eval_set=[(xvalid, yvalid)],
model__verbose=False,
)
else:
models[0].fit(xtrain, ytrain)
if _model.use_predict_proba:
ypred = models[0].predict_proba(xvalid)
else:
ypred = models[0].predict(xvalid)
if task == "multi_class_classification":
if ypred.reshape(xvalid.shape[0], -1).shape[1] != len(labels):
ypred_ohe = np.zeros((xvalid.shape[0], len(labels)))
ypred_ohe[np.arange(xvalid.shape[0]), ypred] = 1
ypred = ypred_ohe
if task == "binary_classification":
if ypred.reshape(xvalid.shape[0], -1).shape[1] != 2:
ypred = np.column_stack([1 - ypred, ypred])
# calculate metric
metric_dict = metrics.calculate(yvalid, ypred)
# change eval_metric key to loss
if eval_metric in metric_dict:
metric_dict["loss"] = metric_dict[eval_metric]
logger.info(f"Metrics: {metric_dict}")
if isinstance(trial, dict):
return models, preprocessor, metric_dict
return metric_dict["loss"]
@monitor
def train(config):
if isinstance(config, dict):
config = TabularParams(**config)
if config.repo_id is None and config.username is not None:
config.repo_id = f"{config.username}/{config.project_name}"
logger.info("Starting training...")
logger.info(f"Training config: {config}")
train_data = None
valid_data = None
train_path = f"{config.data_path}/{config.train_split}.csv"
if os.path.exists(train_path):
logger.info("loading dataset from csv")
train_data = pd.read_csv(train_path)
else:
train_data = load_dataset(
config.data_path,
split=config.train_split,
token=config.token,
)
train_data = train_data.to_pandas()
if config.valid_split is not None:
valid_path = f"{config.data_path}/{config.valid_split}.csv"
if os.path.exists(valid_path):
logger.info("loading dataset from csv")
valid_data = pd.read_csv(valid_path)
else:
valid_data = load_dataset(
config.data_path,
split=config.valid_split,
token=config.token,
)
valid_data = valid_data.to_pandas()
if valid_data is None:
raise Exception("valid_data is None. Please provide a valid_split for tabular training.")
# determine which columns are categorical
if config.categorical_columns is None:
config.categorical_columns = utils.get_categorical_columns(train_data)
if config.numerical_columns is None:
config.numerical_columns = utils.get_numerical_columns(train_data)
_id_target_cols = (
[config.id_column] + config.target_columns if config.id_column is not None else config.target_columns
)
config.numerical_columns = [c for c in config.numerical_columns if c not in _id_target_cols]
config.categorical_columns = [c for c in config.categorical_columns if c not in _id_target_cols]
useful_columns = config.categorical_columns + config.numerical_columns
logger.info(f"Categorical columns: {config.categorical_columns}")
logger.info(f"Numerical columns: {config.numerical_columns}")
# convert object columns to categorical
for col in config.categorical_columns:
train_data[col] = train_data[col].astype("category")
valid_data[col] = valid_data[col].astype("category")
logger.info(f"Useful columns: {useful_columns}")
target_encoders = {}
if config.task == "classification":
for target_column in config.target_columns:
target_encoder = preprocessing.LabelEncoder()
target_encoder.fit(train_data[target_column])
target_encoders[target_column] = target_encoder
# encode target columns in train and valid data
for k, v in target_encoders.items():
train_data.loc[:, k] = v.transform(train_data[k])
valid_data.loc[:, k] = v.transform(valid_data[k])
numeric_transformer = "passthrough"
categorical_transformer = "passthrough"
transformers = []
preprocessor = None
numeric_steps = []
imputer = utils.get_imputer(config.numerical_imputer)
scaler = utils.get_scaler(config.numeric_scaler)
if imputer is not None:
numeric_steps.append(("num_imputer", imputer))
if scaler is not None:
numeric_steps.append(("num_scaler", scaler))
if len(numeric_steps) > 0:
numeric_transformer = pipeline.Pipeline(numeric_steps)
transformers.append(("numeric", numeric_transformer, config.numerical_columns))
categorical_steps = []
imputer = utils.get_imputer(config.categorical_imputer)
if imputer is not None:
categorical_steps.append(("cat_imputer", imputer))
if len(config.categorical_columns) > 0:
if config.model in ("xgboost", "lightgbm", "randomforest", "catboost", "extratrees"):
categorical_steps.append(
(
"cat_encoder",
preprocessing.OrdinalEncoder(
handle_unknown="use_encoded_value",
categories="auto",
unknown_value=np.nan,
),
)
)
else:
categorical_steps.append(
(
"cat_encoder",
preprocessing.OneHotEncoder(handle_unknown="ignore"),
)
)
if len(categorical_steps) > 0:
categorical_transformer = pipeline.Pipeline(categorical_steps)
transformers.append(("categorical", categorical_transformer, config.categorical_columns))
if len(transformers) > 0:
preprocessor = ColumnTransformer(transformers=transformers, verbose=True, n_jobs=-1)
logger.info(f"Preprocessor: {preprocessor}")
xtrain = train_data[useful_columns].reset_index(drop=True)
xvalid = valid_data[useful_columns].reset_index(drop=True)
ytrain = train_data[config.target_columns].values
yvalid = valid_data[config.target_columns].values
# determine sub_task
if config.task == "classification":
if len(target_encoders) == 1:
if len(target_encoders[config.target_columns[0]].classes_) == 2:
sub_task = "binary_classification"
else:
sub_task = "multi_class_classification"
else:
sub_task = "multi_label_classification"
else:
if len(config.target_columns) > 1:
sub_task = "multi_column_regression"
else:
sub_task = "single_column_regression"
eval_metric, direction = utils.get_metric_direction(sub_task)
args = {
"model_name": config.model,
"xtrain": xtrain,
"xvalid": xvalid,
"ytrain": ytrain,
"yvalid": yvalid,
"eval_metric": eval_metric,
"task": sub_task,
"preprocessor": preprocessor,
}
optimize_func = partial(optimize, **args)
study = optuna.create_study(direction=direction, study_name="AutoTrain")
study.optimize(optimize_func, n_trials=config.num_trials, timeout=config.time_limit)
best_params = study.best_params
logger.info(f"Best params: {best_params}")
best_models, best_preprocessors, best_metrics = optimize(best_params, **args)
models = (
[pipeline.Pipeline([("preprocessor", best_preprocessors), ("model", m)]) for m in best_models]
if best_preprocessors is not None
else best_models
)
joblib.dump(
models[0] if len(models) == 1 else models,
os.path.join(config.project_name, "model.joblib"),
)
joblib.dump(target_encoders, os.path.join(config.project_name, "target_encoders.joblib"))
model_card = utils.create_model_card(config, sub_task, best_params, best_metrics)
if model_card is not None:
with open(os.path.join(config.project_name, "README.md"), "w") as fp:
fp.write(f"{model_card}")
# remove token key from training_params.json located in output directory
# first check if file exists
if os.path.exists(f"{config.project_name}/training_params.json"):
training_params = json.load(open(f"{config.project_name}/training_params.json"))
training_params.pop("token")
json.dump(training_params, open(f"{config.project_name}/training_params.json", "w"))
# save model card to output directory as README.md
with open(f"{config.project_name}/README.md", "w") as f:
f.write(model_card)
if config.push_to_hub:
logger.info("Pushing model to hub...")
api = HfApi(token=config.token)
api.create_repo(repo_id=config.repo_id, repo_type="model", private=True)
api.upload_folder(folder_path=config.project_name, repo_id=config.repo_id, repo_type="model")
if "SPACE_ID" in os.environ:
# shut down the space
logger.info("Pausing space...")
api = HfApi(token=config.token)
api.pause_space(repo_id=os.environ["SPACE_ID"])
if "ENDPOINT_ID" in os.environ:
# shut down the endpoint
logger.info("Pausing endpoint...")
utils.pause_endpoint(config)
if __name__ == "__main__":
args = parse_args()
training_config = json.load(open(args.training_config))
config = TabularParams(**training_config)
train(config)
| autotrain-advanced-main | src/autotrain/trainers/tabular/__main__.py |
from typing import Dict
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class GenericParams(AutoTrainParams):
username: str = Field(None, title="Hugging Face Username")
project_name: str = Field(None, title="Output directory")
data_path: str = Field(None, title="Data path")
token: str = Field(None, title="Hub Token")
script_path: str = Field(None, title="Script path")
env: Dict[str, str] = Field(None, title="Environment Variables")
| autotrain-advanced-main | src/autotrain/trainers/generic/params.py |
autotrain-advanced-main | src/autotrain/trainers/generic/__init__.py |
|
import os
import subprocess
import requests
from huggingface_hub import HfApi, snapshot_download
from loguru import logger
def create_dataset_repo(username, project_name, script_path, token):
logger.info("Creating dataset repo...")
api = HfApi(token=token)
repo_id = f"{username}/autotrain-{project_name}"
api.create_repo(
repo_id=repo_id,
repo_type="dataset",
private=True,
)
logger.info("Uploading dataset...")
api.upload_folder(
folder_path=script_path,
repo_id=repo_id,
repo_type="dataset",
)
logger.info("Dataset uploaded.")
return repo_id
def pull_dataset_repo(params):
snapshot_download(
repo_id=params.data_path,
local_dir=params.project_name,
token=params.token,
repo_type="dataset",
)
def install_requirements(params):
# check if params.project_name has a requirements.txt
if os.path.exists(f"{params.project_name}/requirements.txt"):
# install the requirements using subprocess, wait for it to finish
pipe = subprocess.Popen(
[
"pip",
"install",
"-r",
"requirements.txt",
],
cwd=params.project_name,
)
pipe.wait()
logger.info("Requirements installed.")
return
logger.info("No requirements.txt found. Skipping requirements installation.")
return
def run_command(params):
if os.path.exists(f"{params.project_name}/script.py"):
cmd = ["python", "script.py"]
pipe = subprocess.Popen(cmd, cwd=params.project_name)
pipe.wait()
logger.info("Command finished.")
return
raise ValueError("No script.py found.")
def pause_endpoint(params):
endpoint_id = os.environ["ENDPOINT_ID"]
username = endpoint_id.split("/")[0]
project_name = endpoint_id.split("/")[1]
api_url = f"https://api.endpoints.huggingface.cloud/v2/endpoint/{username}/{project_name}/pause"
headers = {"Authorization": f"Bearer {params.token}"}
r = requests.post(api_url, headers=headers)
return r.json()
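# Note (assumption about the deployment environment): ENDPOINT_ID is expected
# to look like "<username>/<endpoint-name>", so the two `split("/")` components
# above map directly onto the pause URL.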
| autotrain-advanced-main | src/autotrain/trainers/generic/utils.py |
import argparse
import json
import os
from huggingface_hub import HfApi
from autotrain import logger
from autotrain.trainers.generic import utils
from autotrain.trainers.generic.params import GenericParams
from autotrain.utils import monitor
def parse_args():
# get training_config.json from the end user
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=True)
return parser.parse_args()
@monitor
def run(config):
if isinstance(config, dict):
config = GenericParams(**config)
# download the data repo
logger.info("Downloading data repo...")
utils.pull_dataset_repo(config)
# install the requirements
logger.info("Installing requirements...")
utils.install_requirements(config)
# run the command
logger.info("Running command...")
utils.run_command(config)
if "SPACE_ID" in os.environ:
# shut down the space
logger.info("Pausing space...")
api = HfApi(token=config.token)
api.pause_space(repo_id=os.environ["SPACE_ID"])
if "ENDPOINT_ID" in os.environ:
# shut down the endpoint
logger.info("Pausing endpoint...")
utils.pause_endpoint(config)
if __name__ == "__main__":
args = parse_args()
_config = json.load(open(args.config))
_config = GenericParams(**_config)
run(_config)
| autotrain-advanced-main | src/autotrain/trainers/generic/__main__.py |
from pydantic import Field
from autotrain.trainers.common import AutoTrainParams
class DreamBoothTrainingParams(AutoTrainParams):
model: str = Field(None, title="Model name")
revision: str = Field(None, title="Revision")
tokenizer: str = Field(None, title="Tokenizer, if different from model")
image_path: str = Field(None, title="Image path")
class_image_path: str = Field(None, title="Class image path")
prompt: str = Field(None, title="Instance prompt")
class_prompt: str = Field(None, title="Class prompt")
num_class_images: int = Field(100, title="Number of class images")
class_labels_conditioning: str = Field(None, title="Class labels conditioning")
prior_preservation: bool = Field(False, title="With prior preservation")
prior_loss_weight: float = Field(1.0, title="Prior loss weight")
project_name: str = Field("dreambooth-model", title="Output directory")
seed: int = Field(42, title="Seed")
resolution: int = Field(512, title="Resolution")
center_crop: bool = Field(False, title="Center crop")
train_text_encoder: bool = Field(False, title="Train text encoder")
batch_size: int = Field(4, title="Train batch size")
sample_batch_size: int = Field(4, title="Sample batch size")
epochs: int = Field(1, title="Number of training epochs")
num_steps: int = Field(None, title="Max train steps")
checkpointing_steps: int = Field(500, title="Checkpointing steps")
resume_from_checkpoint: str = Field(None, title="Resume from checkpoint")
gradient_accumulation: int = Field(1, title="Gradient accumulation steps")
gradient_checkpointing: bool = Field(False, title="Gradient checkpointing")
lr: float = Field(5e-4, title="Learning rate")
scale_lr: bool = Field(False, title="Scale learning rate")
scheduler: str = Field("constant", title="Learning rate scheduler")
warmup_steps: int = Field(0, title="Learning rate warmup steps")
num_cycles: int = Field(1, title="Learning rate num cycles")
lr_power: float = Field(1.0, title="Learning rate power")
dataloader_num_workers: int = Field(0, title="Dataloader num workers")
use_8bit_adam: bool = Field(False, title="Use 8bit adam")
adam_beta1: float = Field(0.9, title="Adam beta 1")
adam_beta2: float = Field(0.999, title="Adam beta 2")
adam_weight_decay: float = Field(1e-2, title="Adam weight decay")
adam_epsilon: float = Field(1e-8, title="Adam epsilon")
max_grad_norm: float = Field(1.0, title="Max grad norm")
allow_tf32: bool = Field(False, title="Allow TF32")
prior_generation_precision: str = Field(None, title="Prior generation precision")
local_rank: int = Field(-1, title="Local rank")
xformers: bool = Field(False, title="Enable xformers memory efficient attention")
pre_compute_text_embeddings: bool = Field(False, title="Pre compute text embeddings")
tokenizer_max_length: int = Field(None, title="Tokenizer max length")
text_encoder_use_attention_mask: bool = Field(False, title="Text encoder use attention mask")
rank: int = Field(4, title="Rank")
xl: bool = Field(False, title="XL")
fp16: bool = Field(False, title="FP16")
bf16: bool = Field(False, title="BF16")
token: str = Field(None, title="Hub token")
repo_id: str = Field(None, title="Hub model id")
push_to_hub: bool = Field(False, title="Push to hub")
username: str = Field(None, title="Hub username")
# disabled:
validation_prompt: str = Field(None, title="Validation prompt")
num_validation_images: int = Field(4, title="Number of validation images")
validation_epochs: int = Field(50, title="Validation epochs")
checkpoints_total_limit: int = Field(None, title="Checkpoints total limit")
validation_images: str = Field(None, title="Validation images")
logging: bool = Field(False, title="Logging using tensorboard")
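# Usage sketch (illustrative only; every field value below is an assumption):
#
#   params = DreamBoothTrainingParams(
#       model="stabilityai/stable-diffusion-xl-base-1.0",
#       image_path="./concept-images",
#       prompt="a photo of sks dog",
#       xl=True,
#       resolution=1024,
#   )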
| autotrain-advanced-main | src/autotrain/trainers/dreambooth/params.py |
from pathlib import Path
import torch
from PIL import Image
from PIL.ImageOps import exif_transpose
from torch.utils.data import Dataset
from torchvision import transforms
class PromptDataset(Dataset):
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example["prompt"] = self.prompt
example["index"] = index
return example
class DreamBoothDatasetXL(Dataset):
"""
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
It pre-processes the images.
"""
def __init__(
self,
instance_data_root,
class_data_root=None,
class_num=None,
size=1024,
center_crop=False,
):
self.size = size
self.center_crop = center_crop
self.instance_data_root = Path(instance_data_root)
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self._length = self.num_instance_images
if class_data_root is not None:
self.class_data_root = Path(class_data_root)
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
if class_num is not None:
self.num_class_images = min(len(self.class_images_path), class_num)
else:
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
else:
self.class_data_root = None
self.image_transforms = transforms.Compose(
[
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
instance_image = exif_transpose(instance_image)
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
example["instance_images"] = self.image_transforms(instance_image)
if self.class_data_root:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
class_image = exif_transpose(class_image)
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
example["class_images"] = self.image_transforms(class_image)
return example
class DreamBoothDataset(Dataset):
"""
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images and tokenizes the prompts.
"""
def __init__(self, config, tokenizers, encoder_hidden_states, instance_prompt_encoder_hidden_states):
self.config = config
self.tokenizer = tokenizers[0]
self.size = self.config.resolution
self.center_crop = self.config.center_crop
self.tokenizer_max_length = self.config.tokenizer_max_length
self.instance_data_root = Path(self.config.image_path)
self.instance_prompt = self.config.prompt
self.class_data_root = Path(self.config.class_image_path) if self.config.prior_preservation else None
self.class_prompt = self.config.class_prompt
self.class_num = self.config.num_class_images
self.encoder_hidden_states = encoder_hidden_states
self.instance_prompt_encoder_hidden_states = instance_prompt_encoder_hidden_states
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(self.instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self._length = self.num_instance_images
if self.class_data_root is not None:
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
if self.class_num is not None:
self.num_class_images = min(len(self.class_images_path), self.class_num)
else:
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
else:
self.class_data_root = None
self.image_transforms = transforms.Compose(
[
transforms.Resize(self.size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(self.size) if self.center_crop else transforms.RandomCrop(self.size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def _tokenize_prompt(self, tokenizer, prompt, tokenizer_max_length=None):
# this function is here to avoid cyclic import issues
if tokenizer_max_length is not None:
max_length = tokenizer_max_length
else:
max_length = tokenizer.model_max_length
text_inputs = tokenizer(
prompt,
truncation=True,
padding="max_length",
max_length=max_length,
return_tensors="pt",
)
return text_inputs
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
instance_image = exif_transpose(instance_image)
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
example["instance_images"] = self.image_transforms(instance_image)
if not self.config.xl:
if self.encoder_hidden_states is not None:
example["instance_prompt_ids"] = self.encoder_hidden_states
else:
text_inputs = self._tokenize_prompt(
self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length
)
example["instance_prompt_ids"] = text_inputs.input_ids
example["instance_attention_mask"] = text_inputs.attention_mask
if self.class_data_root:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
class_image = exif_transpose(class_image)
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
example["class_images"] = self.image_transforms(class_image)
if not self.config.xl:
if self.instance_prompt_encoder_hidden_states is not None:
example["class_prompt_ids"] = self.instance_prompt_encoder_hidden_states
else:
class_text_inputs = self._tokenize_prompt(
self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length
)
example["class_prompt_ids"] = class_text_inputs.input_ids
example["class_attention_mask"] = class_text_inputs.attention_mask
return example
def collate_fn(examples, config):
pixel_values = [example["instance_images"] for example in examples]
if not config.xl:
has_attention_mask = "instance_attention_mask" in examples[0]
input_ids = [example["instance_prompt_ids"] for example in examples]
if has_attention_mask:
attention_mask = [example["instance_attention_mask"] for example in examples]
if config.prior_preservation:
pixel_values += [example["class_images"] for example in examples]
if not config.xl:
input_ids += [example["class_prompt_ids"] for example in examples]
if has_attention_mask:
attention_mask += [example["class_attention_mask"] for example in examples]
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
batch = {
"pixel_values": pixel_values,
}
if not config.xl:
input_ids = torch.cat(input_ids, dim=0)
batch["input_ids"] = input_ids
if has_attention_mask:
# attention_mask = torch.cat(attention_mask, dim=0)
batch["attention_mask"] = attention_mask
return batch
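# Usage sketch (illustrative only; the DataLoader settings are assumptions):
# `collate_fn` is meant to be bound to the training config and handed to a
# DataLoader.
#
#   from functools import partial
#   loader = torch.utils.data.DataLoader(
#       dataset,
#       batch_size=config.batch_size,
#       shuffle=True,
#       collate_fn=partial(collate_fn, config=config),
#   )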
| autotrain-advanced-main | src/autotrain/trainers/dreambooth/datasets.py |
autotrain-advanced-main | src/autotrain/trainers/dreambooth/__init__.py |
|
import hashlib
import itertools
import os
from pathlib import Path
from typing import Dict
import torch
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, StableDiffusionXLPipeline, UNet2DConditionModel
from diffusers.utils.import_utils import is_xformers_available
from packaging import version
from tqdm import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from autotrain import logger
from autotrain.trainers.dreambooth.datasets import PromptDataset
VALID_IMAGE_EXTENSIONS = [".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG"]
XL_MODELS = [
"stabilityai/stable-diffusion-xl-base-1.0",
"stabilityai/stable-diffusion-xl-base-0.9",
"diffusers/stable-diffusion-xl-base-1.0",
]
def create_model_card(repo_id: str, base_model: str, train_text_encoder: bool, prompt: str, repo_folder: str):
if train_text_encoder:
text_encoder_text = "trained"
else:
text_encoder_text = "not trained"
yaml = f"""
---
base_model: {base_model}
instance_prompt: {prompt}
tags:
- text-to-image
- diffusers
- autotrain
inference: true
---
"""
model_card = f"""
# DreamBooth trained by AutoTrain
Text encoder was {text_encoder_text}.
"""
with open(os.path.join(repo_folder, "README.md"), "w") as f:
f.write(yaml + model_card)
def import_model_class_from_model_name_or_path(
pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path, subfolder=subfolder, revision=revision
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "CLIPTextModelWithProjection":
from transformers import CLIPTextModelWithProjection
return CLIPTextModelWithProjection
elif model_class == "RobertaSeriesModelWithTransformation":
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
return RobertaSeriesModelWithTransformation
elif model_class == "T5EncoderModel":
from transformers import T5EncoderModel
return T5EncoderModel
else:
raise ValueError(f"{model_class} is not supported.")
def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
if tokenizer_max_length is not None:
max_length = tokenizer_max_length
else:
max_length = tokenizer.model_max_length
text_inputs = tokenizer(
prompt,
truncation=True,
padding="max_length",
max_length=max_length,
return_tensors="pt",
)
return text_inputs
def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None):
text_input_ids = input_ids.to(text_encoder.device)
if text_encoder_use_attention_mask:
attention_mask = attention_mask.to(text_encoder.device)
else:
attention_mask = None
prompt_embeds = text_encoder(
text_input_ids,
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
return prompt_embeds
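# Usage sketch (illustrative only; the prompt text is an assumption):
#
#   text_inputs = tokenize_prompt(tokenizer, "a photo of sks dog")
#   embeds = encode_prompt(
#       text_encoder,
#       text_inputs.input_ids,
#       text_inputs.attention_mask,
#       text_encoder_use_attention_mask=False,
#   )
#   # embeds has shape (batch_size, sequence_length, hidden_size)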
def encode_prompt_xl(text_encoders, tokenizers, prompt, text_input_ids_list=None):
prompt_embeds_list = []
# logger.info(f"Computing text embeddings for prompt: {prompt}")
# logger.info(f"Text encoders: {text_encoders}")
# logger.info(f"Tokenizers: {tokenizers}")
for i, text_encoder in enumerate(text_encoders):
if tokenizers is not None:
tokenizer = tokenizers[i]
text_input_ids = tokenize_prompt(tokenizer, prompt).input_ids
# logger.info(f"Text input ids: {text_input_ids}")
else:
assert text_input_ids_list is not None
text_input_ids = text_input_ids_list[i]
prompt_embeds = text_encoder(
text_input_ids.to(text_encoder.device),
output_hidden_states=True,
)
# We are only ALWAYS interested in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
bs_embed, seq_len, _ = prompt_embeds.shape
prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
return prompt_embeds, pooled_prompt_embeds
def unet_attn_processors_state_dict(unet) -> Dict[str, torch.Tensor]:
r"""
Returns:
a state dict containing just the attention processor parameters.
"""
attn_processors = unet.attn_processors
attn_processors_state_dict = {}
for attn_processor_key, attn_processor in attn_processors.items():
for parameter_key, parameter in attn_processor.state_dict().items():
attn_processors_state_dict[f"{attn_processor_key}.{parameter_key}"] = parameter
return attn_processors_state_dict
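# Note: keys in the returned dict have the form
# "<attn_processor_name>.<parameter_name>", the flat layout later passed as
# `unet_lora_layers` to the save_lora_weights calls in the trainer.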
def setup_prior_preservation(accelerator, config):
class_images_dir = Path(config.class_image_path)
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < config.num_class_images:
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
if config.prior_generation_precision == "fp32":
torch_dtype = torch.float32
elif config.prior_generation_precision == "fp16":
torch_dtype = torch.float16
elif config.prior_generation_precision == "bf16":
torch_dtype = torch.bfloat16
if config.xl:
pipeline = StableDiffusionXLPipeline.from_pretrained(
config.model,
torch_dtype=torch_dtype,
safety_checker=None,
revision=config.revision,
)
else:
pipeline = DiffusionPipeline.from_pretrained(
config.model,
torch_dtype=torch_dtype,
safety_checker=None,
revision=config.revision,
)
pipeline.set_progress_bar_config(disable=True)
num_new_images = config.num_class_images - cur_class_images
logger.info(f"Number of class images to sample: {num_new_images}.")
sample_dataset = PromptDataset(config.class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=config.sample_batch_size)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
for example in tqdm(
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
):
images = pipeline(example["prompt"]).images
for i, image in enumerate(images):
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
def load_model_components(config, device, weight_dtype):
tokenizers = []
tokenizers.append(
AutoTokenizer.from_pretrained(
config.model,
subfolder="tokenizer",
revision=config.revision,
use_fast=False,
)
)
if config.xl:
tokenizers.append(
AutoTokenizer.from_pretrained(
config.model,
subfolder="tokenizer_2",
revision=config.revision,
use_fast=False,
)
)
cls_text_encoders = []
cls_text_encoders.append(
import_model_class_from_model_name_or_path(config.model, config.revision),
)
if config.xl:
cls_text_encoders.append(
import_model_class_from_model_name_or_path(config.model, config.revision, subfolder="text_encoder_2")
)
text_encoders = []
text_encoders.append(
cls_text_encoders[0].from_pretrained(
config.model,
subfolder="text_encoder",
revision=config.revision,
)
)
if config.xl:
text_encoders.append(
cls_text_encoders[1].from_pretrained(
config.model,
subfolder="text_encoder_2",
revision=config.revision,
)
)
try:
vae = AutoencoderKL.from_pretrained(config.model, subfolder="vae", revision=config.revision)
except OSError:
logger.warning("No VAE found. Training without VAE.")
vae = None
unet = UNet2DConditionModel.from_pretrained(
config.model,
subfolder="unet",
revision=config.revision,
)
noise_scheduler = DDPMScheduler.from_pretrained(config.model, subfolder="scheduler")
# TODO: non-peft version
if vae is not None:
vae.requires_grad_(False)
for _text_encoder in text_encoders:
_text_encoder.requires_grad_(False)
unet.requires_grad_(False)
if vae is not None:
if config.xl:
vae.to(device, dtype=torch.float32)
else:
vae.to(device, dtype=weight_dtype)
unet.to(device, dtype=weight_dtype)
for _text_encoder in text_encoders:
_text_encoder.to(device, dtype=weight_dtype)
return tokenizers, text_encoders, vae, unet, noise_scheduler
def enable_gradient_checkpointing(unet, text_encoders, config):
if config.gradient_checkpointing:
logger.info("Enabling gradient checkpointing.")
unet.enable_gradient_checkpointing()
if config.train_text_encoder:
for i in range(len(text_encoders)):
text_encoders[i].gradient_checkpointing_enable()
def enable_xformers(unet, config):
if config.xformers:
if is_xformers_available():
logger.info("Enabling xformers")
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
                logger.warning(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
def get_optimizer(config, unet_lora_parameters, text_lora_parameters):
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
if config.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
if len(text_lora_parameters) == 0:
params_to_optimize = unet_lora_parameters
elif len(text_lora_parameters) == 1:
params_to_optimize = itertools.chain(unet_lora_parameters, text_lora_parameters[0])
elif len(text_lora_parameters) == 2:
params_to_optimize = itertools.chain(unet_lora_parameters, text_lora_parameters[0], text_lora_parameters[1])
else:
raise ValueError("More than 2 text encoders are not supported.")
optimizer = optimizer_class(
params_to_optimize,
lr=config.lr,
betas=(config.adam_beta1, config.adam_beta2),
weight_decay=config.adam_weight_decay,
eps=config.adam_epsilon,
)
return optimizer
def pre_compute_text_embeddings(config, tokenizers, text_encoders):
if config.pre_compute_text_embeddings:
tokenizer = tokenizers[0]
text_encoder = text_encoders[0]
def compute_text_embeddings(prompt):
with torch.no_grad():
text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=config.tokenizer_max_length)
prompt_embeds = encode_prompt(
text_encoder,
text_inputs.input_ids,
text_inputs.attention_mask,
text_encoder_use_attention_mask=config.text_encoder_use_attention_mask,
)
return prompt_embeds
pre_computed_encoder_hidden_states = compute_text_embeddings(config.prompt)
# disable validation prompt for now
# validation_prompt_negative_prompt_embeds = compute_text_embeddings("")
# if args.validation_prompt is not None:
# validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt)
# else:
# validation_prompt_encoder_hidden_states = None
if config.prompt is not None:
pre_computed_instance_prompt_encoder_hidden_states = compute_text_embeddings(config.prompt)
else:
pre_computed_instance_prompt_encoder_hidden_states = None
else:
pre_computed_encoder_hidden_states = None
# validation_prompt_encoder_hidden_states = None
pre_computed_instance_prompt_encoder_hidden_states = None
return pre_computed_encoder_hidden_states, pre_computed_instance_prompt_encoder_hidden_states
| autotrain-advanced-main | src/autotrain/trainers/dreambooth/utils.py |
import itertools
import math
import os
import shutil
import torch
import torch.nn.functional as F
from diffusers import StableDiffusionXLPipeline
from diffusers.loaders import LoraLoaderMixin, text_encoder_lora_state_dict
from diffusers.optimization import get_scheduler
from huggingface_hub import create_repo, upload_folder
from tqdm import tqdm
from autotrain import logger
from autotrain.trainers.dreambooth import utils
class Trainer:
def __init__(
self,
unet,
vae,
train_dataloader,
train_dataset,
text_encoders,
config,
optimizer,
accelerator,
noise_scheduler,
weight_dtype,
text_lora_parameters,
unet_lora_parameters,
tokenizers,
):
self.train_dataloader = train_dataloader
self.config = config
self.optimizer = optimizer
self.accelerator = accelerator
self.unet = unet
self.vae = vae
self.noise_scheduler = noise_scheduler
self.train_dataset = train_dataset
self.weight_dtype = weight_dtype
self.text_lora_parameters = text_lora_parameters
self.unet_lora_parameters = unet_lora_parameters
self.tokenizers = tokenizers
self.text_encoders = text_encoders
if self.config.xl:
self._setup_xl()
self.text_encoder1 = text_encoders[0]
self.text_encoder2 = None
if len(text_encoders) == 2:
self.text_encoder2 = text_encoders[1]
overrode_max_train_steps = False
self.num_update_steps_per_epoch = math.ceil(len(train_dataloader) / config.gradient_accumulation)
if self.config.num_steps is None:
self.config.num_steps = self.config.epochs * self.num_update_steps_per_epoch
overrode_max_train_steps = True
self.scheduler = get_scheduler(
self.config.scheduler,
optimizer=self.optimizer,
num_warmup_steps=self.config.warmup_steps * self.accelerator.num_processes,
num_training_steps=self.config.num_steps * self.accelerator.num_processes,
num_cycles=self.config.num_cycles,
power=self.config.lr_power,
)
if self.config.train_text_encoder:
if len(text_encoders) == 1:
(
self.unet,
self.text_encoder1,
self.optimizer,
self.train_dataloader,
self.scheduler,
) = self.accelerator.prepare(
self.unet, self.text_encoder1, self.optimizer, self.train_dataloader, self.scheduler
)
elif len(text_encoders) == 2:
(
self.unet,
self.text_encoder1,
self.text_encoder2,
self.optimizer,
self.train_dataloader,
self.scheduler,
) = self.accelerator.prepare(
self.unet,
self.text_encoder1,
self.text_encoder2,
self.optimizer,
self.train_dataloader,
self.scheduler,
)
else:
self.unet, self.optimizer, self.train_dataloader, self.scheduler = accelerator.prepare(
self.unet, self.optimizer, self.train_dataloader, self.scheduler
)
self.num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.config.gradient_accumulation)
if overrode_max_train_steps:
self.config.num_steps = self.config.epochs * self.num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
self.config.epochs = math.ceil(self.config.num_steps / self.num_update_steps_per_epoch)
if self.accelerator.is_main_process:
self.accelerator.init_trackers("dreambooth")
self.total_batch_size = (
self.config.batch_size * self.accelerator.num_processes * self.config.gradient_accumulation
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(self.train_dataset)}")
logger.info(f" Num batches each epoch = {len(self.train_dataloader)}")
logger.info(f" Num Epochs = {self.config.epochs}")
logger.info(f" Instantaneous batch size per device = {config.batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {self.total_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.config.gradient_accumulation}")
logger.info(f" Total optimization steps = {self.config.num_steps}")
logger.info(f" Training config = {self.config}")
self.global_step = 0
self.first_epoch = 0
if config.resume_from_checkpoint:
self._resume_from_checkpoint()
def compute_text_embeddings(self, prompt):
logger.info(f"Computing text embeddings for prompt: {prompt}")
with torch.no_grad():
prompt_embeds, pooled_prompt_embeds = utils.encode_prompt_xl(self.text_encoders, self.tokenizers, prompt)
prompt_embeds = prompt_embeds.to(self.accelerator.device)
pooled_prompt_embeds = pooled_prompt_embeds.to(self.accelerator.device)
return prompt_embeds, pooled_prompt_embeds
def compute_time_ids(self):
# Adapted from pipeline.StableDiffusionXLPipeline._get_add_time_ids
original_size = (self.config.resolution, self.config.resolution)
target_size = (self.config.resolution, self.config.resolution)
# crops_coords_top_left = (self.config.crops_coords_top_left_h, self.config.crops_coords_top_left_w)
crops_coords_top_left = (0, 0)
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_time_ids = torch.tensor([add_time_ids])
add_time_ids = add_time_ids.to(self.accelerator.device, dtype=self.weight_dtype)
return add_time_ids
def _setup_xl(self):
# Handle instance prompt.
instance_time_ids = self.compute_time_ids()
if not self.config.train_text_encoder:
instance_prompt_hidden_states, instance_pooled_prompt_embeds = self.compute_text_embeddings(
self.config.prompt
)
# Handle class prompt for prior-preservation.
if self.config.prior_preservation:
class_time_ids = self.compute_time_ids()
if not self.config.train_text_encoder:
class_prompt_hidden_states, class_pooled_prompt_embeds = self.compute_text_embeddings(
self.config.class_prompt
)
self.add_time_ids = instance_time_ids
if self.config.prior_preservation:
self.add_time_ids = torch.cat([self.add_time_ids, class_time_ids], dim=0)
if not self.config.train_text_encoder:
self.prompt_embeds = instance_prompt_hidden_states
self.unet_add_text_embeds = instance_pooled_prompt_embeds
if self.config.prior_preservation:
self.prompt_embeds = torch.cat([self.prompt_embeds, class_prompt_hidden_states], dim=0)
self.unet_add_text_embeds = torch.cat([self.unet_add_text_embeds, class_pooled_prompt_embeds], dim=0)
else:
self.tokens_one = utils.tokenize_prompt(self.tokenizers[0], self.config.prompt).input_ids
self.tokens_two = utils.tokenize_prompt(self.tokenizers[1], self.config.prompt).input_ids
if self.config.prior_preservation:
class_tokens_one = utils.tokenize_prompt(self.tokenizers[0], self.config.class_prompt).input_ids
class_tokens_two = utils.tokenize_prompt(self.tokenizers[1], self.config.class_prompt).input_ids
self.tokens_one = torch.cat([self.tokens_one, class_tokens_one], dim=0)
self.tokens_two = torch.cat([self.tokens_two, class_tokens_two], dim=0)
def _resume_from_checkpoint(self):
if self.config.resume_from_checkpoint != "latest":
path = os.path.basename(self.config.resume_from_checkpoint)
else:
            # Get the most recent checkpoint
dirs = os.listdir(self.config.project_name)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
self.accelerator.print(
f"Checkpoint '{self.config.resume_from_checkpoint}' does not exist. Starting a new training run."
)
self.config.resume_from_checkpoint = None
else:
self.accelerator.print(f"Resuming from checkpoint {path}")
self.accelerator.load_state(os.path.join(self.config.project_name, path))
self.global_step = int(path.split("-")[1])
resume_global_step = self.global_step * self.config.gradient_accumulation
self.first_epoch = self.global_step // self.num_update_steps_per_epoch
self.resume_step = resume_global_step % (
self.num_update_steps_per_epoch * self.config.gradient_accumulation
)
def _calculate_loss(self, model_pred, noise, model_input, timesteps):
if model_pred.shape[1] == 6 and not self.config.xl:
model_pred, _ = torch.chunk(model_pred, 2, dim=1)
# Get the target for loss depending on the prediction type
if self.noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif self.noise_scheduler.config.prediction_type == "v_prediction":
target = self.noise_scheduler.get_velocity(model_input, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {self.noise_scheduler.config.prediction_type}")
if self.config.prior_preservation:
# Chunk the noise and model_pred into two parts and compute the loss on each part separately.
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)
# Compute instance loss
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
# Compute prior loss
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
# Add the prior loss to the instance loss.
loss = loss + self.config.prior_loss_weight * prior_loss
else:
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
return loss
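    # Note: with prior preservation enabled the effective objective is
    #   loss = MSE(instance_pred, instance_target)
    #          + prior_loss_weight * MSE(class_pred, class_target)
    # where the first half of the batch holds instance images and the second
    # half holds the generated class images (see collate_fn and
    # setup_prior_preservation).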
def _clip_gradients(self):
if self.accelerator.sync_gradients:
if len(self.text_lora_parameters) == 0:
params_to_clip = self.unet_lora_parameters
elif len(self.text_lora_parameters) == 1:
params_to_clip = itertools.chain(self.unet_lora_parameters, self.text_lora_parameters[0])
elif len(self.text_lora_parameters) == 2:
params_to_clip = itertools.chain(
self.unet_lora_parameters, self.text_lora_parameters[0], self.text_lora_parameters[1]
)
else:
raise ValueError("More than 2 text encoders are not supported.")
self.accelerator.clip_grad_norm_(params_to_clip, self.config.max_grad_norm)
def _save_checkpoint(self):
if self.accelerator.is_main_process:
if self.global_step % self.config.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if self.config.checkpoints_total_limit is not None:
checkpoints = os.listdir(self.config.project_name)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= self.config.checkpoints_total_limit:
num_to_remove = len(checkpoints) - self.config.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(self.config.project_name, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
save_path = os.path.join(self.config.project_name, f"checkpoint-{self.global_step}")
self.accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
def _get_model_pred(self, batch, channels, noisy_model_input, timesteps, bsz):
if self.config.xl:
elems_to_repeat = bsz // 2 if self.config.prior_preservation else bsz
if not self.config.train_text_encoder:
unet_added_conditions = {
"time_ids": self.add_time_ids.repeat(elems_to_repeat, 1),
"text_embeds": self.unet_add_text_embeds.repeat(elems_to_repeat, 1),
}
model_pred = self.unet(
noisy_model_input,
timesteps,
self.prompt_embeds.repeat(elems_to_repeat, 1, 1),
added_cond_kwargs=unet_added_conditions,
).sample
else:
unet_added_conditions = {"time_ids": self.add_time_ids.repeat(elems_to_repeat, 1)}
prompt_embeds, pooled_prompt_embeds = utils.encode_prompt_xl(
text_encoders=self.text_encoders,
tokenizers=None,
prompt=None,
text_input_ids_list=[self.tokens_one, self.tokens_two],
)
unet_added_conditions.update({"text_embeds": pooled_prompt_embeds.repeat(bsz, 1)})
prompt_embeds = prompt_embeds.repeat(elems_to_repeat, 1, 1)
model_pred = self.unet(
noisy_model_input, timesteps, prompt_embeds, added_cond_kwargs=unet_added_conditions
).sample
else:
if self.config.pre_compute_text_embeddings:
encoder_hidden_states = batch["input_ids"]
else:
encoder_hidden_states = utils.encode_prompt(
self.text_encoder1,
batch["input_ids"],
batch["attention_mask"],
text_encoder_use_attention_mask=self.config.text_encoder_use_attention_mask,
)
if self.accelerator.unwrap_model(self.unet).config.in_channels == channels * 2:
noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1)
if self.config.class_labels_conditioning == "timesteps":
class_labels = timesteps
else:
class_labels = None
model_pred = self.unet(
noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels
).sample
return model_pred
def train(self):
progress_bar = tqdm(
range(self.global_step, self.config.num_steps), disable=not self.accelerator.is_local_main_process
)
progress_bar.set_description("Steps")
for epoch in range(self.first_epoch, self.config.epochs):
self.unet.train()
if self.config.train_text_encoder:
self.text_encoder1.train()
if self.config.xl:
self.text_encoder2.train()
for step, batch in enumerate(self.train_dataloader):
# Skip steps until we reach the resumed step
if self.config.resume_from_checkpoint and epoch == self.first_epoch and step < self.resume_step:
if step % self.config.gradient_accumulation == 0:
progress_bar.update(1)
continue
with self.accelerator.accumulate(self.unet):
if self.config.xl:
pixel_values = batch["pixel_values"]
else:
pixel_values = batch["pixel_values"].to(dtype=self.weight_dtype)
if self.vae is not None:
# Convert images to latent space
model_input = self.vae.encode(pixel_values).latent_dist.sample()
model_input = model_input * self.vae.config.scaling_factor
model_input = model_input.to(dtype=self.weight_dtype)
else:
model_input = pixel_values
# Sample noise that we'll add to the latents
noise = torch.randn_like(model_input)
bsz, channels, height, width = model_input.shape
# Sample a random timestep for each image
timesteps = torch.randint(
0, self.noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
)
timesteps = timesteps.long()
# Add noise to the model input according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_model_input = self.noise_scheduler.add_noise(model_input, noise, timesteps)
model_pred = self._get_model_pred(batch, channels, noisy_model_input, timesteps, bsz)
loss = self._calculate_loss(model_pred, noise, model_input, timesteps)
self.accelerator.backward(loss)
self._clip_gradients()
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
if self.accelerator.sync_gradients:
progress_bar.update(1)
self.global_step += 1
self._save_checkpoint()
logs = {"loss": loss.detach().item(), "lr": self.scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
self.accelerator.log(logs, step=self.global_step)
if self.global_step >= self.config.num_steps:
break
self.accelerator.wait_for_everyone()
if self.accelerator.is_main_process:
self.unet = self.accelerator.unwrap_model(self.unet)
self.unet = self.unet.to(torch.float32)
unet_lora_layers = utils.unet_attn_processors_state_dict(self.unet)
text_encoder_lora_layers_1 = None
text_encoder_lora_layers_2 = None
if self.text_encoder1 is not None and self.config.train_text_encoder:
text_encoder1 = self.accelerator.unwrap_model(self.text_encoder1)
text_encoder1 = text_encoder1.to(torch.float32)
text_encoder_lora_layers_1 = text_encoder_lora_state_dict(text_encoder1)
if self.text_encoder2 is not None and self.config.train_text_encoder:
text_encoder2 = self.accelerator.unwrap_model(self.text_encoder2)
text_encoder2 = text_encoder2.to(torch.float32)
text_encoder_lora_layers_2 = text_encoder_lora_state_dict(text_encoder2)
if self.config.xl:
StableDiffusionXLPipeline.save_lora_weights(
save_directory=self.config.project_name,
unet_lora_layers=unet_lora_layers,
text_encoder_lora_layers=text_encoder_lora_layers_1,
text_encoder_2_lora_layers=text_encoder_lora_layers_2,
safe_serialization=True,
)
else:
LoraLoaderMixin.save_lora_weights(
save_directory=self.config.project_name,
unet_lora_layers=unet_lora_layers,
text_encoder_lora_layers=text_encoder_lora_layers_1,
safe_serialization=True,
)
self.accelerator.end_training()
def push_to_hub(self):
repo_id = create_repo(
repo_id=self.config.repo_id,
exist_ok=True,
private=True,
token=self.config.token,
).repo_id
utils.create_model_card(
repo_id,
base_model=self.config.model,
train_text_encoder=self.config.train_text_encoder,
prompt=self.config.prompt,
repo_folder=self.config.project_name,
)
upload_folder(
repo_id=repo_id,
folder_path=self.config.project_name,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
token=self.config.token,
)
| autotrain-advanced-main | src/autotrain/trainers/dreambooth/trainer.py |
import argparse
import json
import os
import diffusers
import torch
import torch.nn.functional as F
import transformers
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed
from diffusers import StableDiffusionXLPipeline
from diffusers.loaders import LoraLoaderMixin, text_encoder_lora_state_dict
from diffusers.models.attention_processor import (
AttnAddedKVProcessor,
AttnAddedKVProcessor2_0,
LoRAAttnAddedKVProcessor,
LoRAAttnProcessor,
LoRAAttnProcessor2_0,
SlicedAttnAddedKVProcessor,
)
from huggingface_hub import HfApi, snapshot_download
from autotrain import logger
from autotrain.trainers.dreambooth import utils
from autotrain.trainers.dreambooth.datasets import DreamBoothDataset, collate_fn
from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams
from autotrain.trainers.dreambooth.trainer import Trainer
from autotrain.utils import monitor
def parse_args():
# get training_config.json from the end user
parser = argparse.ArgumentParser()
parser.add_argument("--training_config", type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = DreamBoothTrainingParams(**config)
config.prompt = str(config.prompt).strip()
if config.model in utils.XL_MODELS:
config.xl = True
if config.repo_id is None and config.username is not None:
config.repo_id = f"{config.username}/{config.project_name}"
if config.project_name == "/tmp/model":
snapshot_download(
repo_id=config.image_path,
local_dir=config.project_name,
token=config.token,
repo_type="dataset",
)
config.image_path = "/tmp/model/concept1/"
accelerator_project_config = ProjectConfiguration(
project_dir=config.project_name, logging_dir=os.path.join(config.project_name, "logs")
)
if config.fp16:
mixed_precision = "fp16"
elif config.bf16:
mixed_precision = "bf16"
else:
mixed_precision = "no"
accelerator = Accelerator(
gradient_accumulation_steps=config.gradient_accumulation,
mixed_precision=mixed_precision,
log_with="tensorboard" if config.logging else None,
project_config=accelerator_project_config,
)
if config.train_text_encoder and config.gradient_accumulation > 1 and accelerator.num_processes > 1:
raise ValueError(
"Gradient accumulation is not supported when training the text encoder in distributed training. "
"Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
set_seed(config.seed)
# Generate class images if prior preservation is enabled.
if config.prior_preservation:
utils.setup_prior_preservation(accelerator, config)
# Handle the repository creation
if accelerator.is_main_process:
if config.project_name is not None:
os.makedirs(config.project_name, exist_ok=True)
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
tokenizers, text_encoders, vae, unet, noise_scheduler = utils.load_model_components(
config, accelerator.device, weight_dtype
)
utils.enable_xformers(unet, config)
utils.enable_gradient_checkpointing(unet, text_encoders, config)
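    # The loop below attaches a LoRA attention processor to every UNet attention layer,
    # sizing it from the corresponding block's hidden dimension, and collects only these
    # new parameters; only they (plus optional text-encoder LoRA params) are handed to
    # the optimizer further down, so the base UNet weights are not updated.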
unet_lora_attn_procs = {}
unet_lora_parameters = []
for name, attn_processor in unet.attn_processors.items():
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
if name.startswith("mid_block"):
hidden_size = unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = unet.config.block_out_channels[block_id]
if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):
lora_attn_processor_class = LoRAAttnAddedKVProcessor
else:
lora_attn_processor_class = (
LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
)
module = lora_attn_processor_class(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
unet_lora_attn_procs[name] = module
unet_lora_parameters.extend(module.parameters())
unet.set_attn_processor(unet_lora_attn_procs)
text_lora_parameters = []
if config.train_text_encoder:
        # ensure that dtype is float32, even if the rest of the model (which isn't trained) is loaded in fp16
text_lora_parameters = [
LoraLoaderMixin._modify_text_encoder(_text_encoder, dtype=torch.float32) for _text_encoder in text_encoders
]
def save_model_hook(models, weights, output_dir):
        # There are only two options here: either there are just the unet attn processor layers,
        # or there are the unet and text encoder attn layers.
unet_lora_layers_to_save = None
text_encoder_lora_layers_to_save = []
for model in models:
if isinstance(model, type(accelerator.unwrap_model(unet))):
unet_lora_layers_to_save = utils.unet_attn_processors_state_dict(model)
for _text_encoder in text_encoders:
if isinstance(model, type(accelerator.unwrap_model(_text_encoder))):
text_encoder_lora_layers_to_save.append(text_encoder_lora_state_dict(model))
# make sure to pop weight so that corresponding model is not saved again
weights.pop()
if len(text_encoder_lora_layers_to_save) == 0:
LoraLoaderMixin.save_lora_weights(
output_dir,
unet_lora_layers=unet_lora_layers_to_save,
text_encoder_lora_layers=None,
safe_serialization=True,
)
elif len(text_encoder_lora_layers_to_save) == 1:
LoraLoaderMixin.save_lora_weights(
output_dir,
unet_lora_layers=unet_lora_layers_to_save,
text_encoder_lora_layers=text_encoder_lora_layers_to_save[0],
safe_serialization=True,
)
elif len(text_encoder_lora_layers_to_save) == 2:
StableDiffusionXLPipeline.save_lora_weights(
output_dir,
unet_lora_layers=unet_lora_layers_to_save,
text_encoder_lora_layers=text_encoder_lora_layers_to_save[0],
text_encoder_2_lora_layers=text_encoder_lora_layers_to_save[1],
safe_serialization=True,
)
else:
raise ValueError("unexpected number of text encoders")
def load_model_hook(models, input_dir):
unet_ = None
text_encoders_ = []
while len(models) > 0:
model = models.pop()
if isinstance(model, type(accelerator.unwrap_model(unet))):
unet_ = model
for _text_encoder in text_encoders:
if isinstance(model, type(accelerator.unwrap_model(_text_encoder))):
text_encoders_.append(model)
lora_state_dict, network_alpha = LoraLoaderMixin.lora_state_dict(input_dir)
LoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alpha=network_alpha, unet=unet_)
if len(text_encoders_) == 0:
LoraLoaderMixin.load_lora_into_text_encoder(
lora_state_dict,
network_alpha=network_alpha,
text_encoder=None,
)
elif len(text_encoders_) == 1:
LoraLoaderMixin.load_lora_into_text_encoder(
lora_state_dict,
network_alpha=network_alpha,
text_encoder=text_encoders_[0],
)
elif len(text_encoders_) == 2:
LoraLoaderMixin.load_lora_into_text_encoder(
lora_state_dict,
network_alpha=network_alpha,
text_encoder=text_encoders_[0],
)
LoraLoaderMixin.load_lora_into_text_encoder(
lora_state_dict,
network_alpha=network_alpha,
text_encoder=text_encoders_[1],
)
else:
raise ValueError("unexpected number of text encoders")
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
if config.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if config.scale_lr:
config.lr = config.lr * config.gradient_accumulation * config.batch_size * accelerator.num_processes
optimizer = utils.get_optimizer(config, unet_lora_parameters, text_lora_parameters)
encoder_hs, instance_prompt_encoder_hs = utils.pre_compute_text_embeddings(
config=config, text_encoders=text_encoders, tokenizers=tokenizers
)
train_dataset = DreamBoothDataset(
config=config,
tokenizers=tokenizers,
encoder_hidden_states=encoder_hs,
instance_prompt_encoder_hidden_states=instance_prompt_encoder_hs,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.batch_size,
shuffle=True,
collate_fn=lambda examples: collate_fn(examples, config),
num_workers=config.dataloader_num_workers,
)
trainer = Trainer(
unet=unet,
vae=vae,
train_dataloader=train_dataloader,
text_encoders=text_encoders,
config=config,
optimizer=optimizer,
accelerator=accelerator,
noise_scheduler=noise_scheduler,
train_dataset=train_dataset,
weight_dtype=weight_dtype,
text_lora_parameters=text_lora_parameters,
unet_lora_parameters=unet_lora_parameters,
tokenizers=tokenizers,
)
trainer.train()
# remove token key from training_params.json located in output directory
# first check if file exists
if os.path.exists(f"{config.project_name}/training_params.json"):
training_params = json.load(open(f"{config.project_name}/training_params.json"))
training_params.pop("token")
json.dump(training_params, open(f"{config.project_name}/training_params.json", "w"))
# remove config.image_path directory if it exists
if os.path.exists(config.image_path):
os.system(f"rm -rf {config.image_path}")
# add config.prompt as a text file in the output directory
with open(f"{config.project_name}/prompt.txt", "w") as f:
f.write(config.prompt)
if config.push_to_hub:
trainer.push_to_hub()
if "SPACE_ID" in os.environ:
# shut down the space
logger.info("Pausing space...")
api = HfApi(token=config.token)
api.pause_space(repo_id=os.environ["SPACE_ID"])
if __name__ == "__main__":
args = parse_args()
training_config = json.load(open(args.training_config))
config = DreamBoothTrainingParams(**training_config)
train(config)
| autotrain-advanced-main | src/autotrain/trainers/dreambooth/__main__.py |
import os
from pydantic import BaseModel, Field
from autotrain import logger
class TextClassificationParams(BaseModel):
data_path: str = Field(None, title="Data path")
model: str = Field("bert-base-uncased", title="Model name")
lr: float = Field(5e-5, title="Learning rate")
epochs: int = Field(3, title="Number of training epochs")
max_seq_length: int = Field(128, title="Max sequence length")
batch_size: int = Field(8, title="Training batch size")
warmup_ratio: float = Field(0.1, title="Warmup proportion")
gradient_accumulation: int = Field(1, title="Gradient accumulation steps")
optimizer: str = Field("adamw_torch", title="Optimizer")
scheduler: str = Field("linear", title="Scheduler")
weight_decay: float = Field(0.0, title="Weight decay")
max_grad_norm: float = Field(1.0, title="Max gradient norm")
seed: int = Field(42, title="Seed")
train_split: str = Field("train", title="Train split")
valid_split: str = Field(None, title="Validation split")
text_column: str = Field("text", title="Text column")
target_column: str = Field("target", title="Target column")
logging_steps: int = Field(-1, title="Logging steps")
project_name: str = Field("Project Name", title="Output directory")
auto_find_batch_size: bool = Field(False, title="Auto find batch size")
fp16: bool = Field(False, title="Enable fp16")
save_total_limit: int = Field(1, title="Save total limit")
save_strategy: str = Field("epoch", title="Save strategy")
token: str = Field(None, title="Hub Token")
push_to_hub: bool = Field(False, title="Push to hub")
repo_id: str = Field(None, title="Repo id")
evaluation_strategy: str = Field("epoch", title="Evaluation strategy")
username: str = Field(None, title="Hugging Face Username")
def __str__(self):
data = self.dict()
data["token"] = "*****" if data.get("token") else None
return str(data)
def save(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
path = os.path.join(output_dir, "training_params.json")
# save formatted json
with open(path, "w") as f:
f.write(self.json(indent=4))
def __init__(self, **data):
super().__init__(**data)
# Parameters not supplied by the user
defaults = {f.name for f in self.__fields__.values() if f.default == self.__dict__[f.name]}
supplied = set(data.keys())
not_supplied = defaults - supplied
if not_supplied:
logger.warning(f"Parameters not supplied by user and set to default: {', '.join(not_supplied)}")
# Parameters that were supplied but not used
# This is a naive implementation. It might catch some internal Pydantic params.
unused = supplied - set(self.__fields__)
if unused:
logger.warning(f"Parameters supplied but not used: {', '.join(unused)}")
| autotrain-advanced-main | src/autotrain/trainers/text_classification/params.py |
autotrain-advanced-main | src/autotrain/trainers/text_classification/__init__.py |
|
import torch
class TextClassificationDataset:
def __init__(self, data, tokenizer, config):
self.data = data
self.tokenizer = tokenizer
self.config = config
self.text_column = self.config.text_column
self.target_column = self.config.target_column
def __len__(self):
return len(self.data)
def __getitem__(self, item):
text = str(self.data[item][self.text_column])
target = self.data[item][self.target_column]
target = int(target)
inputs = self.tokenizer(
text,
max_length=self.config.max_seq_length,
padding="max_length",
truncation=True,
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
if "token_type_ids" in inputs:
token_type_ids = inputs["token_type_ids"]
else:
token_type_ids = None
if token_type_ids is not None:
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"labels": torch.tensor(target, dtype=torch.long),
}
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
"labels": torch.tensor(target, dtype=torch.long),
}
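# Illustrative usage (names are placeholders, not part of the original module):
#   dataset = TextClassificationDataset(data=raw_train_data, tokenizer=tokenizer, config=config)
#   sample = dataset[0]
#   # `sample` is a dict of torch tensors -- input_ids, attention_mask, labels, and
#   # token_type_ids when the tokenizer provides them -- ready for transformers.Trainer.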
| autotrain-advanced-main | src/autotrain/trainers/text_classification/dataset.py |
import os
import numpy as np
import requests
from sklearn import metrics
BINARY_CLASSIFICATION_EVAL_METRICS = (
"eval_loss",
"eval_accuracy",
"eval_f1",
"eval_auc",
"eval_precision",
"eval_recall",
)
MULTI_CLASS_CLASSIFICATION_EVAL_METRICS = (
"eval_loss",
"eval_accuracy",
"eval_f1_macro",
"eval_f1_micro",
"eval_f1_weighted",
"eval_precision_macro",
"eval_precision_micro",
"eval_precision_weighted",
"eval_recall_macro",
"eval_recall_micro",
"eval_recall_weighted",
)
MODEL_CARD = """
---
tags:
- autotrain
- text-classification
widget:
- text: "I love AutoTrain"
datasets:
- {dataset}
---
# Model Trained Using AutoTrain
- Problem type: Text Classification
## Validation Metrics
{validation_metrics}
"""
def _binary_classification_metrics(pred):
raw_predictions, labels = pred
predictions = np.argmax(raw_predictions, axis=1)
result = {
"f1": metrics.f1_score(labels, predictions),
"precision": metrics.precision_score(labels, predictions),
"recall": metrics.recall_score(labels, predictions),
"auc": metrics.roc_auc_score(labels, raw_predictions[:, 1]),
"accuracy": metrics.accuracy_score(labels, predictions),
}
return result
def _multi_class_classification_metrics(pred):
raw_predictions, labels = pred
predictions = np.argmax(raw_predictions, axis=1)
results = {
"f1_macro": metrics.f1_score(labels, predictions, average="macro"),
"f1_micro": metrics.f1_score(labels, predictions, average="micro"),
"f1_weighted": metrics.f1_score(labels, predictions, average="weighted"),
"precision_macro": metrics.precision_score(labels, predictions, average="macro"),
"precision_micro": metrics.precision_score(labels, predictions, average="micro"),
"precision_weighted": metrics.precision_score(labels, predictions, average="weighted"),
"recall_macro": metrics.recall_score(labels, predictions, average="macro"),
"recall_micro": metrics.recall_score(labels, predictions, average="micro"),
"recall_weighted": metrics.recall_score(labels, predictions, average="weighted"),
"accuracy": metrics.accuracy_score(labels, predictions),
}
return results
def create_model_card(config, trainer, num_classes):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
valid_metrics = (
BINARY_CLASSIFICATION_EVAL_METRICS if num_classes == 2 else MULTI_CLASS_CLASSIFICATION_EVAL_METRICS
)
eval_scores = [f"{k[len('eval_'):]}: {v}" for k, v in eval_scores.items() if k in valid_metrics]
eval_scores = "\n\n".join(eval_scores)
else:
eval_scores = "No validation metrics available"
model_card = MODEL_CARD.format(
dataset=config.data_path,
validation_metrics=eval_scores,
)
return model_card
def pause_endpoint(params):
endpoint_id = os.environ["ENDPOINT_ID"]
username = endpoint_id.split("/")[0]
project_name = endpoint_id.split("/")[1]
api_url = f"https://api.endpoints.huggingface.cloud/v2/endpoint/{username}/{project_name}/pause"
headers = {"Authorization": f"Bearer {params.token}"}
r = requests.post(api_url, headers=headers)
return r.json()
| autotrain-advanced-main | src/autotrain/trainers/text_classification/utils.py |
import argparse
import json
import os
import pandas as pd
from accelerate.state import PartialState
from datasets import Dataset, load_dataset
from huggingface_hub import HfApi
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
EarlyStoppingCallback,
Trainer,
TrainingArguments,
)
from autotrain import logger
from autotrain.trainers.text_classification import utils
from autotrain.trainers.text_classification.dataset import TextClassificationDataset
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.utils import monitor
def parse_args():
# get training_config.json from the end user
parser = argparse.ArgumentParser()
parser.add_argument("--training_config", type=str, required=True)
return parser.parse_args()
@monitor
def train(config):
if isinstance(config, dict):
config = TextClassificationParams(**config)
if config.repo_id is None and config.username is not None:
config.repo_id = f"{config.username}/{config.project_name}"
if PartialState().process_index == 0:
logger.info("Starting training...")
logger.info(f"Training config: {config}")
train_data = None
valid_data = None
# check if config.train_split.csv exists in config.data_path
if config.train_split is not None:
train_path = f"{config.data_path}/{config.train_split}.csv"
if os.path.exists(train_path):
logger.info("loading dataset from csv")
train_data = pd.read_csv(train_path)
train_data = Dataset.from_pandas(train_data)
else:
train_data = load_dataset(
config.data_path,
split=config.train_split,
token=config.token,
)
if config.valid_split is not None:
valid_path = f"{config.data_path}/{config.valid_split}.csv"
if os.path.exists(valid_path):
logger.info("loading dataset from csv")
valid_data = pd.read_csv(valid_path)
valid_data = Dataset.from_pandas(valid_data)
else:
valid_data = load_dataset(
config.data_path,
split=config.valid_split,
token=config.token,
)
classes = train_data.features[config.target_column].names
label2id = {c: i for i, c in enumerate(classes)}
num_classes = len(classes)
if num_classes < 2:
raise ValueError("Invalid number of classes. Must be greater than 1.")
if config.valid_split is not None:
num_classes_valid = len(valid_data.unique(config.target_column))
if num_classes_valid != num_classes:
raise ValueError(
f"Number of classes in train and valid are not the same. Training has {num_classes} and valid has {num_classes_valid}"
)
model_config = AutoConfig.from_pretrained(config.model, num_labels=num_classes)
model_config._num_labels = len(label2id)
model_config.label2id = label2id
model_config.id2label = {v: k for k, v in label2id.items()}
try:
model = AutoModelForSequenceClassification.from_pretrained(
config.model, config=model_config, trust_remote_code=True, token=config.token
)
except OSError:
model = AutoModelForSequenceClassification.from_pretrained(
config.model, config=model_config, from_tf=True, trust_remote_code=True, token=config.token
)
tokenizer = AutoTokenizer.from_pretrained(config.model, token=config.token, trust_remote_code=True)
train_data = TextClassificationDataset(data=train_data, tokenizer=tokenizer, config=config)
if config.valid_split is not None:
valid_data = TextClassificationDataset(data=valid_data, tokenizer=tokenizer, config=config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
else:
logging_steps = config.logging_steps
training_args = dict(
output_dir=config.project_name,
per_device_train_batch_size=config.batch_size,
per_device_eval_batch_size=2 * config.batch_size,
learning_rate=config.lr,
num_train_epochs=config.epochs,
fp16=config.fp16,
evaluation_strategy=config.evaluation_strategy if config.valid_split is not None else "no",
logging_steps=logging_steps,
save_total_limit=config.save_total_limit,
save_strategy=config.save_strategy,
gradient_accumulation_steps=config.gradient_accumulation,
report_to="tensorboard",
auto_find_batch_size=config.auto_find_batch_size,
lr_scheduler_type=config.scheduler,
optim=config.optimizer,
warmup_ratio=config.warmup_ratio,
weight_decay=config.weight_decay,
max_grad_norm=config.max_grad_norm,
push_to_hub=False,
load_best_model_at_end=True if config.valid_split is not None else False,
ddp_find_unused_parameters=False,
)
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0.01)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
args = TrainingArguments(**training_args)
trainer_args = dict(
args=args,
model=model,
callbacks=callbacks_to_use,
compute_metrics=utils._binary_classification_metrics
if num_classes == 2
else utils._multi_class_classification_metrics,
)
trainer = Trainer(
**trainer_args,
train_dataset=train_data,
eval_dataset=valid_data,
)
trainer.train()
logger.info("Finished training, saving model...")
trainer.save_model(config.project_name)
tokenizer.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer, num_classes)
# remove token key from training_params.json located in output directory
# first check if file exists
if os.path.exists(f"{config.project_name}/training_params.json"):
training_params = json.load(open(f"{config.project_name}/training_params.json"))
training_params.pop("token")
json.dump(training_params, open(f"{config.project_name}/training_params.json", "w"))
# save model card to output directory as README.md
with open(f"{config.project_name}/README.md", "w") as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
logger.info("Pushing model to hub...")
api = HfApi(token=config.token)
api.create_repo(repo_id=config.repo_id, repo_type="model", private=True)
api.upload_folder(folder_path=config.project_name, repo_id=config.repo_id, repo_type="model")
if PartialState().process_index == 0:
if "SPACE_ID" in os.environ:
# shut down the space
logger.info("Pausing space...")
api = HfApi(token=config.token)
api.pause_space(repo_id=os.environ["SPACE_ID"])
if "ENDPOINT_ID" in os.environ:
# shut down the endpoint
logger.info("Pausing endpoint...")
utils.pause_endpoint(config)
if __name__ == "__main__":
args = parse_args()
training_config = json.load(open(args.training_config))
config = TextClassificationParams(**training_config)
train(config)
| autotrain-advanced-main | src/autotrain/trainers/text_classification/__main__.py |
import os
from pydantic import BaseModel, Field
class ImageClassificationParams(BaseModel):
data_path: str = Field(None, title="Data path")
model_name: str = Field("bert-base-uncased", title="Model name")
lr: float = Field(5e-5, title="Learning rate")
epochs: int = Field(3, title="Number of training epochs")
batch_size: int = Field(8, title="Training batch size")
warmup_ratio: float = Field(0.1, title="Warmup proportion")
gradient_accumulation: int = Field(1, title="Gradient accumulation steps")
optimizer: str = Field("adamw_torch", title="Optimizer")
scheduler: str = Field("linear", title="Scheduler")
weight_decay: float = Field(0.0, title="Weight decay")
max_grad_norm: float = Field(1.0, title="Max gradient norm")
seed: int = Field(42, title="Seed")
train_split: str = Field("train", title="Train split")
valid_split: str = Field(None, title="Validation split")
logging_steps: int = Field(-1, title="Logging steps")
project_name: str = Field("Project Name", title="Output directory")
auto_find_batch_size: bool = Field(False, title="Auto find batch size")
fp16: bool = Field(False, title="Enable fp16")
save_total_limit: int = Field(1, title="Save total limit")
save_strategy: str = Field("epoch", title="Save strategy")
token: str = Field(None, title="Hub Token")
push_to_hub: bool = Field(False, title="Push to hub")
repo_id: str = Field(None, title="Repo id")
evaluation_strategy: str = Field("epoch", title="Evaluation strategy")
image_column: str = Field("image", title="Image column")
target_column: str = Field("target", title="Target column")
def __str__(self):
data = self.dict()
data["token"] = "*****" if data.get("token") else None
return str(data)
def save(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
path = os.path.join(output_dir, "training_params.json")
# save formatted json
with open(path, "w") as f:
f.write(self.json(indent=4))
| autotrain-advanced-main | src/autotrain/trainers/image_classification/params.py |
autotrain-advanced-main | src/autotrain/trainers/image_classification/__init__.py |
|
import numpy as np
import torch
class ImageClassificationDataset:
def __init__(self, data, transforms, config):
self.data = data
self.transforms = transforms
self.config = config
def __len__(self):
return len(self.data)
def __getitem__(self, item):
image = self.data[item][self.config.image_column]
target = int(self.data[item][self.config.target_column])
image = self.transforms(image=np.array(image.convert("RGB")))["image"]
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
return {
"pixel_values": torch.tensor(image, dtype=torch.float),
"labels": torch.tensor(target, dtype=torch.long),
}
| autotrain-advanced-main | src/autotrain/trainers/image_classification/dataset.py |
import albumentations as A
import numpy as np
from sklearn import metrics
from autotrain.trainers.image_classification.dataset import ImageClassificationDataset
BINARY_CLASSIFICATION_EVAL_METRICS = (
"eval_loss",
"eval_accuracy",
"eval_f1",
"eval_auc",
"eval_precision",
"eval_recall",
)
MULTI_CLASS_CLASSIFICATION_EVAL_METRICS = (
"eval_loss",
"eval_accuracy",
"eval_f1_macro",
"eval_f1_micro",
"eval_f1_weighted",
"eval_precision_macro",
"eval_precision_micro",
"eval_precision_weighted",
"eval_recall_macro",
"eval_recall_micro",
"eval_recall_weighted",
)
MODEL_CARD = """
---
tags:
- autotrain
- image-classification
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
example_title: Palace
datasets:
- {dataset}
---
# Model Trained Using AutoTrain
- Problem type: Image Classification
## Validation Metrics
{validation_metrics}
"""
def _binary_classification_metrics(pred):
raw_predictions, labels = pred
predictions = np.argmax(raw_predictions, axis=1)
result = {
"f1": metrics.f1_score(labels, predictions),
"precision": metrics.precision_score(labels, predictions),
"recall": metrics.recall_score(labels, predictions),
"auc": metrics.roc_auc_score(labels, raw_predictions[:, 1]),
"accuracy": metrics.accuracy_score(labels, predictions),
}
return result
def _multi_class_classification_metrics(pred):
raw_predictions, labels = pred
predictions = np.argmax(raw_predictions, axis=1)
results = {
"f1_macro": metrics.f1_score(labels, predictions, average="macro"),
"f1_micro": metrics.f1_score(labels, predictions, average="micro"),
"f1_weighted": metrics.f1_score(labels, predictions, average="weighted"),
"precision_macro": metrics.precision_score(labels, predictions, average="macro"),
"precision_micro": metrics.precision_score(labels, predictions, average="micro"),
"precision_weighted": metrics.precision_score(labels, predictions, average="weighted"),
"recall_macro": metrics.recall_score(labels, predictions, average="macro"),
"recall_micro": metrics.recall_score(labels, predictions, average="micro"),
"recall_weighted": metrics.recall_score(labels, predictions, average="weighted"),
"accuracy": metrics.accuracy_score(labels, predictions),
}
return results
def process_data(train_data, valid_data, image_processor, config):
if "shortest_edge" in image_processor.size:
size = image_processor.size["shortest_edge"]
else:
size = (image_processor.size["height"], image_processor.size["width"])
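    # image_processor.size may provide a single int (square images) or a
    # (height, width) pair; the try/except below handles both cases.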
try:
height, width = size
except TypeError:
height = size
width = size
train_transforms = A.Compose(
[
A.RandomResizedCrop(height=height, width=width),
A.RandomRotate90(),
A.HorizontalFlip(p=0.5),
A.RandomBrightnessContrast(p=0.2),
A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
]
)
val_transforms = A.Compose(
[
A.Resize(height=height, width=width),
A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
]
)
train_data = ImageClassificationDataset(train_data, train_transforms, config)
if valid_data is not None:
valid_data = ImageClassificationDataset(valid_data, val_transforms, config)
return train_data, valid_data
return train_data, None
def create_model_card(config, trainer, num_classes):
if config.valid_split is not None:
eval_scores = trainer.evaluate()
valid_metrics = (
BINARY_CLASSIFICATION_EVAL_METRICS if num_classes == 2 else MULTI_CLASS_CLASSIFICATION_EVAL_METRICS
)
eval_scores = [f"{k[len('eval_'):]}: {v}" for k, v in eval_scores.items() if k in valid_metrics]
eval_scores = "\n\n".join(eval_scores)
else:
eval_scores = "No validation metrics available"
model_card = MODEL_CARD.format(
dataset=config.data_path,
validation_metrics=eval_scores,
)
return model_card
| autotrain-advanced-main | src/autotrain/trainers/image_classification/utils.py |
import argparse
import json
from accelerate.state import PartialState
from datasets import load_dataset
from huggingface_hub import HfApi
from transformers import (
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
EarlyStoppingCallback,
Trainer,
TrainingArguments,
)
from autotrain import logger
from autotrain.trainers.image_classification import utils
from autotrain.trainers.image_classification.params import ImageClassificationParams
def parse_args():
# get training_config.json from the end user
parser = argparse.ArgumentParser()
parser.add_argument("--training_config", type=str, required=True)
return parser.parse_args()
def train(config):
if isinstance(config, dict):
config = ImageClassificationParams(**config)
if PartialState().process_index == 0:
logger.info("Starting training...")
logger.info(f"Training config: {config}")
valid_data = None
train_data = load_dataset(
config.data_path,
split=config.train_split,
token=config.token,
)
if config.valid_split is not None:
valid_data = load_dataset(
config.data_path,
split=config.valid_split,
token=config.token,
)
classes = train_data.features[config.target_column].names
logger.info(f"Classes: {classes}")
label2id = {c: i for i, c in enumerate(classes)}
num_classes = len(classes)
if num_classes < 2:
raise ValueError("Invalid number of classes. Must be greater than 1.")
if config.valid_split is not None:
num_classes_valid = len(valid_data.unique(config.target_column))
if num_classes_valid != num_classes:
raise ValueError(
f"Number of classes in train and valid are not the same. Training has {num_classes} and valid has {num_classes_valid}"
)
model_config = AutoConfig.from_pretrained(config.model_name, num_labels=num_classes)
model_config._num_labels = len(label2id)
model_config.label2id = label2id
model_config.id2label = {v: k for k, v in label2id.items()}
try:
model = AutoModelForImageClassification.from_pretrained(
config.model_name,
config=model_config,
trust_remote_code=True,
token=config.token,
ignore_mismatched_sizes=True,
)
except OSError:
model = AutoModelForImageClassification.from_pretrained(
config.model_name,
config=model_config,
from_tf=True,
trust_remote_code=True,
token=config.token,
ignore_mismatched_sizes=True,
)
image_processor = AutoImageProcessor.from_pretrained(config.model_name, token=config.token)
train_data, valid_data = utils.process_data(train_data, valid_data, image_processor, config)
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
else:
logging_steps = config.logging_steps
training_args = dict(
output_dir=config.project_name,
per_device_train_batch_size=config.batch_size,
per_device_eval_batch_size=2 * config.batch_size,
learning_rate=config.lr,
num_train_epochs=config.epochs,
fp16=config.fp16,
evaluation_strategy=config.evaluation_strategy if config.valid_split is not None else "no",
logging_steps=logging_steps,
save_total_limit=config.save_total_limit,
save_strategy=config.save_strategy,
gradient_accumulation_steps=config.gradient_accumulation,
report_to="tensorboard",
auto_find_batch_size=config.auto_find_batch_size,
lr_scheduler_type=config.scheduler,
optim=config.optimizer,
warmup_ratio=config.warmup_ratio,
weight_decay=config.weight_decay,
max_grad_norm=config.max_grad_norm,
push_to_hub=False,
load_best_model_at_end=True if config.valid_split is not None else False,
ddp_find_unused_parameters=False,
)
if config.valid_split is not None:
early_stop = EarlyStoppingCallback(early_stopping_patience=3, early_stopping_threshold=0.01)
callbacks_to_use = [early_stop]
else:
callbacks_to_use = []
args = TrainingArguments(**training_args)
trainer_args = dict(
args=args,
model=model,
callbacks=callbacks_to_use,
compute_metrics=utils._binary_classification_metrics
if num_classes == 2
else utils._multi_class_classification_metrics,
)
trainer = Trainer(
**trainer_args,
train_dataset=train_data,
eval_dataset=valid_data,
)
trainer.train()
logger.info("Finished training, saving model...")
trainer.save_model(config.project_name)
image_processor.save_pretrained(config.project_name)
model_card = utils.create_model_card(config, trainer, num_classes)
# save model card to output directory as README.md
with open(f"{config.project_name}/README.md", "w") as f:
f.write(model_card)
if config.push_to_hub:
if PartialState().process_index == 0:
logger.info("Pushing model to hub...")
api = HfApi(token=config.token)
api.create_repo(repo_id=config.repo_id, repo_type="model")
api.upload_folder(folder_path=config.project_name, repo_id=config.repo_id, repo_type="model")
if __name__ == "__main__":
args = parse_args()
training_config = json.load(open(args.training_config))
config = ImageClassificationParams(**training_config)
train(config)
| autotrain-advanced-main | src/autotrain/trainers/image_classification/__main__.py |
import os
from pydantic import BaseModel, Field
from autotrain import logger
class LLMTrainingParams(BaseModel):
model: str = Field("gpt2", title="Model name")
data_path: str = Field("data", title="Data path")
project_name: str = Field("Project Name", title="Output directory")
train_split: str = Field("train", title="Train data config")
valid_split: str = Field(None, title="Validation data config")
text_column: str = Field("text", title="Text column")
token: str = Field(None, title="Huggingface token")
lr: float = Field(3e-5, title="Learning rate")
epochs: int = Field(1, title="Number of training epochs")
batch_size: int = Field(2, title="Training batch size")
warmup_ratio: float = Field(0.1, title="Warmup proportion")
gradient_accumulation: int = Field(1, title="Gradient accumulation steps")
optimizer: str = Field("adamw_torch", title="Optimizer")
scheduler: str = Field("linear", title="Scheduler")
weight_decay: float = Field(0.0, title="Weight decay")
max_grad_norm: float = Field(1.0, title="Max gradient norm")
seed: int = Field(42, title="Seed")
add_eos_token: bool = Field(True, title="Add EOS token")
block_size: int = Field(-1, title="Block size")
use_peft: bool = Field(False, title="Use PEFT")
lora_r: int = Field(16, title="Lora r")
lora_alpha: int = Field(32, title="Lora alpha")
lora_dropout: float = Field(0.05, title="Lora dropout")
logging_steps: int = Field(-1, title="Logging steps")
evaluation_strategy: str = Field("epoch", title="Evaluation strategy")
save_total_limit: int = Field(1, title="Save total limit")
save_strategy: str = Field("epoch", title="Save strategy")
auto_find_batch_size: bool = Field(False, title="Auto find batch size")
fp16: bool = Field(False, title="FP16")
push_to_hub: bool = Field(False, title="Push to hub")
use_int8: bool = Field(False, title="Use int8")
model_max_length: int = Field(2048, title="Model max length")
repo_id: str = Field(None, title="Repo id")
use_int4: bool = Field(False, title="Use int4")
trainer: str = Field("default", title="Trainer type")
target_modules: str = Field(None, title="Target modules")
merge_adapter: bool = Field(False, title="Merge adapter")
username: str = Field(None, title="Hugging Face Username")
def save(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
path = os.path.join(output_dir, "training_params.json")
# save formatted json
with open(path, "w") as f:
f.write(self.json(indent=4))
def __str__(self):
data = self.dict()
data["token"] = "*****" if data.get("token") else None
return str(data)
def __init__(self, **data):
super().__init__(**data)
# Parameters not supplied by the user
defaults = {f.name for f in self.__fields__.values() if f.default == self.__dict__[f.name]}
supplied = set(data.keys())
not_supplied = defaults - supplied
if not_supplied:
logger.warning(f"Parameters not supplied by user and set to default: {', '.join(not_supplied)}")
# Parameters that were supplied but not used
# This is a naive implementation. It might catch some internal Pydantic params.
unused = supplied - set(self.__fields__)
if unused:
logger.warning(f"Parameters supplied but not used: {', '.join(unused)}")
| autotrain-advanced-main | src/autotrain/trainers/clm/params.py |
autotrain-advanced-main | src/autotrain/trainers/clm/__init__.py |
|
import os
from itertools import chain
import requests
import torch
from datasets import Dataset
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from autotrain import logger
IGNORE_INDEX = -100
DEFAULT_PAD_TOKEN = "[PAD]"
DEFAULT_EOS_TOKEN = "</s>"
DEFAULT_BOS_TOKEN = "</s>"
DEFAULT_UNK_TOKEN = "</s>"
TARGET_MODULES = {
"Salesforce/codegen25-7b-multi": "q_proj,k_proj,v_proj,o_proj,down_proj,up_proj,gate_proj",
}
MODEL_CARD = """
---
tags:
- autotrain
- text-generation
widget:
- text: "I love AutoTrain because "
---
# Model Trained Using AutoTrain
"""
def get_target_modules(config):
if config.target_modules is None:
return TARGET_MODULES.get(config.model)
return config.target_modules.split(",")
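# NOTE: the TARGET_MODULES fallback above returns a comma-separated string, while a
# user-supplied value is split into a list; downstream LoraConfig consumers may expect
# a list, so callers relying on the fallback may want to split it as well.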
def process_data(data, tokenizer, config):
data = data.to_pandas()
data = data.fillna("")
data = data[[config.text_column]]
if config.add_eos_token:
data[config.text_column] = data[config.text_column] + tokenizer.eos_token
data = Dataset.from_pandas(data)
return data
def group_texts(examples, config):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
    # We drop the small remainder; we could add padding instead if the model supported it.
    # You can customize this part to your needs.
if total_length >= config.block_size:
total_length = (total_length // config.block_size) * config.block_size
else:
total_length = 0
# Split by chunks of max_len.
result = {
k: [t[i : i + config.block_size] for i in range(0, total_length, config.block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
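# Illustrative example (not part of the original module): with config.block_size = 4 and a
# batch whose concatenated input_ids span 10 tokens, group_texts returns two examples of
# 4 tokens each, drops the 2-token remainder, and copies input_ids into labels so the model
# is trained with the standard causal-LM objective (the shift happens inside the model).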
def tokenize(examples, tokenizer, config):
output = tokenizer(examples[config.text_column])
return output
def _tokenize(prompt, tokenizer, config):
result = tokenizer(
prompt,
truncation=True,
max_length=tokenizer.model_max_length,
padding=False,
return_tensors=None,
)
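    # When add_eos_token is set and the sequence does not already end with EOS, append it;
    # if the sequence is already at model_max_length, drop the last token first to make room.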
if result["input_ids"][-1] != tokenizer.eos_token_id and config.add_eos_token:
if len(result["input_ids"]) >= tokenizer.model_max_length:
result["input_ids"] = result["input_ids"][:-1]
result["attention_mask"] = result["attention_mask"][:-1]
result["input_ids"].append(tokenizer.eos_token_id)
result["attention_mask"].append(1)
result["labels"] = result["input_ids"].copy()
return result
def merge_adapter(base_model_path, target_model_path, adapter_path):
logger.info("Loading adapter...")
model = AutoModelForCausalLM.from_pretrained(
base_model_path,
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
trust_remote_code=True,
)
model = PeftModel.from_pretrained(model, adapter_path)
tokenizer = AutoTokenizer.from_pretrained(
base_model_path,
trust_remote_code=True,
)
model = model.merge_and_unload()
logger.info("Saving target model...")
model.save_pretrained(target_model_path)
tokenizer.save_pretrained(target_model_path)
def create_model_card():
return MODEL_CARD.strip()
def pause_endpoint(params):
endpoint_id = os.environ["ENDPOINT_ID"]
username = endpoint_id.split("/")[0]
project_name = endpoint_id.split("/")[1]
api_url = f"https://api.endpoints.huggingface.cloud/v2/endpoint/{username}/{project_name}/pause"
headers = {"Authorization": f"Bearer {params.token}"}
r = requests.post(api_url, headers=headers)
return r.json()
| autotrain-advanced-main | src/autotrain/trainers/clm/utils.py |
import os
import torch
from peft import set_peft_model_state_dict
from transformers import TrainerCallback, TrainerControl, TrainerState, TrainingArguments
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
class SavePeftModelCallback(TrainerCallback):
def on_save(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
kwargs["model"].save_pretrained(checkpoint_folder)
pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin")
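        # Writing an empty dict as pytorch_model.bin appears intended to keep the checkpoint
        # folder valid without duplicating the full model weights; only the PEFT adapter
        # saved above carries the trained parameters.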
torch.save({}, pytorch_model_path)
return control
class LoadBestPeftModelCallback(TrainerCallback):
def on_train_end(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).")
best_model_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin")
adapters_weights = torch.load(best_model_path)
model = kwargs["model"]
set_peft_model_state_dict(model, adapters_weights)
return control
| autotrain-advanced-main | src/autotrain/trainers/clm/callbacks.py |
import argparse
import json
import os
import sys
from functools import partial
import pandas as pd
import torch
from accelerate import Accelerator
from accelerate.state import PartialState
from datasets import Dataset, load_dataset
from huggingface_hub import HfApi
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
Trainer,
TrainingArguments,
default_data_collator,
)
from trl import SFTTrainer
from autotrain import logger
from autotrain.trainers.clm import utils
from autotrain.trainers.clm.callbacks import LoadBestPeftModelCallback, SavePeftModelCallback
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.utils import monitor
def parse_args():
# get training_config.json from the end user
parser = argparse.ArgumentParser()
parser.add_argument("--training_config", type=str, required=True)
return parser.parse_args()
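# Illustrative invocation (paths and values are placeholders, not from the source):
#   python -m autotrain.trainers.clm --training_config training_config.json
# where training_config.json holds LLMTrainingParams fields, e.g.
#   {"model": "gpt2", "data_path": "data", "project_name": "my-llm",
#    "train_split": "train", "text_column": "text", "epochs": 1, "use_peft": true}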
@monitor
def train(config):
if isinstance(config, dict):
config = LLMTrainingParams(**config)
if config.repo_id is None and config.username is not None:
config.repo_id = f"{config.username}/{config.project_name}"
# TODO: remove when SFT is fixed
# if config.trainer == "sft":
# config.trainer = "default"
# check if config.train_split.csv exists in config.data_path
if config.train_split is not None:
train_path = f"{config.data_path}/{config.train_split}.csv"
if os.path.exists(train_path):
logger.info("loading dataset from csv")
train_data = pd.read_csv(train_path)
train_data = Dataset.from_pandas(train_data)
else:
train_data = load_dataset(
config.data_path,
split=config.train_split,
token=config.token,
)
if config.valid_split is not None:
valid_path = f"{config.data_path}/{config.valid_split}.csv"
if os.path.exists(valid_path):
logger.info("loading dataset from csv")
valid_data = pd.read_csv(valid_path)
valid_data = Dataset.from_pandas(valid_data)
else:
valid_data = load_dataset(
config.data_path,
split=config.valid_split,
token=config.token,
)
tokenizer = AutoTokenizer.from_pretrained(
config.model,
use_auth_token=config.token,
trust_remote_code=True,
)
if tokenizer.model_max_length > 2048:
tokenizer.model_max_length = config.model_max_length
if getattr(tokenizer, "pad_token", None) is None:
tokenizer.pad_token = tokenizer.eos_token
if config.trainer == "default":
train_data = utils.process_data(
data=train_data,
tokenizer=tokenizer,
config=config,
)
if config.valid_split is not None:
valid_data = utils.process_data(
data=valid_data,
tokenizer=tokenizer,
config=config,
)
model_config = AutoConfig.from_pretrained(
config.model,
use_auth_token=config.token,
trust_remote_code=True,
)
if config.use_peft:
if config.use_int4:
bnb_config = BitsAndBytesConfig(
load_in_4bit=config.use_int4,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=False,
)
elif config.use_int8:
bnb_config = BitsAndBytesConfig(load_in_8bit=config.use_int8)
else:
bnb_config = BitsAndBytesConfig()
model = AutoModelForCausalLM.from_pretrained(
config.model,
config=model_config,
use_auth_token=config.token,
quantization_config=bnb_config,
torch_dtype=torch.float16,
device_map={"": Accelerator().process_index} if torch.cuda.is_available() else None,
trust_remote_code=True,
)
else:
model = AutoModelForCausalLM.from_pretrained(
config.model,
config=model_config,
use_auth_token=config.token,
trust_remote_code=True,
)
model.resize_token_embeddings(len(tokenizer))
if config.use_peft:
if config.use_int8 or config.use_int4:
model = prepare_model_for_kbit_training(model)
peft_config = LoraConfig(
r=config.lora_r,
lora_alpha=config.lora_alpha,
lora_dropout=config.lora_dropout,
bias="none",
task_type="CAUSAL_LM",
target_modules=utils.get_target_modules(config),
)
model = get_peft_model(model, peft_config)
if config.block_size == -1:
config.block_size = None
if config.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
block_size = 1024
else:
if config.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({config.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(config.block_size, tokenizer.model_max_length)
config.block_size = block_size
if config.trainer == "default":
tokenize_fn = partial(utils.tokenize, tokenizer=tokenizer, config=config)
group_texts_fn = partial(utils.group_texts, config=config)
train_data = train_data.map(
tokenize_fn,
batched=True,
num_proc=1,
remove_columns=list(train_data.features),
desc="Running tokenizer on train dataset",
)
if config.valid_split is not None:
valid_data = valid_data.map(
tokenize_fn,
batched=True,
num_proc=1,
remove_columns=list(valid_data.features),
desc="Running tokenizer on validation dataset",
)
train_data = train_data.map(
group_texts_fn,
batched=True,
num_proc=4,
desc=f"Grouping texts in chunks of {block_size}",
)
if config.valid_split is not None:
valid_data = valid_data.map(
group_texts_fn,
batched=True,
num_proc=4,
desc=f"Grouping texts in chunks of {block_size}",
)
logger.info("creating trainer")
# trainer specific
if config.logging_steps == -1:
if config.valid_split is not None:
logging_steps = int(0.2 * len(valid_data) / config.batch_size)
else:
logging_steps = int(0.2 * len(train_data) / config.batch_size)
if logging_steps == 0:
logging_steps = 1
else:
logging_steps = config.logging_steps
training_args = dict(
output_dir=config.project_name,
per_device_train_batch_size=config.batch_size,
per_device_eval_batch_size=config.batch_size,
learning_rate=config.lr,
num_train_epochs=config.epochs,
evaluation_strategy=config.evaluation_strategy if config.valid_split is not None else "no",
logging_steps=logging_steps,
save_total_limit=config.save_total_limit,
save_strategy=config.save_strategy,
gradient_accumulation_steps=config.gradient_accumulation,
report_to="tensorboard",
auto_find_batch_size=config.auto_find_batch_size,
lr_scheduler_type=config.scheduler,
optim=config.optimizer,
warmup_ratio=config.warmup_ratio,
weight_decay=config.weight_decay,
max_grad_norm=config.max_grad_norm,
fp16=config.fp16,
push_to_hub=False,
load_best_model_at_end=True if config.valid_split is not None else False,
ddp_find_unused_parameters=False,
)
args = TrainingArguments(**training_args)
callbacks = []
if config.use_peft:
callbacks.append(SavePeftModelCallback)
if config.valid_split is not None:
callbacks.append(LoadBestPeftModelCallback)
trainer_args = dict(
args=args,
model=model,
)
if config.trainer == "default":
trainer = Trainer(
**trainer_args,
train_dataset=train_data,
eval_dataset=valid_data if config.valid_split is not None else None,
tokenizer=tokenizer,
data_collator=default_data_collator,
callbacks=callbacks,
)
elif config.trainer == "sft":
trainer = SFTTrainer(
**trainer_args,
train_dataset=train_data,
eval_dataset=valid_data if config.valid_split is not None else None,
peft_config=peft_config if config.use_peft else None,
dataset_text_field=config.text_column,
max_seq_length=config.block_size,
tokenizer=tokenizer,
packing=True,
)
else:
raise ValueError(f"trainer `{config.trainer}` not supported")
model.config.use_cache = False
if torch.__version__ >= "2" and sys.platform != "win32":
model = torch.compile(model)
for name, module in trainer.model.named_modules():
# if isinstance(module, LoraLayer):
# if script_args.bf16:
# module = module.to(torch.bfloat16)
if "norm" in name:
module = module.to(torch.float32)
# if "lm_head" in name or "embed_tokens" in name:
# if hasattr(module, "weight"):
# if script_args.bf16 and module.weight.dtype == torch.float32:
# module = module.to(torch.bfloat16)
trainer.train()
logger.info("Finished training, saving model...")
trainer.save_model(config.project_name)
model_card = utils.create_model_card()
# save model card to output directory as README.md
with open(f"{config.project_name}/README.md", "w") as f:
f.write(model_card)
if config.use_peft and config.merge_adapter:
logger.info("Merging adapter weights...")
try:
utils.merge_adapter(
base_model_path=config.model,
target_model_path=config.project_name,
adapter_path=config.project_name,
)
except Exception as e:
logger.warning(f"Failed to merge adapter weights: {e}")
logger.warning("Skipping adapter merge. Only adapter weights will be saved.")
if config.push_to_hub:
if PartialState().process_index == 0:
logger.info("Pushing model to hub...")
if os.path.exists(f"{config.project_name}/training_params.json"):
training_params = json.load(open(f"{config.project_name}/training_params.json"))
training_params.pop("token")
json.dump(training_params, open(f"{config.project_name}/training_params.json", "w"))
api = HfApi(token=config.token)
api.create_repo(repo_id=config.repo_id, repo_type="model", private=True)
api.upload_folder(folder_path=config.project_name, repo_id=config.repo_id, repo_type="model")
if PartialState().process_index == 0:
if "SPACE_ID" in os.environ:
# shut down the space
logger.info("Pausing space...")
api = HfApi(token=config.token)
api.pause_space(repo_id=os.environ["SPACE_ID"])
if "ENDPOINT_ID" in os.environ:
# shut down the endpoint
logger.info("Pausing endpoint...")
utils.pause_endpoint(config)
if __name__ == "__main__":
args = parse_args()
training_config = json.load(open(args.training_config))
config = LLMTrainingParams(**training_config)
train(config)
| autotrain-advanced-main | src/autotrain/trainers/clm/__main__.py |